Merge remote-tracking branch 'upstream/release-v1.41'

Tulir Asokan 2021-08-18 18:12:12 +03:00
commit f285b4200c
237 changed files with 9601 additions and 6005 deletions


@ -47,7 +47,7 @@ try:
except ImportError:
pass
__version__ = "1.40.0"
__version__ = "1.41.0rc1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when


@ -76,6 +76,8 @@ class RoomVersion:
# MSC2716: Adds m.room.power_levels -> content.historical field to control
# whether "insertion", "chunk", "marker" events can be sent
msc2716_historical = attr.ib(type=bool)
# MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
msc2716_redactions = attr.ib(type=bool)
class RoomVersions:
@ -92,6 +94,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
V2 = RoomVersion(
"2",
@ -106,6 +109,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
V3 = RoomVersion(
"3",
@ -120,6 +124,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
V4 = RoomVersion(
"4",
@ -134,6 +139,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
V5 = RoomVersion(
"5",
@ -148,6 +154,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
V6 = RoomVersion(
"6",
@ -162,6 +169,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
@ -176,6 +184,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
)
V7 = RoomVersion(
"7",
@ -190,20 +199,7 @@ class RoomVersions:
msc3083_join_rules=False,
msc2403_knocking=True,
msc2716_historical=False,
)
MSC2716 = RoomVersion(
"org.matrix.msc2716",
RoomDisposition.UNSTABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc2403_knocking=True,
msc2716_historical=True,
msc2716_redactions=False,
)
V8 = RoomVersion(
"8",
@ -218,6 +214,37 @@ class RoomVersions:
msc3083_join_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
)
MSC2716 = RoomVersion(
"org.matrix.msc2716",
RoomDisposition.UNSTABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc2403_knocking=True,
msc2716_historical=True,
msc2716_redactions=False,
)
MSC2716v2 = RoomVersion(
"org.matrix.msc2716v2",
RoomDisposition.UNSTABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc2403_knocking=True,
msc2716_historical=True,
msc2716_redactions=True,
)
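
RoomVersion is a frozen attrs class with no field defaults, which is why every declared version above has to set the new msc2716_redactions flag explicitly. A minimal sketch of the pattern, trimmed to two capability flags (not the full class):

import attr

@attr.s(slots=True, frozen=True)
class MiniRoomVersion:  # trimmed stand-in for RoomVersion
    identifier = attr.ib(type=str)
    msc2716_historical = attr.ib(type=bool)
    # New capability flag: gates the MSC2716 redaction rules for
    # "insertion", "chunk" and "marker" events.
    msc2716_redactions = attr.ib(type=bool)

EXAMPLE = MiniRoomVersion(
    "org.example.demo",  # hypothetical room version identifier
    msc2716_historical=True,
    msc2716_redactions=True,
)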


@ -38,7 +38,6 @@ from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.server import HomeServer
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string
@ -58,7 +57,6 @@ class AdminCmdSlavedStore(
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
RoomStore,
BaseSlavedStore,
):
pass


@ -64,42 +64,41 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.profile import (
ProfileAvatarURLRestServlet,
ProfileDisplaynameRestServlet,
ProfileRestServlet,
)
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import (
from synapse.rest.client import (
account_data,
events,
groups,
login,
presence,
read_marker,
receipts,
room,
room_keys,
sync,
tags,
user_directory,
)
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.account_data import (
AccountDataServlet,
RoomAccountDataServlet,
)
from synapse.rest.client.v2_alpha.devices import DevicesRestServlet
from synapse.rest.client.v2_alpha.keys import (
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet
from synapse.rest.client.account_data import AccountDataServlet, RoomAccountDataServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.initial_sync import InitialSyncRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.v2_alpha.sendtodevice import SendToDeviceRestServlet
from synapse.rest.client.profile import (
ProfileAvatarURLRestServlet,
ProfileDisplaynameRestServlet,
ProfileRestServlet,
)
from synapse.rest.client.push_rule import PushRuleRestServlet
from synapse.rest.client.register import RegisterRestServlet
from synapse.rest.client.sendtodevice import SendToDeviceRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.rest.client.voip import VoipRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@ -114,6 +113,7 @@ from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
@ -237,7 +237,7 @@ class GenericWorkerSlavedStore(
ClientIpWorkerStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
RoomWorkerStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,


@ -237,13 +237,14 @@ class Config:
def read_templates(
self,
filenames: List[str],
custom_template_directory: Optional[str] = None,
custom_template_directories: Optional[Iterable[str]] = None,
) -> List[jinja2.Template]:
"""Load a list of template files from disk using the given variables.
This function will attempt to load the given templates from the default Synapse
template directory. If `custom_template_directory` is supplied, that directory
is tried first.
template directory. If `custom_template_directories` is supplied, any directory
in this list is tried (in the order they appear in the list) before trying
Synapse's default directory.
Files read are treated as Jinja templates. The templates are not rendered yet
and have autoescape enabled.
@ -251,8 +252,8 @@ class Config:
Args:
filenames: A list of template filenames to read.
custom_template_directory: A directory to try to look for the templates
before using the default Synapse template directory instead.
custom_template_directories: A list of directories in which to look for the
templates before falling back to the default Synapse template directory.
Raises:
ConfigError: if the file's path is incorrect or otherwise cannot be read.
@ -260,20 +261,26 @@ class Config:
Returns:
A list of jinja2 templates.
"""
search_directories = [self.default_template_dir]
search_directories = []
# The loader will first look in the custom template directory (if specified) for the
# given filename. If it doesn't find it, it will use the default template dir instead
if custom_template_directory:
# Check that the given template directory exists
if not self.path_exists(custom_template_directory):
raise ConfigError(
"Configured template directory does not exist: %s"
% (custom_template_directory,)
)
# The loader will first look in the custom template directories (if specified)
# for the given filename. If it doesn't find it, it will use the default
# template dir instead.
if custom_template_directories is not None:
for custom_template_directory in custom_template_directories:
# Check that the given template directory exists
if not self.path_exists(custom_template_directory):
raise ConfigError(
"Configured template directory does not exist: %s"
% (custom_template_directory,)
)
# Search the custom template directory as well
search_directories.insert(0, custom_template_directory)
# Search the custom template directory as well
search_directories.append(custom_template_directory)
# Append the default directory at the end of the list so Jinja can fallback on it
# if a template is missing from any custom directory.
search_directories.append(self.default_template_dir)
# TODO: switch to synapse.util.templates.build_jinja_env
loader = jinja2.FileSystemLoader(search_directories)
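
A hedged usage sketch of the new signature, assuming `config` is a loaded Config instance and the directory paths are made up: the custom directories are searched in the order given, with Synapse's default template directory appended as the final fallback.

templates = config.read_templates(
    ["notice_expiry.html", "notice_expiry.txt"],
    custom_template_directories=[
        "/etc/synapse/templates",         # hypothetical global override
        "/etc/synapse/expiry_templates",  # hypothetical per-feature dir
    ],
)
# Each entry is an unrendered jinja2.Template with autoescape enabled.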


@ -78,6 +78,11 @@ class AccountValidityConfig(Config):
)
# Read and store template content
custom_template_directories = (
self.root.server.custom_template_directory,
account_validity_template_dir,
)
(
self.account_validity_account_renewed_template,
self.account_validity_account_previously_renewed_template,
@ -88,5 +93,5 @@ class AccountValidityConfig(Config):
"account_previously_renewed.html",
invalid_token_template_filename,
],
account_validity_template_dir,
(td for td in custom_template_directories if td),
)


@ -151,6 +151,15 @@ class CacheConfig(Config):
# entries are never evicted based on time.
#
#expiry_time: 30m
# Controls how long the results of a /sync request are cached for after
# a successful response is returned. A higher duration can help clients with
# intermittent connections, at the cost of higher memory usage.
#
# By default, this is zero, which means that sync responses are not cached
# at all.
#
#sync_response_cache_duration: 2m
"""
def read_config(self, config, **kwargs):
@ -212,6 +221,10 @@ class CacheConfig(Config):
else:
self.expiry_time_msec = None
self.sync_response_cache_duration = self.parse_duration(
cache_config.get("sync_response_cache_duration", 0)
)
# Resize all caches (if necessary) with the new factors we've loaded
self.resize_all_caches()
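
parse_duration normalises the configured value to milliseconds, accepting either an int or a string with a unit suffix. A simplified, self-contained approximation of the semantics (the real helper supports more units; this standalone sketch is an assumption):

def parse_duration_ms(value) -> int:
    """Accept an int (already ms) or a string like '30s', '2m', '1h', '1d'."""
    if isinstance(value, int):
        return value
    units = {"s": 1_000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}
    return int(value[:-1]) * units[value[-1]]

assert parse_duration_ms("2m") == 120_000  # the documented example above
assert parse_duration_ms(0) == 0           # the default: no /sync caching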


@ -80,6 +80,12 @@ class EmailConfig(Config):
self.require_transport_security = email_config.get(
"require_transport_security", False
)
self.enable_smtp_tls = email_config.get("enable_tls", True)
if self.require_transport_security and not self.enable_smtp_tls:
raise ConfigError(
"email.require_transport_security requires email.enable_tls to be true"
)
if "app_name" in email_config:
self.email_app_name = email_config["app_name"]
else:
@ -251,7 +257,14 @@ class EmailConfig(Config):
registration_template_success_html,
add_threepid_template_success_html,
],
template_dir,
(
td
for td in (
self.root.server.custom_template_directory,
template_dir,
)
if td
), # Filter out template_dir if not provided
)
# Render templates that do not contain any placeholders
@ -291,7 +304,14 @@ class EmailConfig(Config):
self.email_notif_template_text,
) = self.read_templates(
[notif_template_html, notif_template_text],
template_dir,
(
td
for td in (
self.root.server.custom_template_directory,
template_dir,
)
if td
), # Filter out template_dir if not provided
)
self.email_notif_for_new_users = email_config.get(
@ -314,7 +334,14 @@ class EmailConfig(Config):
self.account_validity_template_text,
) = self.read_templates(
[expiry_template_html, expiry_template_text],
template_dir,
(
td
for td in (
self.root.server.custom_template_directory,
template_dir,
)
if td
), # Filter out template_dir if not provided
)
subjects_config = email_config.get("subjects", {})
@ -346,6 +373,9 @@ class EmailConfig(Config):
"""\
# Configuration for sending emails from Synapse.
#
# Server admins can configure custom templates for email content. See
# https://matrix-org.github.io/synapse/latest/templates.html for more information.
#
email:
# The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
#
@ -368,6 +398,14 @@ class EmailConfig(Config):
#
#require_transport_security: true
# Uncomment the following to disable TLS for SMTP.
#
# By default, if the server supports TLS, it will be used, and the server
# must present a certificate that is valid for 'smtp_host'. If this option
# is set to false, TLS will not be used.
#
#enable_tls: false
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
@ -414,49 +452,6 @@ class EmailConfig(Config):
#
#invite_client_location: https://app.element.io
# Directory in which Synapse will try to find the template files below.
# If not set, or the files named below are not found within the template
# directory, default templates from within the Synapse package will be used.
#
# Synapse will look for the following templates in this directory:
#
# * The contents of email notifications of missed events: 'notif_mail.html' and
# 'notif_mail.txt'.
#
# * The contents of account expiry notice emails: 'notice_expiry.html' and
# 'notice_expiry.txt'.
#
# * The contents of password reset emails sent by the homeserver:
# 'password_reset.html' and 'password_reset.txt'
#
# * An HTML page that a user will see when they follow the link in the password
# reset email. The user will be asked to confirm the action before their
# password is reset: 'password_reset_confirmation.html'
#
# * HTML pages for success and failure that a user will see when they confirm
# the password reset flow using the page above: 'password_reset_success.html'
# and 'password_reset_failure.html'
#
# * The contents of address verification emails sent during registration:
# 'registration.html' and 'registration.txt'
#
# * HTML pages for success and failure that a user will see when they follow
# the link in an address verification email sent during registration:
# 'registration_success.html' and 'registration_failure.html'
#
# * The contents of address verification emails sent when an address is added
# to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
#
# * HTML pages for success and failure that a user will see when they follow
# the link in an address verification email sent when an address is added
# to a Matrix account: 'add_threepid_success.html' and
# 'add_threepid_failure.html'
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
# Subjects to use when sending emails from Synapse.
#
# The placeholder '%%(app)s' will be replaced with the value of the 'app_name'


@ -38,3 +38,6 @@ class ExperimentalConfig(Config):
# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False)
# MSC3266 (room summary api)
self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)


@ -67,18 +67,31 @@ handlers:
backupCount: 3 # Does not include the current log file.
encoding: utf8
# Default to buffering writes to log file for efficiency. This means that
# will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
# logs will still be flushed immediately.
# Default to buffering writes to log file for efficiency.
# WARNING/ERROR logs will still be flushed immediately, but there will be a
# delay (of up to `period` seconds, or until the buffer is full with
# `capacity` messages) before INFO/DEBUG logs get written.
buffer:
class: logging.handlers.MemoryHandler
class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
target: file
# The capacity is the number of log lines that are buffered before
# being written to disk. Increasing this will lead to better
# The capacity is the maximum number of log lines that are buffered
# before being written to disk. Increasing this will lead to better
# performance, at the expense of it taking longer for log lines to
# be written to disk.
# This parameter is required.
capacity: 10
flushLevel: 30 # Flush for WARNING logs as well
# Logs with a level at or above the flush level will cause the buffer to
# be flushed immediately.
# Default value: 40 (ERROR)
# Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
flushLevel: 30 # Flush immediately for WARNING logs and higher
# The period of time, in seconds, between forced flushes.
# Messages will not be delayed for longer than this time.
# Default value: 5 seconds
period: 5
# A handler that writes logs to stderr. Unused by default, but can be used
# instead of "buffer" and "file" in the logger handlers.
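
The handler referenced above ships in synapse.logging.handlers; the idea is a MemoryHandler that also flushes on a timer. A minimal sketch built on the standard library (an illustration, not Synapse's actual implementation):

import logging
import logging.handlers
import threading

class PeriodicallyFlushingMemoryHandlerSketch(logging.handlers.MemoryHandler):
    """Buffers records like MemoryHandler, but also flushes every `period`
    seconds so INFO/DEBUG lines are never delayed indefinitely."""

    def __init__(self, capacity, flushLevel=logging.ERROR, target=None, period=5.0):
        super().__init__(capacity, flushLevel=flushLevel, target=target)
        self._period = period
        self._schedule()

    def _schedule(self):
        # Daemon timer so the handler never keeps the interpreter alive.
        timer = threading.Timer(self._period, self._flush_and_reschedule)
        timer.daemon = True
        timer.start()

    def _flush_and_reschedule(self):
        self.flush()
        self._schedule()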


@ -12,9 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from collections import namedtuple
from typing import Dict, List
from urllib.request import getproxies_environment # type: ignore
from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set
from synapse.python_dependencies import DependencyException, check_requirements
@ -22,6 +24,8 @@ from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
DEFAULT_THUMBNAIL_SIZES = [
{"width": 32, "height": 32, "method": "crop"},
{"width": 96, "height": 96, "method": "crop"},
@ -36,6 +40,9 @@ THUMBNAIL_SIZE_YAML = """\
# method: %(method)s
"""
HTTP_PROXY_SET_WARNING = """\
The Synapse config url_preview_ip_range_blacklist will be ignored as an HTTP(s) proxy is configured."""
ThumbnailRequirement = namedtuple(
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@ -181,12 +188,17 @@ class ContentRepositoryConfig(Config):
e.message # noqa: B306, DependencyException.message is a property
)
proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config:
raise ConfigError(
"For security, you must specify an explicit target IP address "
"blacklist in url_preview_ip_range_blacklist for url previewing "
"to work"
)
if "http" not in proxy_env or "https" not in proxy_env:
raise ConfigError(
"For security, you must specify an explicit target IP address "
"blacklist in url_preview_ip_range_blacklist for url previewing "
"to work"
)
else:
if "http" in proxy_env or "https" in proxy_env:
logger.warning("".join(HTTP_PROXY_SET_WARNING))
# we always blacklist '0.0.0.0' and '::', which are supposed to be
# unroutable addresses.
@ -293,6 +305,8 @@ class ContentRepositoryConfig(Config):
# This must be specified if url_preview_enabled is set. It is recommended that
# you uncomment the following list as a starting point.
#
# Note: The value is ignored when an HTTP proxy is in use
#
#url_preview_ip_range_blacklist:
%(ip_range_blacklist)s
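
The proxy detection above relies on getproxies_environment, which reads the standard proxy environment variables: with both HTTP and HTTPS proxies set, a missing url_preview_ip_range_blacklist no longer raises, and a configured one triggers HTTP_PROXY_SET_WARNING instead. A small illustration (the proxy URL is an assumption):

import os
from urllib.request import getproxies_environment

os.environ["HTTP_PROXY"] = "http://proxy.example.com:3128"
os.environ["HTTPS_PROXY"] = "http://proxy.example.com:3128"
proxy_env = getproxies_environment()
assert "http" in proxy_env and "https" in proxy_env  # blacklist check is waived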


@ -710,6 +710,18 @@ class ServerConfig(Config):
# Turn the list into a set to improve lookup speed.
self.next_link_domain_whitelist = set(next_link_domain_whitelist)
templates_config = config.get("templates") or {}
if not isinstance(templates_config, dict):
raise ConfigError("The 'templates' section must be a dictionary")
self.custom_template_directory = templates_config.get(
"custom_template_directory"
)
if self.custom_template_directory is not None and not isinstance(
self.custom_template_directory, str
):
raise ConfigError("'custom_template_directory' must be a string")
def has_tls_listener(self) -> bool:
return any(listener.tls for listener in self.listeners)
@ -960,6 +972,8 @@ class ServerConfig(Config):
#
# This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
#
# Note: The value is ignored when an HTTP proxy is in use
#
#ip_range_blacklist:
%(ip_range_blacklist)s
@ -1282,6 +1296,19 @@ class ServerConfig(Config):
# all domains.
#
#next_link_domain_whitelist: ["matrix.org"]
# Templates to use when generating email or HTML page contents.
#
templates:
# Directory in which Synapse will try to find template files to use to generate
# email or HTML page contents.
# If not set, or a file is not found within the template directory, a default
# template from within the Synapse package will be used.
#
# See https://matrix-org.github.io/synapse/latest/templates.html for more
# information about using custom templates.
#
#custom_template_directory: /path/to/custom/templates/
"""
% locals()
)


@ -45,6 +45,11 @@ class SSOConfig(Config):
self.sso_template_dir = sso_config.get("template_dir")
# Read templates from disk
custom_template_directories = (
self.root.server.custom_template_directory,
self.sso_template_dir,
)
(
self.sso_login_idp_picker_template,
self.sso_redirect_confirm_template,
@ -63,7 +68,7 @@ class SSOConfig(Config):
"sso_auth_success.html",
"sso_auth_bad_user.html",
],
self.sso_template_dir,
(td for td in custom_template_directories if td),
)
# These templates have no placeholders, so render them here
@ -94,6 +99,9 @@ class SSOConfig(Config):
# Additional settings to use with single-sign on systems such as OpenID Connect,
# SAML2 and CAS.
#
# Server admins can configure custom templates for pages related to SSO. See
# https://matrix-org.github.io/synapse/latest/templates.html for more information.
#
sso:
# A list of client URLs which are whitelisted so that the user does not
# have to confirm giving access to their account to the URL. Any client
@ -125,167 +133,4 @@ class SSOConfig(Config):
# information when first signing in. Defaults to false.
#
#update_profile_information: true
# Directory in which Synapse will try to find the template files below.
# If not set, or the files named below are not found within the template
# directory, default templates from within the Synapse package will be used.
#
# Synapse will look for the following templates in this directory:
#
# * HTML page to prompt the user to choose an Identity Provider during
# login: 'sso_login_idp_picker.html'.
#
# This is only used if multiple SSO Identity Providers are configured.
#
# When rendering, this template is given the following variables:
# * redirect_url: the URL that the user will be redirected to after
# login.
#
# * server_name: the homeserver's name.
#
# * providers: a list of available Identity Providers. Each element is
# an object with the following attributes:
#
# * idp_id: unique identifier for the IdP
# * idp_name: user-facing name for the IdP
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
# for the IdP
# * idp_brand: if specified in the IdP config, a textual identifier
# for the brand of the IdP
#
# The rendered HTML page should contain a form which submits its results
# back as a GET request, with the following query parameters:
#
# * redirectUrl: the client redirect URI (ie, the `redirect_url` passed
# to the template)
#
# * idp: the 'idp_id' of the chosen IDP.
#
# * HTML page to prompt new users to enter a userid and confirm other
# details: 'sso_auth_account_details.html'. This is only shown if the
# SSO implementation (with any user_mapping_provider) does not return
# a localpart.
#
# When rendering, this template is given the following variables:
#
# * server_name: the homeserver's name.
#
# * idp: details of the SSO Identity Provider that the user logged in
# with: an object with the following attributes:
#
# * idp_id: unique identifier for the IdP
# * idp_name: user-facing name for the IdP
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
# for the IdP
# * idp_brand: if specified in the IdP config, a textual identifier
# for the brand of the IdP
#
# * user_attributes: an object containing details about the user that
# we received from the IdP. May have the following attributes:
#
# * display_name: the user's display_name
# * emails: a list of email addresses
#
# The template should render a form which submits the following fields:
#
# * username: the localpart of the user's chosen user id
#
# * HTML page allowing the user to consent to the server's terms and
# conditions. This is only shown for new users, and only if
# `user_consent.require_at_registration` is set.
#
# When rendering, this template is given the following variables:
#
# * server_name: the homeserver's name.
#
# * user_id: the user's matrix proposed ID.
#
# * user_profile.display_name: the user's proposed display name, if any.
#
# * consent_version: the version of the terms that the user will be
# shown
#
# * terms_url: a link to the page showing the terms.
#
# The template should render a form which submits the following fields:
#
# * accepted_version: the version of the terms accepted by the user
# (ie, 'consent_version' from the input variables).
#
# * HTML page for a confirmation step before redirecting back to the client
# with the login token: 'sso_redirect_confirm.html'.
#
# When rendering, this template is given the following variables:
#
# * redirect_url: the URL the user is about to be redirected to.
#
# * display_url: the same as `redirect_url`, but with the query
# parameters stripped. The intention is to have a
# human-readable URL to show to users, not to use it as
# the final address to redirect to.
#
# * server_name: the homeserver's name.
#
# * new_user: a boolean indicating whether this is the user's first time
# logging in.
#
# * user_id: the user's matrix ID.
#
# * user_profile.avatar_url: an MXC URI for the user's avatar, if any.
# None if the user has not set an avatar.
#
# * user_profile.display_name: the user's display name. None if the user
# has not set a display name.
#
# * HTML page which notifies the user that they are authenticating to confirm
# an operation on their account during the user interactive authentication
# process: 'sso_auth_confirm.html'.
#
# When rendering, this template is given the following variables:
# * redirect_url: the URL the user is about to be redirected to.
#
# * description: the operation which the user is being asked to confirm
#
# * idp: details of the Identity Provider that we will use to confirm
# the user's identity: an object with the following attributes:
#
# * idp_id: unique identifier for the IdP
# * idp_name: user-facing name for the IdP
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
# for the IdP
# * idp_brand: if specified in the IdP config, a textual identifier
# for the brand of the IdP
#
# * HTML page shown after a successful user interactive authentication session:
# 'sso_auth_success.html'.
#
# Note that this page must include the JavaScript which notifies of a successful authentication
# (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
#
# This template has no additional variables.
#
# * HTML page shown after a user-interactive authentication session which
# does not map correctly onto the expected user: 'sso_auth_bad_user.html'.
#
# When rendering, this template is given the following variables:
# * server_name: the homeserver's name.
# * user_id_to_verify: the MXID of the user that we are trying to
# validate.
#
# * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
# attempts to login: 'sso_account_deactivated.html'.
#
# This template has no additional variables.
#
# * HTML page to display to users if something goes wrong during the
# OpenID Connect authentication process: 'sso_error.html'.
#
# When rendering, this template is given two variables:
# * error: the technical name of the error
# * error_description: a human-readable message for the error
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
"""


@ -396,10 +396,11 @@ class FrozenEvent(EventBase):
return self.__repr__()
def __repr__(self):
return "<FrozenEvent event_id=%r, type=%r, state_key=%r>" % (
return "<FrozenEvent event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
self.get("event_id", None),
self.get("type", None),
self.get("state_key", None),
self.internal_metadata.is_outlier(),
)


@ -17,7 +17,7 @@ from typing import Any, Mapping, Union
from frozendict import frozendict
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.util.async_helpers import yieldable_gather_results
@ -135,6 +135,12 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
add_fields("history_visibility")
elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
add_fields("redacts")
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
add_fields(EventContentFields.MSC2716_NEXT_CHUNK_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_CHUNK:
add_fields(EventContentFields.MSC2716_CHUNK_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
add_fields(EventContentFields.MSC2716_MARKER_INSERTION)
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
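
Concretely, in a room version with msc2716_redactions, redacting an MSC2716 event preserves its linking field while the rest of the content is stripped. A hedged illustration (the unstable type and field strings mirror the constants referenced above and are assumptions about their values):

insertion_event = {
    "type": "org.matrix.msc2716.insertion",
    "room_id": "!historical:example.org",  # hypothetical room
    "content": {
        "org.matrix.msc2716.next_chunk_id": "chunk-1",  # MSC2716_NEXT_CHUNK_ID
        "arbitrary_key": "stripped by redaction",
    },
}
# After prune_event_dict, content retains only the allowed field:
# {"org.matrix.msc2716.next_chunk_id": "chunk-1"}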


@ -1108,7 +1108,8 @@ class FederationClient(FederationBase):
The response from the remote server.
Raises:
HttpResponseException: There was an exception returned from the remote server
HttpResponseException / RequestSendFailed: There was an exception
returned from the remote server
SynapseException: M_FORBIDDEN when the remote server has disallowed publicRoom
requests over federation
@ -1289,8 +1290,136 @@ class FederationClient(FederationBase):
failover_on_unknown_endpoint=True,
)
async def get_room_hierarchy(
self,
destinations: Iterable[str],
room_id: str,
suggested_only: bool,
) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
"""
Call other servers to get a hierarchy of the given room.
@attr.s(frozen=True, slots=True)
Performs simple data validation and parsing of the response.
Args:
destinations: The remote servers. We will try them in turn, omitting any
that have been blacklisted.
room_id: ID of the space to be queried
suggested_only: If true, ask the remote server to only return children
with the "suggested" flag set
Returns:
A tuple of:
The room as a JSON dictionary.
A list of children rooms, as JSON dictionaries.
A list of inaccessible children room IDs.
Raises:
SynapseError if we were unable to get a valid summary from any of the
remote servers
"""
async def send_request(
destination: str,
) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
res = await self.transport_layer.get_room_hierarchy(
destination=destination,
room_id=room_id,
suggested_only=suggested_only,
)
room = res.get("room")
if not isinstance(room, dict):
raise InvalidResponseError("'room' must be a dict")
# Validate children_state of the room.
children_state = room.get("children_state", [])
if not isinstance(children_state, Sequence):
raise InvalidResponseError("'room.children_state' must be a list")
if any(not isinstance(e, dict) for e in children_state):
raise InvalidResponseError("Invalid event in 'children_state' list")
try:
[
FederationSpaceSummaryEventResult.from_json_dict(e)
for e in children_state
]
except ValueError as e:
raise InvalidResponseError(str(e))
# Validate the children rooms.
children = res.get("children", [])
if not isinstance(children, Sequence):
raise InvalidResponseError("'children' must be a list")
if any(not isinstance(r, dict) for r in children):
raise InvalidResponseError("Invalid room in 'children' list")
# Validate the inaccessible children.
inaccessible_children = res.get("inaccessible_children", [])
if not isinstance(inaccessible_children, Sequence):
raise InvalidResponseError("'inaccessible_children' must be a list")
if any(not isinstance(r, str) for r in inaccessible_children):
raise InvalidResponseError(
"Invalid room ID in 'inaccessible_children' list"
)
return room, children, inaccessible_children
try:
return await self._try_destination_list(
"fetch room hierarchy",
destinations,
send_request,
failover_on_unknown_endpoint=True,
)
except SynapseError as e:
# Fallback to the old federation API and translate the results if
# no servers implement the new API.
#
# The algorithm below is a bit inefficient as it only attempts to
# get information for the requested room, but the legacy API may
# return additional layers.
if e.code == 502:
legacy_result = await self.get_space_summary(
destinations,
room_id,
suggested_only,
max_rooms_per_space=None,
exclude_rooms=[],
)
# Find the requested room in the response (and remove it).
for _i, room in enumerate(legacy_result.rooms):
if room.get("room_id") == room_id:
break
else:
# The requested room was not returned, nothing we can do.
raise
requested_room = legacy_result.rooms.pop(_i)
# Find any children events of the requested room.
children_events = []
children_room_ids = set()
for event in legacy_result.events:
if event.room_id == room_id:
children_events.append(event.data)
children_room_ids.add(event.state_key)
# And add them under the requested room.
requested_room["children_state"] = children_events
# Find the children rooms.
children = []
for room in legacy_result.rooms:
if room.get("room_id") in children_room_ids:
children.append(room)
# It isn't clear from the response whether some of the rooms are
# not accessible.
return requested_room, children, ()
raise
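
Putting the validation in send_request together, a remote response shaped like the following would be accepted (the room ID is hypothetical):

accepted_response = {
    "room": {
        "room_id": "!space:example.org",
        # list of child state event dicts, each parseable by
        # FederationSpaceSummaryEventResult.from_json_dict
        "children_state": [],
    },
    "children": [],               # room summary dicts for the children
    "inaccessible_children": [],  # room ID strings the requester may not see
}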
@attr.s(frozen=True, slots=True, auto_attribs=True)
class FederationSpaceSummaryEventResult:
"""Represents a single event in the result of a successful get_space_summary call.
@ -1299,12 +1428,13 @@ class FederationSpaceSummaryEventResult:
object attributes.
"""
event_type = attr.ib(type=str)
state_key = attr.ib(type=str)
via = attr.ib(type=Sequence[str])
event_type: str
room_id: str
state_key: str
via: Sequence[str]
# the raw data, including the above keys
data = attr.ib(type=JsonDict)
data: JsonDict
@classmethod
def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryEventResult":
@ -1321,6 +1451,10 @@ class FederationSpaceSummaryEventResult:
if not isinstance(event_type, str):
raise ValueError("Invalid event: 'event_type' must be a str")
room_id = d.get("room_id")
if not isinstance(room_id, str):
raise ValueError("Invalid event: 'room_id' must be a str")
state_key = d.get("state_key")
if not isinstance(state_key, str):
raise ValueError("Invalid event: 'state_key' must be a str")
@ -1335,15 +1469,15 @@ class FederationSpaceSummaryEventResult:
if any(not isinstance(v, str) for v in via):
raise ValueError("Invalid event: 'via' must be a list of strings")
return cls(event_type, state_key, via, d)
return cls(event_type, room_id, state_key, via, d)
@attr.s(frozen=True, slots=True)
@attr.s(frozen=True, slots=True, auto_attribs=True)
class FederationSpaceSummaryResult:
"""Represents the data returned by a successful get_space_summary call."""
rooms = attr.ib(type=Sequence[JsonDict])
events = attr.ib(type=Sequence[FederationSpaceSummaryEventResult])
rooms: List[JsonDict]
events: Sequence[FederationSpaceSummaryEventResult]
@classmethod
def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryResult":
@ -1356,7 +1490,7 @@ class FederationSpaceSummaryResult:
ValueError if d is not a valid /spaces/ response
"""
rooms = d.get("rooms")
if not isinstance(rooms, Sequence):
if not isinstance(rooms, List):
raise ValueError("'rooms' must be a list")
if any(not isinstance(r, dict) for r in rooms):
raise ValueError("Invalid room in 'rooms' list")


@ -195,13 +195,17 @@ class FederationServer(FederationBase):
origin, room_id, versions, limit
)
res = self._transaction_from_pdus(pdus).get_dict()
res = self._transaction_dict_from_pdus(pdus)
return 200, res
async def on_incoming_transaction(
self, origin: str, transaction_data: JsonDict
) -> Tuple[int, Dict[str, Any]]:
self,
origin: str,
transaction_id: str,
destination: str,
transaction_data: JsonDict,
) -> Tuple[int, JsonDict]:
# If we receive a transaction we should make sure that kick off handling
# any old events in the staging area.
if not self._started_handling_of_staged_events:
@ -212,8 +216,14 @@ class FederationServer(FederationBase):
# accurate as possible.
request_time = self._clock.time_msec()
transaction = Transaction(**transaction_data)
transaction_id = transaction.transaction_id # type: ignore
transaction = Transaction(
transaction_id=transaction_id,
destination=destination,
origin=origin,
origin_server_ts=transaction_data.get("origin_server_ts"), # type: ignore
pdus=transaction_data.get("pdus"), # type: ignore
edus=transaction_data.get("edus"),
)
if not transaction_id:
raise Exception("Transaction missing transaction_id")
@ -221,9 +231,7 @@ class FederationServer(FederationBase):
logger.debug("[%s] Got transaction", transaction_id)
# Reject malformed transactions early: reject if too many PDUs/EDUs
if len(transaction.pdus) > 50 or ( # type: ignore
hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore
):
if len(transaction.pdus) > 50 or len(transaction.edus) > 100:
logger.info("Transaction PDU or EDU count too large. Returning 400")
return 400, {}
@ -263,7 +271,7 @@ class FederationServer(FederationBase):
# CRITICAL SECTION: the first thing we must do (before awaiting) is
# add an entry to _active_transactions.
assert origin not in self._active_transactions
self._active_transactions[origin] = transaction.transaction_id # type: ignore
self._active_transactions[origin] = transaction.transaction_id
try:
result = await self._handle_incoming_transaction(
@ -291,11 +299,11 @@ class FederationServer(FederationBase):
if response:
logger.debug(
"[%s] We've already responded to this request",
transaction.transaction_id, # type: ignore
transaction.transaction_id,
)
return response
logger.debug("[%s] Transaction is new", transaction.transaction_id) # type: ignore
logger.debug("[%s] Transaction is new", transaction.transaction_id)
# We process PDUs and EDUs in parallel. This is important as we don't
# want to block things like to device messages from reaching clients
@ -334,7 +342,7 @@ class FederationServer(FederationBase):
report back to the sending server.
"""
received_pdus_counter.inc(len(transaction.pdus)) # type: ignore
received_pdus_counter.inc(len(transaction.pdus))
origin_host, _ = parse_server_name(origin)
@ -342,7 +350,7 @@ class FederationServer(FederationBase):
newest_pdu_ts = 0
for p in transaction.pdus: # type: ignore
for p in transaction.pdus:
# FIXME (richardv): I don't think this works:
# https://github.com/matrix-org/synapse/issues/8429
if "unsigned" in p:
@ -436,10 +444,10 @@ class FederationServer(FederationBase):
return pdu_results
async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
async def _handle_edus_in_txn(self, origin: str, transaction: Transaction) -> None:
"""Process the EDUs in a received transaction."""
async def _process_edu(edu_dict):
async def _process_edu(edu_dict: JsonDict) -> None:
received_edus_counter.inc()
edu = Edu(
@ -452,7 +460,7 @@ class FederationServer(FederationBase):
await concurrently_execute(
_process_edu,
getattr(transaction, "edus", []),
transaction.edus,
TRANSACTION_CONCURRENCY_LIMIT,
)
@ -538,7 +546,7 @@ class FederationServer(FederationBase):
pdu = await self.handler.get_persisted_pdu(origin, event_id)
if pdu:
return 200, self._transaction_from_pdus([pdu]).get_dict()
return 200, self._transaction_dict_from_pdus([pdu])
else:
return 404, ""
@ -879,18 +887,20 @@ class FederationServer(FederationBase):
ts_now_ms = self._clock.time_msec()
return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)
def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
def _transaction_dict_from_pdus(self, pdu_list: List[EventBase]) -> JsonDict:
"""Returns a new Transaction containing the given PDUs suitable for
transmission.
"""
time_now = self._clock.time_msec()
pdus = [p.get_pdu_json(time_now) for p in pdu_list]
return Transaction(
# Just need a dummy transaction ID and destination since it won't be used.
transaction_id="",
origin=self.server_name,
pdus=pdus,
origin_server_ts=int(time_now),
destination=None,
)
destination="",
).get_dict()
async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
"""Process a PDU received in a federation /send/ transaction.
@ -962,13 +972,18 @@ class FederationServer(FederationBase):
# the room, so instead of pulling the event out of the DB and parsing
# the event we just pull out the next event ID and check if that matches.
if latest_event is not None and latest_origin is not None:
(
next_origin,
next_event_id,
) = await self.store.get_next_staged_event_id_for_room(room_id)
if next_origin != latest_origin or next_event_id != latest_event.event_id:
result = await self.store.get_next_staged_event_id_for_room(room_id)
if result is None:
latest_origin = None
latest_event = None
else:
next_origin, next_event_id = result
if (
next_origin != latest_origin
or next_event_id != latest_event.event_id
):
latest_origin = None
latest_event = None
if latest_origin is None or latest_event is None:
next = await self.store.get_next_staged_event_for_room(
@ -988,6 +1003,7 @@ class FederationServer(FederationBase):
# has started processing).
while True:
async with lock:
logger.info("handling received PDU: %s", event)
try:
await self.handler.on_receive_pdu(
origin, event, sent_to_us_directly=True


@ -45,7 +45,7 @@ class TransactionActions:
`None` if we have not previously responded to this transaction or a
2-tuple of `(int, dict)` representing the response code and response body.
"""
transaction_id = transaction.transaction_id # type: ignore
transaction_id = transaction.transaction_id
if not transaction_id:
raise RuntimeError("Cannot persist a transaction with no transaction_id")
@ -56,7 +56,7 @@ class TransactionActions:
self, origin: str, transaction: Transaction, code: int, response: JsonDict
) -> None:
"""Persist how we responded to a transaction."""
transaction_id = transaction.transaction_id # type: ignore
transaction_id = transaction.transaction_id
if not transaction_id:
raise RuntimeError("Cannot persist a transaction with no transaction_id")


@ -27,6 +27,7 @@ from synapse.logging.opentracing import (
tags,
whitelisted_homeserver,
)
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.metrics import measure_func
@ -104,13 +105,13 @@ class TransactionManager:
len(edus),
)
transaction = Transaction.create_new(
transaction = Transaction(
origin_server_ts=int(self.clock.time_msec()),
transaction_id=txn_id,
origin=self._server_name,
destination=destination,
pdus=pdus,
edus=edus,
pdus=[p.get_pdu_json() for p in pdus],
edus=[edu.get_dict() for edu in edus],
)
self._next_txn_id += 1
@ -131,7 +132,7 @@ class TransactionManager:
# FIXME (richardv): I also believe it no longer works. We (now?) store
# "age_ts" in "unsigned" rather than at the top level. See
# https://github.com/matrix-org/synapse/issues/8429.
def json_data_cb():
def json_data_cb() -> JsonDict:
data = transaction.get_dict()
now = int(self.clock.time_msec())
if "pdus" in data:


@ -143,7 +143,7 @@ class TransportLayerClient:
"""Sends the given Transaction to its destination
Args:
transaction (Transaction)
transaction
Returns:
Succeeds when we get a 2xx HTTP response. The result
@ -1177,6 +1177,28 @@ class TransportLayerClient:
destination=destination, path=path, data=params
)
async def get_room_hierarchy(
self,
destination: str,
room_id: str,
suggested_only: bool,
) -> JsonDict:
"""
Args:
destination: The remote server
room_id: The room ID to ask about.
suggested_only: if True, only suggested rooms will be returned
"""
path = _create_path(
FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/hierarchy/%s", room_id
)
return await self.client.get_json(
destination=destination,
path=path,
args={"suggested_only": "true" if suggested_only else "false"},
)
def _create_path(federation_prefix: str, path: str, *args: str) -> str:
"""

File diff suppressed because it is too large


@ -0,0 +1,332 @@
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, Iterable, List, Optional, Tuple, Type
from typing_extensions import Literal
from synapse.api.errors import FederationDeniedError, SynapseError
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.federation.transport.server.federation import FEDERATION_SERVLET_CLASSES
from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES
from synapse.federation.transport.server.groups_server import (
GROUP_SERVER_SERVLET_CLASSES,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_string_from_args,
)
from synapse.server import HomeServer
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.ratelimitutils import FederationRateLimiter
logger = logging.getLogger(__name__)
class TransportLayerServer(JsonResource):
"""Handles incoming federation HTTP requests"""
def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None):
"""Initialize the TransportLayerServer
Will by default register all servlets. For custom behaviour, pass in
a list of servlet_groups to register.
Args:
hs: homeserver
servlet_groups: List of servlet groups to register.
Defaults to ``DEFAULT_SERVLET_GROUPS``.
"""
self.hs = hs
self.clock = hs.get_clock()
self.servlet_groups = servlet_groups
super().__init__(hs, canonical_json=False)
self.authenticator = Authenticator(hs)
self.ratelimiter = hs.get_federation_ratelimiter()
self.register_servlets()
def register_servlets(self) -> None:
register_servlets(
self.hs,
resource=self,
ratelimiter=self.ratelimiter,
authenticator=self.authenticator,
servlet_groups=self.servlet_groups,
)
class PublicRoomList(BaseFederationServlet):
"""
Fetch the public room list for this server.
This API returns information in the same format as /publicRooms on the
client API, but will only ever include local public rooms and hence is
intended for consumption by other homeservers.
GET /publicRooms HTTP/1.1
HTTP/1.1 200 OK
Content-Type: application/json
{
"chunk": [
{
"aliases": [
"#test:localhost"
],
"guest_can_join": false,
"name": "test room",
"num_joined_members": 3,
"room_id": "!whkydVegtvatLfXmPN:localhost",
"world_readable": false
}
],
"end": "END",
"start": "START"
}
"""
PATH = "/publicRooms"
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_room_list_handler()
self.allow_access = hs.config.allow_public_rooms_over_federation
async def on_GET(
self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
if not self.allow_access:
raise FederationDeniedError(origin)
limit = parse_integer_from_args(query, "limit", 0)
since_token = parse_string_from_args(query, "since", None)
include_all_networks = parse_boolean_from_args(
query, "include_all_networks", default=False
)
third_party_instance_id = parse_string_from_args(
query, "third_party_instance_id", None
)
if include_all_networks:
network_tuple = None
elif third_party_instance_id:
network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
else:
network_tuple = ThirdPartyInstanceID(None, None)
if limit == 0:
# zero is a special value which corresponds to no limit.
limit = None
data = await self.handler.get_local_public_room_list(
limit, since_token, network_tuple=network_tuple, from_federation=True
)
return 200, data
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
# This implements MSC2197 (Search Filtering over Federation)
if not self.allow_access:
raise FederationDeniedError(origin)
limit: Optional[int] = int(content.get("limit", 100))
since_token = content.get("since", None)
search_filter = content.get("filter", None)
include_all_networks = content.get("include_all_networks", False)
third_party_instance_id = content.get("third_party_instance_id", None)
if include_all_networks:
network_tuple = None
if third_party_instance_id is not None:
raise SynapseError(
400, "Can't use include_all_networks with an explicit network"
)
elif third_party_instance_id is None:
network_tuple = ThirdPartyInstanceID(None, None)
else:
network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
if search_filter is None:
logger.warning("Nonefilter")
if limit == 0:
# zero is a special value which corresponds to no limit.
limit = None
data = await self.handler.get_local_public_room_list(
limit=limit,
since_token=since_token,
search_filter=search_filter,
network_tuple=network_tuple,
from_federation=True,
)
return 200, data
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
"""A group or user's server renews their attestation"""
PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_attestation_renewer()
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
# We don't need to check auth here as we check the attestation signatures
new_content = await self.handler.on_renew_attestation(
group_id, user_id, content
)
return 200, new_content
class OpenIdUserInfo(BaseFederationServlet):
"""
Exchange a bearer token for information about a user.
The response format should be compatible with:
http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1
HTTP/1.1 200 OK
Content-Type: application/json
{
"sub": "@userpart:example.org",
}
"""
PATH = "/openid/userinfo"
REQUIRE_AUTH = False
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_federation_server()
async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
query: Dict[bytes, List[bytes]],
) -> Tuple[int, JsonDict]:
token = parse_string_from_args(query, "access_token")
if token is None:
return (
401,
{"errcode": "M_MISSING_TOKEN", "error": "Access Token required"},
)
user_id = await self.handler.on_openid_userinfo(token)
if user_id is None:
return (
401,
{
"errcode": "M_UNKNOWN_TOKEN",
"error": "Access Token unknown or expired",
},
)
return 200, {"sub": user_id}
DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
"federation": FEDERATION_SERVLET_CLASSES,
"room_list": (PublicRoomList,),
"group_server": GROUP_SERVER_SERVLET_CLASSES,
"group_local": GROUP_LOCAL_SERVLET_CLASSES,
"group_attestation": (FederationGroupsRenewAttestaionServlet,),
"openid": (OpenIdUserInfo,),
}
def register_servlets(
hs: HomeServer,
resource: HttpServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
servlet_groups: Optional[Iterable[str]] = None,
):
"""Initialize and register servlet classes.
Will by default register all servlets. For custom behaviour, pass in
a list of servlet_groups to register.
Args:
hs: homeserver
resource: resource class to register to
authenticator: authenticator to use
ratelimiter: ratelimiter to use
servlet_groups: List of servlet groups to register.
Defaults to ``DEFAULT_SERVLET_GROUPS``.
"""
if not servlet_groups:
servlet_groups = DEFAULT_SERVLET_GROUPS.keys()
for servlet_group in servlet_groups:
# Skip unknown servlet groups.
if servlet_group not in DEFAULT_SERVLET_GROUPS:
raise RuntimeError(
f"Attempting to register unknown federation servlet: '{servlet_group}'"
)
for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]:
servletclass(
hs=hs,
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
).register(resource)
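
A hedged usage sketch: a deployment that should expose only a subset of the federation APIs can pass the group names explicitly (hs, resource, authenticator and ratelimiter are assumed to exist already):

register_servlets(
    hs,
    resource=resource,
    authenticator=authenticator,
    ratelimiter=ratelimiter,
    # Only the core federation APIs and OpenID userinfo; group and
    # room-list servlets are skipped. Unknown names raise RuntimeError.
    servlet_groups=["federation", "openid"],
)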


@ -0,0 +1,328 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import re
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
from synapse.http.servlet import parse_json_object_from_request
from synapse.logging import opentracing
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
SynapseTags,
start_active_span,
start_active_span_from_request,
tags,
whitelisted_homeserver,
)
from synapse.server import HomeServer
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import parse_and_validate_server_name
logger = logging.getLogger(__name__)
class AuthenticationError(SynapseError):
"""There was a problem authenticating the request"""
class NoAuthenticationError(AuthenticationError):
"""The request had no authentication information"""
class Authenticator:
def __init__(self, hs: HomeServer):
self._clock = hs.get_clock()
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
self.store = hs.get_datastore()
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
self.notifier = hs.get_notifier()
self.replication_client = None
if hs.config.worker.worker_app:
self.replication_client = hs.get_tcp_replication()
# A method just so we can pass 'self' as the authenticator to the Servlets
async def authenticate_request(self, request, content):
now = self._clock.time_msec()
json_request = {
"method": request.method.decode("ascii"),
"uri": request.uri.decode("ascii"),
"destination": self.server_name,
"signatures": {},
}
if content is not None:
json_request["content"] = content
origin = None
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
if not auth_headers:
raise NoAuthenticationError(
401, "Missing Authorization headers", Codes.UNAUTHORIZED
)
for auth in auth_headers:
if auth.startswith(b"X-Matrix"):
(origin, key, sig) = _parse_auth_header(auth)
json_request["origin"] = origin
json_request["signatures"].setdefault(origin, {})[key] = sig
if (
self.federation_domain_whitelist is not None
and origin not in self.federation_domain_whitelist
):
raise FederationDeniedError(origin)
if origin is None or not json_request["signatures"]:
raise NoAuthenticationError(
401, "Missing Authorization headers", Codes.UNAUTHORIZED
)
await self.keyring.verify_json_for_server(
origin,
json_request,
now,
)
logger.debug("Request from %s", origin)
request.requester = origin
# If we get a valid signed request from the other side, it's probably
# alive
retry_timings = await self.store.get_destination_retry_timings(origin)
if retry_timings and retry_timings.retry_last_ts:
run_in_background(self._reset_retry_timings, origin)
return origin
async def _reset_retry_timings(self, origin):
try:
logger.info("Marking origin %r as up", origin)
await self.store.set_destination_retry_timings(origin, None, 0, 0)
# Inform the relevant places that the remote server is back up.
self.notifier.notify_remote_server_up(origin)
if self.replication_client:
# If we're on a worker we try and inform master about this. The
# replication client doesn't hook into the notifier to avoid
# infinite loops where we send a `REMOTE_SERVER_UP` command to
# master, which then echoes it back to us which in turn pokes
# the notifier.
self.replication_client.send_remote_server_up(origin)
except Exception:
logger.exception("Error resetting retry timings on %s", origin)
def _parse_auth_header(header_bytes):
"""Parse an X-Matrix auth header
Args:
header_bytes (bytes): header value
Returns:
Tuple[str, str, str]: origin, key id, signature.
Raises:
AuthenticationError if the header could not be parsed
"""
try:
header_str = header_bytes.decode("utf-8")
params = header_str.split(" ")[1].split(",")
param_dict = dict(kv.split("=") for kv in params)
def strip_quotes(value):
if value.startswith('"'):
return value[1:-1]
else:
return value
origin = strip_quotes(param_dict["origin"])
# ensure that the origin is a valid server name
parse_and_validate_server_name(origin)
key = strip_quotes(param_dict["key"])
sig = strip_quotes(param_dict["sig"])
return origin, key, sig
except Exception as e:
logger.warning(
"Error parsing auth header '%s': %s",
header_bytes.decode("ascii", "replace"),
e,
)
raise AuthenticationError(
400, "Malformed Authorization header", Codes.UNAUTHORIZED
)
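def _example_parse_auth_header() -> None:
    # Illustrative sketch only (hypothetical header values): the expected
    # shape of an X-Matrix Authorization header, and the (origin, key id,
    # signature) tuple that _parse_auth_header yields for it.
    header = b'X-Matrix origin=remote.example,key="ed25519:key1",sig="dGVzdA"'
    origin, key, sig = _parse_auth_header(header)
    assert (origin, key, sig) == ("remote.example", "ed25519:key1", "dGVzdA")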
class BaseFederationServlet:
"""Abstract base class for federation servlet classes.
The servlet object should have a PATH attribute which takes the form of a regexp to
match against the request path (excluding the /federation/v1 prefix).
The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
the appropriate HTTP method. These methods must be *asynchronous* and have the
signature:
on_<METHOD>(self, origin, content, query, **kwargs)
With arguments:
origin (unicode|None): The authenticated server_name of the calling server,
unless REQUIRE_AUTH is set to False and authentication failed.
content (unicode|None): decoded json body of the request. None if the
request was a GET.
query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
(ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
yet.
**kwargs (dict[unicode, unicode]): the dict mapping keys to path
components as specified in the path match regexp.
Returns:
Optional[Tuple[int, object]]: either (response code, response object) to
return a JSON response, or None if the request has already been handled.
Raises:
SynapseError: to return an error code
Exception: other exceptions will be caught, logged, and a 500 will be
returned.
"""
PATH = "" # Overridden in subclasses, the regex to match against the path.
REQUIRE_AUTH = True
PREFIX = FEDERATION_V1_PREFIX # Allows specifying the API version
RATELIMIT = True # Whether to rate limit requests or not
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
self.hs = hs
self.authenticator = authenticator
self.ratelimiter = ratelimiter
self.server_name = server_name
def _wrap(self, func):
authenticator = self.authenticator
ratelimiter = self.ratelimiter
@functools.wraps(func)
async def new_func(request, *args, **kwargs):
"""A callback which can be passed to HttpServer.RegisterPaths
Args:
request (twisted.web.http.Request):
*args: unused?
**kwargs (dict[unicode, unicode]): the dict mapping keys to path
components as specified in the path match regexp.
Returns:
Tuple[int, object]|None: (response code, response object) as returned by
the callback method. None if the request has already been handled.
"""
content = None
if request.method in [b"PUT", b"POST"]:
# TODO: Handle other method types? other content types?
content = parse_json_object_from_request(request)
try:
origin = await authenticator.authenticate_request(request, content)
except NoAuthenticationError:
origin = None
if self.REQUIRE_AUTH:
logger.warning(
"authenticate_request failed: missing authentication"
)
raise
except Exception as e:
logger.warning("authenticate_request failed: %s", e)
raise
request_tags = {
SynapseTags.REQUEST_ID: request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.getClientIP(),
"authenticated_entity": origin,
"servlet_name": request.request_metrics.name,
}
# Only accept the span context if the origin is authenticated
# and whitelisted
if origin and whitelisted_homeserver(origin):
scope = start_active_span_from_request(
request, "incoming-federation-request", tags=request_tags
)
else:
scope = start_active_span(
"incoming-federation-request", tags=request_tags
)
with scope:
opentracing.inject_response_headers(request.responseHeaders)
if origin and self.RATELIMIT:
with ratelimiter.ratelimit(origin) as d:
await d
if request._disconnected:
logger.warning(
"client disconnected before we started processing "
"request"
)
return -1, None
response = await func(
origin, content, request.args, *args, **kwargs
)
else:
response = await func(
origin, content, request.args, *args, **kwargs
)
return response
return new_func
def register(self, server):
pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
for method in ("GET", "PUT", "POST"):
code = getattr(self, "on_%s" % (method), None)
if code is None:
continue
server.register_paths(
method,
(pattern,),
self._wrap(code),
self.__class__.__name__,
)
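# Illustrative sketch (hypothetical servlet, for exposition only): a minimal
# subclass following the contract described in the BaseFederationServlet
# docstring above.
class _ExamplePingServlet(BaseFederationServlet):
    PATH = "/ping"
    REQUIRE_AUTH = False
    async def on_GET(self, origin, content, query):
        # content is None for GETs; returning (status, dict) produces a JSON
        # response, while returning None would mean "already handled".
        return 200, {"pong": True, "origin": origin}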

View file

@@ -0,0 +1,706 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
from typing_extensions import Literal
import synapse
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_string_from_args,
parse_strings_from_args,
)
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
class BaseFederationServerServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a federation server handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_federation_server()
class FederationSendServlet(BaseFederationServerServlet):
PATH = "/send/(?P<transaction_id>[^/]*)/?"
# We ratelimit manually in the handler as we queue up the requests and we
# don't want to fill up the ratelimiter with blocked requests.
RATELIMIT = False
# This is when someone is trying to send us a bunch of data.
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
transaction_id: str,
) -> Tuple[int, JsonDict]:
"""Called on PUT /send/<transaction_id>/
Args:
transaction_id: The transaction_id associated with this request. This
is *not* None.
Returns:
Tuple of `(code, response)`, where
`response` is a python dict to be converted into JSON that is
used as the response body.
"""
# Parse the request
try:
transaction_data = content
logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))
logger.info(
"Received txn %s from %s. (PDUs: %d, EDUs: %d)",
transaction_id,
origin,
len(transaction_data.get("pdus", [])),
len(transaction_data.get("edus", [])),
)
except Exception as e:
logger.exception(e)
return 400, {"error": "Invalid transaction"}
code, response = await self.handler.on_incoming_transaction(
origin, transaction_id, self.server_name, transaction_data
)
return code, response
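# Illustrative sketch (hypothetical values): the transaction body consumed
# above, as PUT by a remote homeserver to /send/<transaction_id>:
#
#   {
#       "origin": "remote.example",
#       "origin_server_ts": 1629300000000,
#       "pdus": [{...event JSON...}, ...],
#       "edus": [{"edu_type": "m.typing", "content": {...}}]
#   }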
class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
# This is when someone asks for a data item for a given server data_id pair.
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
event_id: str,
) -> Tuple[int, Union[JsonDict, str]]:
return await self.handler.on_pdu_request(origin, event_id)
class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
# This is when someone asks for all data for a given room.
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_room_state_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None, required=False),
)
class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_state_ids_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None, required=True),
)
class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
versions = [x.decode("ascii") for x in query[b"v"]]
limit = parse_integer_from_args(query, "limit", None)
if not limit:
return 400, {"error": "Did not include limit param"}
return await self.handler.on_backfill_request(origin, room_id, versions, limit)
class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
# This is when we receive a server-server Query
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
query_type: str,
) -> Tuple[int, JsonDict]:
args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
args["origin"] = origin
return await self.handler.on_query_request(query_type, args)
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
"""
Args:
origin: The authenticated server_name of the calling server
content: (GETs don't have bodies)
query: Query params from the request.
**kwargs: the dict mapping keys to path components as specified in
the path match regexp.
Returns:
Tuple of (response code, response object)
"""
supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
if supported_versions is None:
supported_versions = ["1"]
result = await self.handler.on_make_join_request(
origin, room_id, user_id, supported_versions=supported_versions
)
return 200, result
class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_make_leave_request(origin, room_id, user_id)
return 200, result
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
result = await self.handler.on_send_leave_request(origin, content, room_id)
return 200, (200, result)
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_send_leave_request(origin, content, room_id)
return 200, result
class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
# Retrieve the room versions the remote homeserver claims to support
supported_versions = parse_strings_from_args(
query, "ver", required=True, encoding="utf-8"
)
result = await self.handler.on_make_knock_request(
origin, room_id, user_id, supported_versions=supported_versions
)
return 200, result
class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_send_knock_request(origin, content, room_id)
return 200, result
class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_event_auth(origin, room_id, event_id)
class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
# TODO(paul): assert that the event_id parsed from the path actually
# matches the one given in content
result = await self.handler.on_send_join_request(origin, content, room_id)
return 200, (200, result)
class FederationV2SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
# TODO(paul): assert that the event_id parsed from the path actually
# matches the one given in content
result = await self.handler.on_send_join_request(origin, content, room_id)
return 200, result
class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
# We don't get a room version, so we have to assume it's EITHER v1 or
# v2. This is "fine" as the only difference between V1 and V2 is the
# state resolution algorithm, and we don't use that for processing
# invites
result = await self.handler.on_invite_request(
origin, content, room_version_id=RoomVersions.V1.identifier
)
# V1 federation API is defined to return a content of `[200, {...}]`
# due to a historical bug.
return 200, (200, result)
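# Illustrative sketch: because of the historical bug noted above, the v1
# response body serialises as a two-element JSON array rather than a bare
# object, e.g.
#
#   [200, {"event": {...signed invite event...}}]
#
# whereas the v2 servlet below returns just the object.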
class FederationV2InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
# TODO(paul): assert that room_id/event_id parsed from path actually
# match those given in content
room_version = content["room_version"]
event = content["event"]
invite_room_state = content["invite_room_state"]
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
return 200, result
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
await self.handler.on_exchange_third_party_invite_request(content)
return 200, {}
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
return await self.handler.on_query_client_keys(origin, content)
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
user_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_query_user_devices(origin, user_id)
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
response = await self.handler.on_claim_client_keys(origin, content)
return 200, response
class FederationGetMissingEventsServlet(BaseFederationServerServlet):
# TODO(paul): Why does this path alone end with "/?" optional?
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
limit = int(content.get("limit", 10))
earliest_events = content.get("earliest_events", [])
latest_events = content.get("latest_events", [])
result = await self.handler.on_get_missing_events(
origin,
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
)
return 200, result
class On3pidBindServlet(BaseFederationServerServlet):
PATH = "/3pid/onbind"
REQUIRE_AUTH = False
async def on_POST(
self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
if "invites" in content:
last_exception = None
for invite in content["invites"]:
try:
if "signed" not in invite or "token" not in invite["signed"]:
message = (
"Rejecting received notification of third-"
"party invite without signed: %s" % (invite,)
)
logger.info(message)
raise SynapseError(400, message)
await self.handler.exchange_third_party_invite(
invite["sender"],
invite["mxid"],
invite["room_id"],
invite["signed"],
)
except Exception as e:
last_exception = e
if last_exception:
raise last_exception
return 200, {}
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
REQUIRE_AUTH = False
async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
query: Dict[bytes, List[bytes]],
) -> Tuple[int, JsonDict]:
return (
200,
{"server": {"name": "Synapse", "version": get_version_string(synapse)}},
)
class FederationSpaceSummaryServlet(BaseFederationServlet):
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
PATH = "/spaces/(?P<room_id>[^/]*)"
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_room_summary_handler()
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
if max_rooms_per_space is not None and max_rooms_per_space < 0:
raise SynapseError(
400,
"Value for 'max_rooms_per_space' must be a non-negative integer",
Codes.BAD_JSON,
)
exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[])
return 200, await self.handler.federation_space_summary(
origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
)
# TODO When switching to the stable endpoint, remove the POST handler.
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
suggested_only = content.get("suggested_only", False)
if not isinstance(suggested_only, bool):
raise SynapseError(
400, "'suggested_only' must be a boolean", Codes.BAD_JSON
)
exclude_rooms = content.get("exclude_rooms", [])
if not isinstance(exclude_rooms, list) or any(
not isinstance(x, str) for x in exclude_rooms
):
raise SynapseError(400, "bad value for 'exclude_rooms'", Codes.BAD_JSON)
max_rooms_per_space = content.get("max_rooms_per_space")
if max_rooms_per_space is not None:
if not isinstance(max_rooms_per_space, int):
raise SynapseError(
400, "bad value for 'max_rooms_per_space'", Codes.BAD_JSON
)
if max_rooms_per_space < 0:
raise SynapseError(
400,
"Value for 'max_rooms_per_space' must be a non-negative integer",
Codes.BAD_JSON,
)
return 200, await self.handler.federation_space_summary(
origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
)
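# Illustrative sketch (hypothetical identifiers): a space summary request as
# the GET handler above receives it:
#
#   GET /_matrix/federation/unstable/org.matrix.msc2946/spaces/!space:remote
#       ?suggested_only=true&max_rooms_per_space=10&exclude_rooms=!a:remote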
class FederationRoomHierarchyServlet(BaseFederationServlet):
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
PATH = "/hierarchy/(?P<room_id>[^/]*)"
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_room_summary_handler()
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
return 200, await self.handler.get_federation_hierarchy(
origin, room_id, suggested_only
)
class RoomComplexityServlet(BaseFederationServlet):
"""
Indicates to other servers how complex (and therefore how likely to be
resource-intensive) a public room known to this server is.
"""
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._store = self.hs.get_datastore()
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
is_public = await self._store.is_room_world_readable_or_publicly_joinable(
room_id
)
if not is_public:
raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
complexity = await self._store.get_room_complexity(room_id)
return 200, complexity
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
FederationStateV1Servlet,
FederationStateIdsServlet,
FederationBackfillServlet,
FederationQueryServlet,
FederationMakeJoinServlet,
FederationMakeLeaveServlet,
FederationEventServlet,
FederationV1SendJoinServlet,
FederationV2SendJoinServlet,
FederationV1SendLeaveServlet,
FederationV2SendLeaveServlet,
FederationV1InviteServlet,
FederationV2InviteServlet,
FederationGetMissingEventsServlet,
FederationEventAuthServlet,
FederationClientKeysQueryServlet,
FederationUserDevicesQueryServlet,
FederationClientKeysClaimServlet,
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
FederationVersionServlet,
RoomComplexityServlet,
FederationSpaceSummaryServlet,
FederationRoomHierarchyServlet,
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
)

View file

@@ -0,0 +1,113 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Type
from synapse.api.errors import SynapseError
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.server import HomeServer
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
class BaseGroupsLocalServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a groups local handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_local_handler()
class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
"""A group server has invited a local user"""
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
if get_domain_from_id(group_id) != origin:
raise SynapseError(403, "group_id doesn't match origin")
assert isinstance(
self.handler, GroupsLocalHandler
), "Workers cannot handle group invites."
new_content = await self.handler.on_invite(group_id, user_id, content)
return 200, new_content
class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
"""A group server has removed a local user"""
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, None]:
if get_domain_from_id(group_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
assert isinstance(
self.handler, GroupsLocalHandler
), "Workers cannot handle group removals."
await self.handler.user_removed_from_group(group_id, user_id, content)
return 200, None
class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
"""Get roles in a group"""
PATH = "/get_groups_publicised"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
resp = await self.handler.bulk_get_publicised_groups(
content["user_ids"], proxy=False
)
return 200, resp
GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsLocalInviteServlet,
FederationGroupsRemoveLocalUserServlet,
FederationGroupsBulkPublicisedServlet,
)

View file

@@ -0,0 +1,753 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Type
from typing_extensions import Literal
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
from synapse.api.errors import Codes, SynapseError
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.http.servlet import parse_string_from_args
from synapse.server import HomeServer
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
class BaseGroupsServerServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a groups server handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: HomeServer,
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_server_handler()
class FederationGroupsProfileServlet(BaseGroupsServerServlet):
"""Get/set the basic profile of a group on behalf of a user"""
PATH = "/groups/(?P<group_id>[^/]*)/profile"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_group_profile(group_id, requester_user_id)
return 200, new_content
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.update_group_profile(
group_id, requester_user_id, content
)
return 200, new_content
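# Illustrative note (hypothetical ids): the groups-server endpoints below all
# carry the acting user as a requester_user_id query parameter, and the
# server-name part of that user id must match the authenticated origin; e.g.
# requester_user_id=@alice:remote.example is only accepted on requests signed
# by remote.example.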
class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/summary"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_group_summary(group_id, requester_user_id)
return 200, new_content
class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
"""Get the rooms in a group on behalf of a user"""
PATH = "/groups/(?P<group_id>[^/]*)/rooms"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)
return 200, new_content
class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
"""Add/remove room from group"""
PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.add_room_to_group(
group_id, requester_user_id, room_id, content
)
return 200, new_content
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.remove_room_from_group(
group_id, requester_user_id, room_id
)
return 200, new_content
class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
"""Update room config in group"""
PATH = (
"/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
"/config/(?P<config_key>[^/]*)"
)
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
room_id: str,
config_key: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
result = await self.handler.update_room_in_group(
group_id, requester_user_id, room_id, config_key, content
)
return 200, result
class FederationGroupsUsersServlet(BaseGroupsServerServlet):
"""Get the users in a group on behalf of a user"""
PATH = "/groups/(?P<group_id>[^/]*)/users"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_users_in_group(group_id, requester_user_id)
return 200, new_content
class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
"""Get the users that have been invited to a group"""
PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_invited_users_in_group(
group_id, requester_user_id
)
return 200, new_content
class FederationGroupsInviteServlet(BaseGroupsServerServlet):
"""Ask a group server to invite someone to the group"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.invite_to_group(
group_id, user_id, requester_user_id, content
)
return 200, new_content
class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
"""Accept an invitation from the group server"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
if get_domain_from_id(user_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
new_content = await self.handler.accept_invite(group_id, user_id, content)
return 200, new_content
class FederationGroupsJoinServlet(BaseGroupsServerServlet):
"""Attempt to join a group"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
if get_domain_from_id(user_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
new_content = await self.handler.join_group(group_id, user_id, content)
return 200, new_content
class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
"""Leave or kick a user from the group"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.remove_user_from_group(
group_id, user_id, requester_user_id, content
)
return 200, new_content
class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
"""Add/remove a room from the group summary, with optional category.
Matches both:
- /groups/:group/summary/rooms/:room_id
- /groups/:group/summary/categories/:category/rooms/:room_id
"""
PATH = (
"/groups/(?P<group_id>[^/]*)/summary"
"(/categories/(?P<category_id>[^/]+))?"
"/rooms/(?P<room_id>[^/]*)"
)
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(
400, "category_id cannot be empty string", Codes.INVALID_PARAM
)
if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
raise SynapseError(
400,
"category_id may not be longer than %s characters"
% (MAX_GROUP_CATEGORYID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_summary_room(
group_id,
requester_user_id,
room_id=room_id,
category_id=category_id,
content=content,
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
resp = await self.handler.delete_group_summary_room(
group_id, requester_user_id, room_id=room_id, category_id=category_id
)
return 200, resp
class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
"""Get all categories for a group"""
PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_categories(group_id, requester_user_id)
return 200, resp
class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
"""Add/remove/get a category in a group"""
PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_category(
group_id, requester_user_id, category_id
)
return 200, resp
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
raise SynapseError(
400,
"category_id may not be longer than %s characters"
% (MAX_GROUP_CATEGORYID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.upsert_group_category(
group_id, requester_user_id, category_id, content
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
resp = await self.handler.delete_group_category(
group_id, requester_user_id, category_id
)
return 200, resp
class FederationGroupsRolesServlet(BaseGroupsServerServlet):
"""Get roles in a group"""
PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_roles(group_id, requester_user_id)
return 200, resp
class FederationGroupsRoleServlet(BaseGroupsServerServlet):
"""Add/remove/get a role in a group"""
PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)
return 200, resp
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(
400, "role_id cannot be empty string", Codes.INVALID_PARAM
)
if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
raise SynapseError(
400,
"role_id may not be longer than %s characters"
% (MAX_GROUP_ROLEID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_role(
group_id, requester_user_id, role_id, content
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
resp = await self.handler.delete_group_role(
group_id, requester_user_id, role_id
)
return 200, resp
class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
"""Add/remove a user from the group summary, with optional role.
Matches both:
- /groups/:group/summary/users/:user_id
- /groups/:group/summary/roles/:role/users/:user_id
"""
PATH = (
"/groups/(?P<group_id>[^/]*)/summary"
"(/roles/(?P<role_id>[^/]+))?"
"/users/(?P<user_id>[^/]*)"
)
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
raise SynapseError(
400,
"role_id may not be longer than %s characters"
% (MAX_GROUP_ROLEID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_summary_user(
group_id,
requester_user_id,
user_id=user_id,
role_id=role_id,
content=content,
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
resp = await self.handler.delete_group_summary_user(
group_id, requester_user_id, user_id=user_id, role_id=role_id
)
return 200, resp
class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
"""Sets whether a group is joinable without an invite or knock"""
PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.set_group_join_policy(
group_id, requester_user_id, content
)
return 200, new_content
GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsProfileServlet,
FederationGroupsSummaryServlet,
FederationGroupsRoomsServlet,
FederationGroupsUsersServlet,
FederationGroupsInvitedUsersServlet,
FederationGroupsInviteServlet,
FederationGroupsAcceptInviteServlet,
FederationGroupsJoinServlet,
FederationGroupsRemoveUserServlet,
FederationGroupsSummaryRoomsServlet,
FederationGroupsCategoriesServlet,
FederationGroupsCategoryServlet,
FederationGroupsRolesServlet,
FederationGroupsRoleServlet,
FederationGroupsSummaryUsersServlet,
FederationGroupsAddRoomsServlet,
FederationGroupsAddRoomsConfigServlet,
FederationGroupsSettingJoinPolicyServlet,
)

View file

@@ -17,18 +17,17 @@ server protocol.
"""
import logging
from typing import Optional
from typing import List, Optional
import attr
from synapse.types import JsonDict
from synapse.util.jsonobject import JsonEncodedObject
logger = logging.getLogger(__name__)
@attr.s(slots=True)
class Edu(JsonEncodedObject):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Edu:
"""An Edu represents a piece of data sent from one homeserver to another.
In comparison to Pdus, Edus are not persisted for a long time on disk, are
@@ -36,10 +35,10 @@ class Edu(JsonEncodedObject):
internal ID or previous references graph.
"""
edu_type = attr.ib(type=str)
content = attr.ib(type=dict)
origin = attr.ib(type=str)
destination = attr.ib(type=str)
edu_type: str
content: dict
origin: str
destination: str
def get_dict(self) -> JsonDict:
return {
@@ -55,14 +54,21 @@ class Edu(JsonEncodedObject):
"destination": self.destination,
}
def get_context(self):
def get_context(self) -> str:
return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
def strip_context(self):
def strip_context(self) -> None:
getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}"
class Transaction(JsonEncodedObject):
def _none_to_list(edus: Optional[List[JsonDict]]) -> List[JsonDict]:
if edus is None:
return []
return edus
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Transaction:
"""A transaction is a list of Pdus and Edus to be sent to a remote home
server with some extra metadata.
@@ -78,47 +84,21 @@ class Transaction(JsonEncodedObject):
"""
valid_keys = [
"transaction_id",
"origin",
"destination",
"origin_server_ts",
"previous_ids",
"pdus",
"edus",
]
# Required keys.
transaction_id: str
origin: str
destination: str
origin_server_ts: int
pdus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list)
edus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list)
internal_keys = ["transaction_id", "destination"]
required_keys = [
"transaction_id",
"origin",
"destination",
"origin_server_ts",
"pdus",
]
def __init__(self, transaction_id=None, pdus: Optional[list] = None, **kwargs):
"""If we include a list of pdus then we decode then as PDU's
automatically.
"""
# If there's no EDUs then remove the arg
if "edus" in kwargs and not kwargs["edus"]:
del kwargs["edus"]
super().__init__(transaction_id=transaction_id, pdus=pdus or [], **kwargs)
@staticmethod
def create_new(pdus, **kwargs):
"""Used to create a new transaction. Will auto fill out
transaction_id and origin_server_ts keys.
"""
if "origin_server_ts" not in kwargs:
raise KeyError("Require 'origin_server_ts' to construct a Transaction")
if "transaction_id" not in kwargs:
raise KeyError("Require 'transaction_id' to construct a Transaction")
kwargs["pdus"] = [p.get_pdu_json() for p in pdus]
return Transaction(**kwargs)
def get_dict(self) -> JsonDict:
"""A JSON-ready dictionary of valid keys which aren't internal."""
result = {
"origin": self.origin,
"origin_server_ts": self.origin_server_ts,
"pdus": self.pdus,
}
if self.edus:
result["edus"] = self.edus
return result
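def _example_transaction_round_trip() -> None:
    # Illustrative sketch (hypothetical values): edus=None is normalised to
    # [] by the _none_to_list converter above, and get_dict omits the "edus"
    # key when the list is empty.
    txn = Transaction(
        transaction_id="txn1",
        origin="hs1.example",
        destination="hs2.example",
        origin_server_ts=1629300000000,
        pdus=[],
        edus=None,
    )
    assert txn.get_dict() == {
        "origin": "hs1.example",
        "origin_server_ts": 1629300000000,
        "pdus": [],
    }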

View file

@@ -392,9 +392,6 @@ class ApplicationServicesHandler:
protocols[p].append(info)
def _merge_instances(infos: List[JsonDict]) -> JsonDict:
if not infos:
return {}
# Merge the 'instances' lists of multiple results, but just take
# the other fields from the first as they ought to be identical
# copy the result so as not to corrupt the cached one
@@ -406,7 +403,9 @@ class ApplicationServicesHandler:
return combined
return {p: _merge_instances(protocols[p]) for p in protocols.keys()}
return {
p: _merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]
}
async def _get_services_for_event(
self, event: EventBase

View file

@@ -73,7 +73,7 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
if TYPE_CHECKING:
from synapse.rest.client.v1.login import LoginResponse
from synapse.rest.client.login import LoginResponse
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -461,7 +461,7 @@ class AuthHandler(BaseHandler):
If no auth flows have been completed successfully, raises an
InteractiveAuthIncompleteError. To handle this, you can use
synapse.rest.client.v2_alpha._base.interactive_auth_handler as a
synapse.rest.client._base.interactive_auth_handler as a
decorator.
Args:
@@ -543,7 +543,7 @@ class AuthHandler(BaseHandler):
# Note that the registration endpoint explicitly removes the
# "initial_device_display_name" parameter if it is provided
# without a "password" parameter. See the changes to
# synapse.rest.client.v2_alpha.register.RegisterRestServlet.on_POST
# synapse.rest.client.register.RegisterRestServlet.on_POST
# in commit 544722bad23fc31056b9240189c3cbbbf0ffd3f9.
if not clientdict:
clientdict = session.clientdict

View file

@@ -213,7 +213,7 @@ class EventAuthHandler:
raise AuthError(
403,
"You do not belong to any of the required rooms to join this room.",
"You do not belong to any of the required rooms/spaces to join this room.",
)
async def has_restricted_join_rules(

View file

@@ -42,6 +42,7 @@ from twisted.internet import defer
from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
Membership,
RejectedReason,
@@ -108,21 +109,33 @@ soft_failed_event_counter = Counter(
)
@attr.s(slots=True)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _NewEventInfo:
"""Holds information about a received event, ready for passing to _auth_and_persist_events
Attributes:
event: the received event
state: the state at that event
state: the state at that event, according to /state_ids from a remote
homeserver. Only populated for backfilled events which are going to be a
new backwards extremity.
claimed_auth_event_map: a map of (type, state_key) => event for the event's
claimed auth_events.
This can include events which have not yet been persisted, in the case that
we are backfilling a batch of events.
Note: may be incomplete if we were unable to find all of the claimed auth
events. Also, treat the contents with caution: the events might also have
been rejected, might not yet have been authorized themselves, or they might
be in the wrong room.
auth_events: the auth_event map for that event
"""
event = attr.ib(type=EventBase)
state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None)
event: EventBase
state: Optional[Sequence[EventBase]]
claimed_auth_event_map: StateMap[EventBase]
class FederationHandler(BaseHandler):
@@ -207,8 +220,6 @@ class FederationHandler(BaseHandler):
room_id = pdu.room_id
event_id = pdu.event_id
logger.info("handling received PDU: %s", pdu)
# We reprocess pdus when we have seen them only as outliers
existing = await self.store.get_event(
event_id, allow_none=True, allow_rejected=True
@@ -216,14 +227,19 @@
# FIXME: Currently we fetch an event again when we already have it
# if it has been marked as an outlier.
already_seen = existing and (
not existing.internal_metadata.is_outlier()
or pdu.internal_metadata.is_outlier()
)
if already_seen:
logger.debug("Already seen pdu")
return
if existing:
if not existing.internal_metadata.is_outlier():
logger.info(
"Ignoring received event %s which we have already seen", event_id
)
return
if pdu.internal_metadata.is_outlier():
logger.info(
"Ignoring received outlier %s which we already have as an outlier",
event_id,
)
return
logger.info("De-outliering event %s", event_id)
# do some initial sanity-checking of the event. In particular, make
# sure it doesn't have hundreds of prev_events or auth_events, which
@@ -262,7 +278,12 @@
state = None
# Get missing pdus if necessary.
# Check that the event passes auth based on the state at the event. This is
# done for events that are to be added to the timeline (non-outliers).
#
# Get missing pdus if necessary:
# - Fetching any missing prev events to fill in gaps in the graph
# - Fetching state if we have a hole in the graph
if not pdu.internal_metadata.is_outlier():
# We only backfill backwards to the min depth.
min_depth = await self.get_min_depth_for_context(pdu.room_id)
@@ -313,7 +334,8 @@
"Found all missing prev_events",
)
if prevs - seen:
missing_prevs = prevs - seen
if missing_prevs:
# We've still not been able to get all of the prev_events for this event.
#
# In this case, we need to fall back to asking another server in the
@@ -341,8 +363,8 @@
if sent_to_us_directly:
logger.warning(
"Rejecting: failed to fetch %d prev events: %s",
len(prevs - seen),
shortstr(prevs - seen),
len(missing_prevs),
shortstr(missing_prevs),
)
raise FederationError(
"ERROR",
@@ -355,9 +377,10 @@
)
logger.info(
"Event %s is missing prev_events: calculating state for a "
"Event %s is missing prev_events %s: calculating state for a "
"backwards extremity",
event_id,
shortstr(missing_prevs),
)
# Calculate the state after each of the previous events, and
@@ -375,7 +398,7 @@
# Ask the remote server for the states we don't
# know about
for p in prevs - seen:
for p in missing_prevs:
logger.info("Requesting state after missing prev_event %s", p)
with nested_logging_context(p):
@@ -432,6 +455,13 @@
affected=event_id,
)
# A second round of checks for all events. Check that the event passes auth
# based on `auth_events`, this allows us to assert that the event would
# have been allowed at some point. If an event passes this check it's OK
# for it to be used as part of a returned `/state` request, as either
# a) we received the event as part of the original join and so trust it, or
# b) we'll do a state resolution with existing state before it becomes
# part of the "current state", which adds more protection.
await self._process_received_pdu(origin, pdu, state=state)
async def _get_missing_events_for_pdu(
@@ -531,21 +561,14 @@
logger.warning("Failed to get prev_events: %s", e)
return
logger.info(
"Got %d prev_events: %s",
len(missing_events),
shortstr(missing_events),
)
logger.info("Got %d prev_events", len(missing_events))
# We want to sort these by depth so we process them and
# tell clients about them in order.
missing_events.sort(key=lambda x: x.depth)
for ev in missing_events:
logger.info(
"Handling received prev_event %s",
ev.event_id,
)
logger.info("Handling received prev_event %s", ev)
with nested_logging_context(ev.event_id):
try:
await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
@ -889,6 +912,79 @@ class FederationHandler(BaseHandler):
"resync_device_due_to_pdu", self._resync_device, event.sender
)
await self._handle_marker_event(origin, event)
async def _handle_marker_event(self, origin: str, marker_event: EventBase):
"""Handles backfilling the insertion event when we receive a marker
event that points to one.
Args:
origin: Origin of the event. Will be called to get the insertion event
marker_event: The event to process
"""
if marker_event.type != EventTypes.MSC2716_MARKER:
# Not a marker event
return
if marker_event.rejected_reason is not None:
# Rejected event
return
# Skip processing a marker event if the room version doesn't
# support it.
room_version = await self.store.get_room_version(marker_event.room_id)
if not room_version.msc2716_historical:
return
logger.debug("_handle_marker_event: received %s", marker_event)
insertion_event_id = marker_event.content.get(
EventContentFields.MSC2716_MARKER_INSERTION
)
if insertion_event_id is None:
# Nothing to retrieve then (invalid marker)
return
logger.debug(
"_handle_marker_event: backfilling insertion event %s", insertion_event_id
)
await self._get_events_and_persist(
origin,
marker_event.room_id,
[insertion_event_id],
)
insertion_event = await self.store.get_event(
insertion_event_id, allow_none=True
)
if insertion_event is None:
logger.warning(
"_handle_marker_event: server %s didn't return insertion event %s for marker %s",
origin,
insertion_event_id,
marker_event.event_id,
)
return
logger.debug(
"_handle_marker_event: succesfully backfilled insertion event %s from marker event %s",
insertion_event,
marker_event,
)
await self.store.insert_insertion_extremity(
insertion_event_id, marker_event.room_id
)
logger.debug(
"_handle_marker_event: insertion extremity added for %s from marker event %s",
insertion_event,
marker_event,
)
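For reference, a minimal sketch of the content shape that _handle_marker_event consumes. The event ID is hypothetical, and the unstable field name is assumed to be the literal value of EventContentFields.MSC2716_MARKER_INSERTION:

# Hypothetical MSC2716 marker event content, as read by the handler above.
marker_content = {
    "org.matrix.msc2716.marker.insertion": "$abc123:example.org",
}
# The handler pulls out the insertion event ID and backfills that event:
insertion_event_id = marker_content.get("org.matrix.msc2716.marker.insertion")
assert insertion_event_id == "$abc123:example.org"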
async def _resync_device(self, sender: str) -> None:
"""We have detected that the device list for the given user may be out
of sync, so we try and resync them.
@ -1000,7 +1096,7 @@ class FederationHandler(BaseHandler):
_NewEventInfo(
event=ev,
state=events_to_state[e_id],
auth_events={
claimed_auth_event_map={
(
auth_events[a_id].type,
auth_events[a_id].state_key,
@ -1057,9 +1153,19 @@ class FederationHandler(BaseHandler):
async def _maybe_backfill_inner(
self, room_id: str, current_depth: int, limit: int
) -> bool:
extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
oldest_events_with_depth = (
await self.store.get_oldest_event_ids_with_depth_in_room(room_id)
)
insertion_events_to_be_backfilled = (
await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
)
logger.debug(
"_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
oldest_events_with_depth,
insertion_events_to_be_backfilled,
)
if not extremities:
if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
logger.debug("Not backfilling as no extremeties found.")
return False
@ -1089,10 +1195,12 @@ class FederationHandler(BaseHandler):
# state *before* the event, ignoring the special casing certain event
# types have.
forward_events = await self.store.get_successor_events(list(extremities))
forward_event_ids = await self.store.get_successor_events(
list(oldest_events_with_depth)
)
extremities_events = await self.store.get_events(
forward_events,
forward_event_ids,
redact_behaviour=EventRedactBehaviour.AS_IS,
get_prev_content=False,
)
@ -1106,10 +1214,19 @@ class FederationHandler(BaseHandler):
redact=False,
check_history_visibility_only=True,
)
logger.debug(
"_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
)
if not filtered_extremities:
if not filtered_extremities and not insertion_events_to_be_backfilled:
return False
extremities = {
**oldest_events_with_depth,
# TODO: insertion_events_to_be_backfilled is currently skipping the filtered_extremities checks
**insertion_events_to_be_backfilled,
}
# Check if we reached a point where we should start backfilling.
sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
max_depth = sorted_extremeties_tuple[0][1]
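A quick worked example of the sort above, with hypothetical extremities (event_id -> depth). Negating the depth sorts deepest-first, so max_depth ends up as the largest depth in the merged map:

extremities = {"$a": 5, "$b": 12, "$c": 9}
sorted_by_depth = sorted(extremities.items(), key=lambda e: -int(e[1]))
# -> [("$b", 12), ("$c", 9), ("$a", 5)]
max_depth = sorted_by_depth[0][1]  # 12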
@ -1643,10 +1760,8 @@ class FederationHandler(BaseHandler):
for p, origin in room_queue:
try:
logger.info(
"Processing queued PDU %s which was received "
"while we were joining %s",
p.event_id,
p.room_id,
"Processing queued PDU %s which was received while we were joining",
p,
)
with nested_logging_context(p.event_id):
await self.on_receive_pdu(origin, p, sent_to_us_directly=True)
@ -2208,7 +2323,7 @@ class FederationHandler(BaseHandler):
event: EventBase,
context: EventContext,
state: Optional[Iterable[EventBase]] = None,
auth_events: Optional[MutableStateMap[EventBase]] = None,
claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
backfilled: bool = False,
) -> None:
"""
@ -2220,17 +2335,18 @@ class FederationHandler(BaseHandler):
context:
The event context.
NB that this function potentially modifies it.
state:
The state events used to check the event for soft-fail. If this is
not provided the current state events will be used.
auth_events:
Map from (event_type, state_key) to event
Normally, our calculated auth_events based on the state of the room
at the event's position in the DAG, though occasionally (eg if the
event is an outlier), may be the auth events claimed by the remote
server.
claimed_auth_event_map:
A map of (type, state_key) => event for the event's claimed auth_events.
Possibly incomplete, and possibly including events that are not yet
persisted, or authed, or in the right room.
Only populated where we may not already have persisted these events -
for example, when populating outliers.
backfilled: True if the event was backfilled.
"""
context = await self._check_event_auth(
@ -2238,7 +2354,7 @@ class FederationHandler(BaseHandler):
event,
context,
state=state,
auth_events=auth_events,
claimed_auth_event_map=claimed_auth_event_map,
backfilled=backfilled,
)
@ -2302,7 +2418,7 @@ class FederationHandler(BaseHandler):
event,
res,
state=ev_info.state,
auth_events=ev_info.auth_events,
claimed_auth_event_map=ev_info.claimed_auth_event_map,
backfilled=backfilled,
)
return res
@ -2568,7 +2684,7 @@ class FederationHandler(BaseHandler):
event: EventBase,
context: EventContext,
state: Optional[Iterable[EventBase]] = None,
auth_events: Optional[MutableStateMap[EventBase]] = None,
claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
backfilled: bool = False,
) -> EventContext:
"""
@ -2580,21 +2696,19 @@ class FederationHandler(BaseHandler):
context:
The event context.
NB that this function potentially modifies it.
state:
The state events used to check the event for soft-fail. If this is
not provided the current state events will be used.
auth_events:
Map from (event_type, state_key) to event
Normally, our calculated auth_events based on the state of the room
at the event's position in the DAG, though occasionally (eg if the
event is an outlier), may be the auth events claimed by the remote
server.
claimed_auth_event_map:
A map of (type, state_key) => event for the event's claimed auth_events.
Possibly incomplete, and possibly including events that are not yet
persisted, or authed, or in the right room.
Also NB that this function adds entries to it.
Only populated where we may not already have persisted these events -
for example, when populating outliers, or the state for a backwards
extremity.
If this is not provided, it is calculated from the previous state IDs.
backfilled: True if the event was backfilled.
Returns:
@ -2603,7 +2717,12 @@ class FederationHandler(BaseHandler):
room_version = await self.store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
if not auth_events:
if claimed_auth_event_map:
# if we have a copy of the auth events from the event, use that as the
# basis for auth.
auth_events = claimed_auth_event_map
else:
# otherwise, we calculate what the auth events *should* be, and use that
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
@ -2611,18 +2730,11 @@ class FederationHandler(BaseHandler):
auth_events_x = await self.store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
if event.type == EventTypes.Member and not event.auth_event_ids():
if len(event.prev_event_ids()) == 1 and event.depth < 5:
c = await self.store.get_event(
event.prev_event_ids()[0], allow_none=True
)
if c and c.type == EventTypes.Create:
auth_events[(c.type, c.state_key)] = c
try:
context = await self._update_auth_events_and_context_for_auth(
(
context,
auth_events_for_auth,
) = await self._update_auth_events_and_context_for_auth(
origin, event, context, auth_events
)
except Exception:
@ -2635,9 +2747,10 @@ class FederationHandler(BaseHandler):
"Ignoring failure and continuing processing of event.",
event.event_id,
)
auth_events_for_auth = auth_events
try:
event_auth.check(room_version_obj, event, auth_events=auth_events)
event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
@ -2662,8 +2775,8 @@ class FederationHandler(BaseHandler):
origin: str,
event: EventBase,
context: EventContext,
auth_events: MutableStateMap[EventBase],
) -> EventContext:
input_auth_events: StateMap[EventBase],
) -> Tuple[EventContext, StateMap[EventBase]]:
"""Helper for _check_event_auth. See there for docs.
Checks whether a given event has the expected auth events. If it
@ -2680,7 +2793,7 @@ class FederationHandler(BaseHandler):
event:
context:
auth_events:
input_auth_events:
Map from (event_type, state_key) to event
Normally, our calculated auth_events based on the state of the room
@ -2688,11 +2801,12 @@ class FederationHandler(BaseHandler):
event is an outlier), may be the auth events claimed by the remote
server.
Also NB that this function adds entries to it.
Returns:
updated context
updated context, updated auth event map
"""
# take a copy of input_auth_events before we modify it.
auth_events: MutableStateMap[EventBase] = dict(input_auth_events)
event_auth_events = set(event.auth_event_ids())
# missing_auth is the set of the event's auth_events which we don't yet have
@ -2721,7 +2835,7 @@ class FederationHandler(BaseHandler):
# The other side isn't around or doesn't implement the
# endpoint, so lets just bail out.
logger.info("Failed to get event auth from remote: %s", e1)
return context
return context, auth_events
seen_remotes = await self.store.have_seen_events(
event.room_id, [e.event_id for e in remote_auth_chain]
@ -2752,7 +2866,10 @@ class FederationHandler(BaseHandler):
await self.state_handler.compute_event_context(e)
)
await self._auth_and_persist_event(
origin, e, missing_auth_event_context, auth_events=auth
origin,
e,
missing_auth_event_context,
claimed_auth_event_map=auth,
)
if e.event_id in event_auth_events:
@ -2770,14 +2887,14 @@ class FederationHandler(BaseHandler):
# obviously be empty
# (b) alternatively, why don't we do it earlier?
logger.info("Skipping auth_event fetch for outlier")
return context
return context, auth_events
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
if not different_auth:
return context
return context, auth_events
logger.info(
"auth_events refers to events which are not in our calculated auth "
@ -2803,7 +2920,7 @@ class FederationHandler(BaseHandler):
# XXX: should we reject the event in this case? It feels like we should,
# but then shouldn't we also do so if we've failed to fetch any of the
# auth events?
return context
return context, auth_events
# now we state-resolve between our own idea of the auth events, and the remote's
# idea of them.
@ -2833,7 +2950,7 @@ class FederationHandler(BaseHandler):
event, context, auth_events
)
return context
return context, auth_events
async def _update_context_for_auth_events(
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]

View file

@ -824,6 +824,7 @@ class IdentityHandler(BaseHandler):
room_avatar_url: str,
room_join_rules: str,
room_name: str,
room_type: Optional[str],
inviter_display_name: str,
inviter_avatar_url: str,
id_access_token: Optional[str] = None,
@ -843,6 +844,7 @@ class IdentityHandler(BaseHandler):
notifications.
room_join_rules: The join rules of the room (e.g. "public").
room_name: The m.room.name of the room.
room_type: The type of the room from its m.room.create event (e.g "m.space").
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
@ -869,6 +871,10 @@ class IdentityHandler(BaseHandler):
"sender_display_name": inviter_display_name,
"sender_avatar_url": inviter_avatar_url,
}
if room_type is not None:
invite_config["org.matrix.msc3288.room_type"] = room_type
# If a custom web client location is available, include it in the request.
if self._web_client_location:
invite_config["org.matrix.web_client_location"] = self._web_client_location

View file

@ -1184,8 +1184,7 @@ class PresenceHandler(BasePresenceHandler):
new_fields = {"state": presence}
if not ignore_status_msg:
msg = status_msg if presence != PresenceState.OFFLINE else None
new_fields["status_msg"] = msg
new_fields["status_msg"] = status_msg
if presence == PresenceState.ONLINE or (
presence == PresenceState.BUSY and self._busy_presence_enabled
@ -1478,7 +1477,7 @@ def format_user_presence_state(
content["user_id"] = state.user_id
if state.last_active_ts:
content["last_active_ago"] = now - state.last_active_ts
if state.status_msg and state.state != PresenceState.OFFLINE:
if state.status_msg:
content["status_msg"] = state.status_msg
if state.state == PresenceState.ONLINE:
content["currently_active"] = state.currently_active
@ -1840,9 +1839,7 @@ def handle_timeout(
# don't set them as offline.
sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
state = state.copy_and_replace(
state=PresenceState.OFFLINE, status_msg=None
)
state = state.copy_and_replace(state=PresenceState.OFFLINE)
changed = True
else:
# We expect to be poked occasionally by the other side.
@ -1850,7 +1847,7 @@ def handle_timeout(
# no one gets stuck online forever.
if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
# The other side seems to have disappeared.
state = state.copy_and_replace(state=PresenceState.OFFLINE, status_msg=None)
state = state.copy_and_replace(state=PresenceState.OFFLINE)
changed = True
return state if changed else None
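The net effect of these presence changes is that status_msg now survives a timeout to offline instead of being wiped. A minimal model of the new behaviour, using a stub class rather than Synapse's own UserPresenceState:

from typing import Optional

import attr

@attr.s(frozen=True, slots=True, auto_attribs=True)
class PresenceStub:
    state: str
    status_msg: Optional[str]

    def copy_and_replace(self, **kwds) -> "PresenceStub":
        return attr.evolve(self, **kwds)

before = PresenceStub(state="online", status_msg="in a meeting")
after = before.copy_and_replace(state="offline")
assert after.status_msg == "in a meeting"  # no longer cleared on timeout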

View file

@ -70,7 +70,8 @@ class ReceiptsHandler(BaseHandler):
)
if not is_in_room:
logger.info(
"Ignoring receipt from %s as we're not in the room",
"Ignoring receipt for room %r from server %s as we're not in the room",
room_id,
origin,
)
continue
@ -188,7 +189,14 @@ class ReceiptEventSource:
new_users = {}
for rr_user_id, user_rr in m_read.items():
hidden = user_rr.get("hidden", None)
try:
hidden = user_rr.get("hidden")
except AttributeError:
# Due to https://github.com/matrix-org/synapse/issues/10376
# there are cases where user_rr is a string, in those cases
# we just ignore the read receipt
continue
if hidden is not True or rr_user_id == user_id:
new_users[rr_user_id] = user_rr.copy()
# If hidden has a value replace hidden with the correct prefixed key
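A hypothetical receipt payload that exercises the new guard. The per-user value should be a dict, but the bug referenced above produced strings, on which .get() raises AttributeError:

m_read = {
    "@alice:example.org": {"ts": 1628000000000, "hidden": True},  # well-formed
    "@bob:example.org": "not-a-dict",  # malformed: .get("hidden") raises AttributeError
}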

View file

@ -356,6 +356,12 @@ class RoomListHandler(BaseHandler):
include_all_networks: bool = False,
third_party_instance_id: Optional[str] = None,
) -> JsonDict:
"""Get the public room list from remote server
Raises:
SynapseError
"""
if not self.enable_room_list_search:
return {"chunk": [], "total_room_count_estimate": 0}
@ -395,13 +401,16 @@ class RoomListHandler(BaseHandler):
limit = None
since_token = None
res = await self._get_remote_list_cached(
server_name,
limit=limit,
since_token=since_token,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
try:
res = await self._get_remote_list_cached(
server_name,
limit=limit,
since_token=since_token,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
except (RequestSendFailed, HttpResponseException):
raise SynapseError(502, "Failed to fetch room list")
if search_filter:
res = {
@ -423,20 +432,21 @@ class RoomListHandler(BaseHandler):
include_all_networks: bool = False,
third_party_instance_id: Optional[str] = None,
) -> JsonDict:
"""Wrapper around FederationClient.get_public_rooms that caches the
result.
"""
repl_layer = self.hs.get_federation_client()
if search_filter:
# We can't cache when asking for search
try:
return await repl_layer.get_public_rooms(
server_name,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
except (RequestSendFailed, HttpResponseException):
raise SynapseError(502, "Failed to fetch room list")
return await repl_layer.get_public_rooms(
server_name,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
key = (
server_name,
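With this change, transport-level failures now surface to callers as a 502 SynapseError rather than an unhandled exception. A hedged usage sketch (the handler variable and server name are hypothetical):

try:
    result = await handler.get_remote_public_room_list("matrix.example.org")
except SynapseError as e:
    assert e.code == 502  # the remote room list could not be fetched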

View file

@ -19,7 +19,12 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
from synapse import types
from synapse.api.constants import AccountDataTypes, EventTypes, Membership
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
Membership,
)
from synapse.api.errors import (
AuthError,
Codes,
@ -1237,6 +1242,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if room_name_event:
room_name = room_name_event.content.get("name", "")
room_type = None
room_create_event = room_state.get((EventTypes.Create, ""))
if room_create_event:
room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
room_join_rules = ""
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
@ -1263,6 +1273,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
room_type=room_type,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
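A sketch of where room_type comes from: the content of the room's m.room.create event. It assumes EventContentFields.ROOM_TYPE is the literal "type"; the content below is hypothetical:

create_content = {"creator": "@alice:example.org", "type": "m.space"}
room_type = create_content.get("type")  # "m.space"; None for ordinary rooms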

File diff suppressed because it is too large

View file

@ -16,7 +16,12 @@ import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING
from io import BytesIO
from typing import TYPE_CHECKING, Optional
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReactorTCP
from twisted.mail.smtp import ESMTPSenderFactory
from synapse.logging.context import make_deferred_yieldable
@ -26,19 +31,75 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
async def _sendmail(
reactor: IReactorTCP,
smtphost: str,
smtpport: int,
from_addr: str,
to_addr: str,
msg_bytes: bytes,
username: Optional[bytes] = None,
password: Optional[bytes] = None,
require_auth: bool = False,
require_tls: bool = False,
tls_hostname: Optional[str] = None,
) -> None:
"""A simple wrapper around ESMTPSenderFactory, to allow substitution in tests
Args:
reactor: reactor to use to make the outbound connection
smtphost: hostname to connect to
smtpport: port to connect to
from_addr: "From" address for email
to_addr: "To" address for email
msg_bytes: Message content
username: username to authenticate with, if auth is enabled
password: password to give when authenticating
require_auth: if auth is not offered, fail the request
require_tls: if TLS is not offered, fail the request
tls_hostname: TLS hostname to check for. None to disable TLS.
"""
msg = BytesIO(msg_bytes)
d: "Deferred[object]" = Deferred()
factory = ESMTPSenderFactory(
username,
password,
from_addr,
to_addr,
msg,
d,
heloFallback=True,
requireAuthentication=require_auth,
requireTransportSecurity=require_tls,
hostname=tls_hostname,
)
# the IReactorTCP interface claims host has to be a bytes, which seems to be wrong
reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None) # type: ignore[arg-type]
await make_deferred_yieldable(d)
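A hedged sketch of calling the wrapper directly; the host, addresses and credentials are hypothetical:

await _sendmail(
    reactor,
    "smtp.example.org",
    587,
    "noreply@example.org",
    "user@example.org",
    b"Subject: test\r\n\r\nhello",
    username=b"noreply@example.org",
    password=b"secret",
    require_auth=True,
    require_tls=True,
    tls_hostname="smtp.example.org",  # None would disable TLS
)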
class SendEmailHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self._sendmail = hs.get_sendmail()
self._reactor = hs.get_reactor()
self._from = hs.config.email.email_notif_from
self._smtp_host = hs.config.email.email_smtp_host
self._smtp_port = hs.config.email.email_smtp_port
self._smtp_user = hs.config.email.email_smtp_user
self._smtp_pass = hs.config.email.email_smtp_pass
user = hs.config.email.email_smtp_user
self._smtp_user = user.encode("utf-8") if user is not None else None
passwd = hs.config.email.email_smtp_pass
self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None
self._require_transport_security = hs.config.email.require_transport_security
self._enable_tls = hs.config.email.enable_smtp_tls
self._sendmail = _sendmail
async def send_email(
self,
@ -82,17 +143,16 @@ class SendEmailHandler:
logger.info("Sending email to %s" % email_address)
await make_deferred_yieldable(
self._sendmail(
self._smtp_host,
raw_from,
raw_to,
multipart_msg.as_string().encode("utf8"),
reactor=self._reactor,
port=self._smtp_port,
requireAuthentication=self._smtp_user is not None,
username=self._smtp_user,
password=self._smtp_pass,
requireTransportSecurity=self._require_transport_security,
)
await self._sendmail(
self._reactor,
self._smtp_host,
self._smtp_port,
raw_from,
raw_to,
multipart_msg.as_string().encode("utf8"),
username=self._smtp_user,
password=self._smtp_pass,
require_auth=self._smtp_user is not None,
require_tls=self._require_transport_security,
tls_hostname=self._smtp_host if self._enable_tls else None,
)

View file

@ -1,667 +0,0 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import re
from collections import deque
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple
import attr
from synapse.api.constants import (
EventContentFields,
EventTypes,
HistoryVisibility,
JoinRules,
Membership,
RoomTypes,
)
from synapse.events import EventBase
from synapse.events.utils import format_event_for_client_v2
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# number of rooms to return. We'll stop once we hit this limit.
MAX_ROOMS = 50
# max number of events to return per room.
MAX_ROOMS_PER_SPACE = 50
# max number of federation servers to hit per room
MAX_SERVERS_PER_SPACE = 3
class SpaceSummaryHandler:
def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._auth = hs.get_auth()
self._event_auth_handler = hs.get_event_auth_handler()
self._store = hs.get_datastore()
self._event_serializer = hs.get_event_client_serializer()
self._server_name = hs.hostname
self._federation_client = hs.get_federation_client()
async def get_space_summary(
self,
requester: str,
room_id: str,
suggested_only: bool = False,
max_rooms_per_space: Optional[int] = None,
) -> JsonDict:
"""
Implementation of the space summary C-S API
Args:
requester: user id of the user making this request
room_id: room id to start the summary at
suggested_only: whether we should only return children with the "suggested"
flag set.
max_rooms_per_space: an optional limit on the number of child rooms we will
return. This does not apply to the root room (ie, room_id), and
is overridden by MAX_ROOMS_PER_SPACE.
Returns:
summary dict to return
"""
# first of all, check that the user is in the room in question (or it's
# world-readable)
await self._auth.check_user_in_room_or_world_readable(room_id, requester)
# the queue of rooms to process
room_queue = deque((_RoomQueueEntry(room_id, ()),))
# rooms we have already processed
processed_rooms: Set[str] = set()
# events we have already processed. We don't necessarily have their event ids,
# so instead we key on (room id, state key)
processed_events: Set[Tuple[str, str]] = set()
rooms_result: List[JsonDict] = []
events_result: List[JsonDict] = []
while room_queue and len(rooms_result) < MAX_ROOMS:
queue_entry = room_queue.popleft()
room_id = queue_entry.room_id
if room_id in processed_rooms:
# already done this room
continue
logger.debug("Processing room %s", room_id)
is_in_room = await self._store.is_host_joined(room_id, self._server_name)
# The client-specified max_rooms_per_space limit doesn't apply to the
# room_id specified in the request, so we ignore it if this is the
# first room we are processing.
max_children = max_rooms_per_space if processed_rooms else None
if is_in_room:
room, events = await self._summarize_local_room(
requester, None, room_id, suggested_only, max_children
)
logger.debug(
"Query of local room %s returned events %s",
room_id,
["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
)
if room:
rooms_result.append(room)
else:
fed_rooms, fed_events = await self._summarize_remote_room(
queue_entry,
suggested_only,
max_children,
exclude_rooms=processed_rooms,
)
# The results over federation might include rooms that we, as the
# requesting server, are allowed to see, but that the requesting user
# is not permitted to see.
#
# Filter the returned results to only what is accessible to the user.
room_ids = set()
events = []
for room in fed_rooms:
fed_room_id = room.get("room_id")
if not fed_room_id or not isinstance(fed_room_id, str):
continue
# The room should only be included in the summary if:
# a. the user is in the room;
# b. the room is world readable; or
# c. the user could join the room, e.g. the join rules
# are set to public or the user is in a space that
# has been granted access to the room.
#
# Note that we know the user is not in the root room (which is
# why the remote call was made in the first place), but the user
# could be in one of the children rooms and we just didn't know
# about the link.
# The API doesn't return the room version so assume that a
# join rule of knock is valid.
include_room = (
room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
or room.get("world_readable") is True
)
# Check if the user is a member of any of the allowed spaces
# from the response.
allowed_rooms = room.get("allowed_spaces")
if (
not include_room
and allowed_rooms
and isinstance(allowed_rooms, list)
):
include_room = await self._event_auth_handler.is_user_in_rooms(
allowed_rooms, requester
)
# Finally, if this isn't the requested room, check ourselves
# if we can access the room.
if not include_room and fed_room_id != queue_entry.room_id:
include_room = await self._is_room_accessible(
fed_room_id, requester, None
)
# The user can see the room, include it!
if include_room:
rooms_result.append(room)
room_ids.add(fed_room_id)
# All rooms returned don't need visiting again (even if the user
# didn't have access to them).
processed_rooms.add(fed_room_id)
for event in fed_events:
if event.get("room_id") in room_ids:
events.append(event)
logger.debug(
"Query of %s returned rooms %s, events %s",
room_id,
[room.get("room_id") for room in fed_rooms],
["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in fed_events],
)
# the room we queried may or may not have been returned, but don't process
# it again, anyway.
processed_rooms.add(room_id)
# XXX: is it ok that we blindly iterate through any events returned by
# a remote server, whether or not they actually link to any rooms in our
# tree?
for ev in events:
# remote servers might return events we have already processed
# (eg, Dendrite returns inward pointers as well as outward ones), so
# we need to filter them out, to avoid returning duplicate links to the
# client.
ev_key = (ev["room_id"], ev["state_key"])
if ev_key in processed_events:
continue
events_result.append(ev)
# add the child to the queue. we have already validated
# that the vias are a list of server names.
room_queue.append(
_RoomQueueEntry(ev["state_key"], ev["content"]["via"])
)
processed_events.add(ev_key)
# Before returning to the client, remove the allowed_spaces key for any
# rooms.
for room in rooms_result:
room.pop("allowed_spaces", None)
return {"rooms": rooms_result, "events": events_result}
async def federation_space_summary(
self,
origin: str,
room_id: str,
suggested_only: bool,
max_rooms_per_space: Optional[int],
exclude_rooms: Iterable[str],
) -> JsonDict:
"""
Implementation of the space summary Federation API
Args:
origin: The server requesting the spaces summary.
room_id: room id to start the summary at
suggested_only: whether we should only return children with the "suggested"
flag set.
max_rooms_per_space: an optional limit on the number of child rooms we will
return. Unlike the C-S API, this applies to the root room (room_id).
It is clipped to MAX_ROOMS_PER_SPACE.
exclude_rooms: a list of rooms to skip over (presumably because the
calling server has already seen them).
Returns:
summary dict to return
"""
# the queue of rooms to process
room_queue = deque((room_id,))
# the set of rooms that we should not walk further. Initialise it with the
# excluded-rooms list; we will add other rooms as we process them so that
# we do not loop.
processed_rooms: Set[str] = set(exclude_rooms)
rooms_result: List[JsonDict] = []
events_result: List[JsonDict] = []
while room_queue and len(rooms_result) < MAX_ROOMS:
room_id = room_queue.popleft()
if room_id in processed_rooms:
# already done this room
continue
logger.debug("Processing room %s", room_id)
room, events = await self._summarize_local_room(
None, origin, room_id, suggested_only, max_rooms_per_space
)
processed_rooms.add(room_id)
if room:
rooms_result.append(room)
events_result.extend(events)
# add any children to the queue
room_queue.extend(edge_event["state_key"] for edge_event in events)
return {"rooms": rooms_result, "events": events_result}
async def _summarize_local_room(
self,
requester: Optional[str],
origin: Optional[str],
room_id: str,
suggested_only: bool,
max_children: Optional[int],
) -> Tuple[Optional[JsonDict], Sequence[JsonDict]]:
"""
Generate a room entry and a list of event entries for a given room.
Args:
requester:
The user requesting the summary, if it is a local request. None
if this is a federation request.
origin:
The server requesting the summary, if it is a federation request.
None if this is a local request.
room_id: The room ID to summarize.
suggested_only: True if only suggested children should be returned.
Otherwise, all children are returned.
max_children:
The maximum number of children rooms to include. This is capped
to a server-set limit.
Returns:
A tuple of:
The room information, if the room should be returned to the
user. None, otherwise.
An iterable of the sorted children events. This may be limited
to a maximum size or may include all children.
"""
if not await self._is_room_accessible(room_id, requester, origin):
return None, ()
room_entry = await self._build_room_entry(room_id)
# If the room is not a space, return just the room information.
if room_entry.get("room_type") != RoomTypes.SPACE:
return room_entry, ()
# Otherwise, look for child rooms/spaces.
child_events = await self._get_child_events(room_id)
if suggested_only:
# we only care about suggested children
child_events = filter(_is_suggested_child_event, child_events)
if max_children is None or max_children > MAX_ROOMS_PER_SPACE:
max_children = MAX_ROOMS_PER_SPACE
now = self._clock.time_msec()
events_result: List[JsonDict] = []
for edge_event in itertools.islice(child_events, max_children):
events_result.append(
await self._event_serializer.serialize_event(
edge_event,
time_now=now,
event_format=format_event_for_client_v2,
)
)
return room_entry, events_result
async def _summarize_remote_room(
self,
room: "_RoomQueueEntry",
suggested_only: bool,
max_children: Optional[int],
exclude_rooms: Iterable[str],
) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
"""
Request room entries and a list of event entries for a given room by querying a remote server.
Args:
room: The room to summarize.
suggested_only: True if only suggested children should be returned.
Otherwise, all children are returned.
max_children:
The maximum number of children rooms to include. This is capped
to a server-set limit.
exclude_rooms:
Rooms IDs which do not need to be summarized.
Returns:
A tuple of:
An iterable of rooms.
An iterable of the sorted children events. This may be limited
to a maximum size or may include all children.
"""
room_id = room.room_id
logger.info("Requesting summary for %s via %s", room_id, room.via)
# we need to make the exclusion list json-serialisable
exclude_rooms = list(exclude_rooms)
via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE)
try:
res = await self._federation_client.get_space_summary(
via,
room_id,
suggested_only=suggested_only,
max_rooms_per_space=max_children,
exclude_rooms=exclude_rooms,
)
except Exception as e:
logger.warning(
"Unable to get summary of %s via federation: %s",
room_id,
e,
exc_info=logger.isEnabledFor(logging.DEBUG),
)
return (), ()
return res.rooms, tuple(
ev.data for ev in res.events if ev.event_type == EventTypes.SpaceChild
)
async def _is_room_accessible(
self, room_id: str, requester: Optional[str], origin: Optional[str]
) -> bool:
"""
Calculate whether the room should be shown in the spaces summary.
It should be included if:
* The requester is joined or can join the room (per MSC3173).
* The origin server has any user that is joined or can join the room.
* The history visibility is set to world readable.
Args:
room_id: The room ID to summarize.
requester:
The user requesting the summary, if it is a local request. None
if this is a federation request.
origin:
The server requesting the summary, if it is a federation request.
None if this is a local request.
Returns:
True if the room should be included in the spaces summary.
"""
state_ids = await self._store.get_current_state_ids(room_id)
# If there's no state for the room, it isn't known.
if not state_ids:
# The user might have a pending invite for the room.
if requester and await self._store.get_invite_for_local_user_in_room(
requester, room_id
):
return True
logger.info("room %s is unknown, omitting from summary", room_id)
return False
room_version = await self._store.get_room_version(room_id)
# Include the room if it has join rules of public or knock.
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
if join_rules_event_id:
join_rules_event = await self._store.get_event(join_rules_event_id)
join_rule = join_rules_event.content.get("join_rule")
if join_rule == JoinRules.PUBLIC or (
room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
):
return True
# Include the room if it is peekable.
hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""))
if hist_vis_event_id:
hist_vis_ev = await self._store.get_event(hist_vis_event_id)
hist_vis = hist_vis_ev.content.get("history_visibility")
if hist_vis == HistoryVisibility.WORLD_READABLE:
return True
# Otherwise we need to check information specific to the user or server.
# If we have an authenticated requesting user, check if they are a member
# of the room (or can join the room).
if requester:
member_event_id = state_ids.get((EventTypes.Member, requester), None)
# If they're in the room they can see info on it.
if member_event_id:
member_event = await self._store.get_event(member_event_id)
if member_event.membership in (Membership.JOIN, Membership.INVITE):
return True
# Otherwise, check if they should be allowed access via membership in a space.
if await self._event_auth_handler.has_restricted_join_rules(
state_ids, room_version
):
allowed_rooms = (
await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
)
if await self._event_auth_handler.is_user_in_rooms(
allowed_rooms, requester
):
return True
# If this is a request over federation, check if the host is in the room or
# has a user who could join the room.
elif origin:
if await self._event_auth_handler.check_host_in_room(
room_id, origin
) or await self._store.is_host_invited(room_id, origin):
return True
# Alternately, if the host has a user in any of the spaces specified
# for access, then the host can see this room (and should do filtering
# if the requester cannot see it).
if await self._event_auth_handler.has_restricted_join_rules(
state_ids, room_version
):
allowed_rooms = (
await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
)
for space_id in allowed_rooms:
if await self._event_auth_handler.check_host_in_room(
space_id, origin
):
return True
logger.info(
"room %s is unpeekable and requester %s is not a member / not allowed to join, omitting from summary",
room_id,
requester or origin,
)
return False
async def _build_room_entry(self, room_id: str) -> JsonDict:
"""Generate en entry suitable for the 'rooms' list in the summary response"""
stats = await self._store.get_room_with_stats(room_id)
# currently this should be impossible because we call
# check_user_in_room_or_world_readable on the room before we get here, so
# there should always be an entry
assert stats is not None, "unable to retrieve stats for %s" % (room_id,)
current_state_ids = await self._store.get_current_state_ids(room_id)
create_event = await self._store.get_event(
current_state_ids[(EventTypes.Create, "")]
)
room_version = await self._store.get_room_version(room_id)
allowed_rooms = None
if await self._event_auth_handler.has_restricted_join_rules(
current_state_ids, room_version
):
allowed_rooms = await self._event_auth_handler.get_rooms_that_allow_join(
current_state_ids
)
entry = {
"room_id": stats["room_id"],
"name": stats["name"],
"topic": stats["topic"],
"canonical_alias": stats["canonical_alias"],
"num_joined_members": stats["joined_members"],
"avatar_url": stats["avatar"],
"join_rules": stats["join_rules"],
"world_readable": (
stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
),
"guest_can_join": stats["guest_access"] == "can_join",
"creation_ts": create_event.origin_server_ts,
"room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
"allowed_spaces": allowed_rooms,
}
# Filter out Nones rather than omit the field altogether
room_entry = {k: v for k, v in entry.items() if v is not None}
return room_entry
async def _get_child_events(self, room_id: str) -> Iterable[EventBase]:
"""
Get the child events for a given room.
The returned results are sorted for stability.
Args:
room_id: The room id to get the children of.
Returns:
An iterable of sorted child events.
"""
# look for child rooms/spaces.
current_state_ids = await self._store.get_current_state_ids(room_id)
events = await self._store.get_events_as_list(
[
event_id
for key, event_id in current_state_ids.items()
if key[0] == EventTypes.SpaceChild
]
)
# filter out any events without a "via" (which implies it has been redacted),
# and order to ensure we return stable results.
return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key)
@attr.s(frozen=True, slots=True)
class _RoomQueueEntry:
room_id = attr.ib(type=str)
via = attr.ib(type=Sequence[str])
def _has_valid_via(e: EventBase) -> bool:
via = e.content.get("via")
if not via or not isinstance(via, Sequence):
return False
for v in via:
if not isinstance(v, str):
logger.debug("Ignoring edge event %s with invalid via entry", e.event_id)
return False
return True
def _is_suggested_child_event(edge_event: EventBase) -> bool:
suggested = edge_event.content.get("suggested")
if isinstance(suggested, bool) and suggested:
return True
logger.debug("Ignorning not-suggested child %s", edge_event.state_key)
return False
# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive.
_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")
def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
"""
Generate a value for comparing two child events for ordering.
The rules for ordering are supposed to be:
1. The 'order' key, if it is valid.
2. The 'origin_server_ts' of the 'm.room.create' event.
3. The 'room_id'.
But we skip step 2 since we may not have any state from the room.
Args:
child: The event for generating a comparison key.
Returns:
The comparison key as a tuple of:
False if the ordering is valid.
The ordering field.
The room ID.
"""
order = child.content.get("order")
# If order is not a string or doesn't meet the requirements, ignore it.
if not isinstance(order, str):
order = None
elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
order = None
# Items without an order come last.
return (order is None, order, child.room_id)
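A worked example of the resulting ordering, using hypothetical comparison keys: children with a valid order string sort first, by that string; the rest sort last, with ties broken by room ID:

keys = [
    (False, "aaa", "!m:example.org"),  # valid order
    (False, "bbb", "!a:example.org"),  # valid order, later string
    (True, None, "!b:example.org"),    # missing/invalid order: sorts last
    (True, None, "!c:example.org"),    # tie broken by room ID
]
assert sorted(keys) == keys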

View file

@ -269,14 +269,22 @@ class SyncHandler:
self.presence_handler = hs.get_presence_handler()
self.event_sources = hs.get_event_sources()
self.clock = hs.get_clock()
self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
hs.get_clock(), "sync"
)
self.state = hs.get_state_handler()
self.auth = hs.get_auth()
self.storage = hs.get_storage()
self.state_store = self.storage.state
# TODO: flush cache entries on subsequent sync request.
# Once we get the next /sync request (ie, one with the same access token
# that sets 'since' to 'next_batch'), we know that device won't need a
# cached result any more, and we could flush the entry from the cache to save
# memory.
self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
hs.get_clock(),
"sync",
timeout_ms=hs.config.caches.sync_response_cache_duration,
)
# ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
self.lazy_loaded_members_cache: ExpiringCache[
Tuple[str, Optional[str]], LruCache[str, str]

View file

@ -335,7 +335,8 @@ class TypingWriterHandler(FollowerTypingHandler):
)
if not is_in_room:
logger.info(
"Ignoring typing update from %s as we're not in the room",
"Ignoring typing update for room %r from server %s as we're not in the room",
room_id,
origin,
)
return

View file

@ -12,8 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
from typing import Optional
import attr
from zope.interface import implementer
from twisted.internet import defer, protocol
@ -21,7 +24,6 @@ from twisted.internet.error import ConnectError
from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
from twisted.internet.protocol import ClientFactory, Protocol, connectionDone
from twisted.web import http
from twisted.web.http_headers import Headers
logger = logging.getLogger(__name__)
@ -30,6 +32,22 @@ class ProxyConnectError(ConnectError):
pass
@attr.s
class ProxyCredentials:
username_password = attr.ib(type=bytes)
def as_proxy_authorization_value(self) -> bytes:
"""
Return the value for a Proxy-Authorization header (e.g. 'Basic abdef==').
Returns:
The encoded authentication string, for use as the value of a
Proxy-Authorization header.
"""
# Encode as base64 and prepend the authorization type
return b"Basic " + base64.encodebytes(self.username_password)
@implementer(IStreamClientEndpoint)
class HTTPConnectProxyEndpoint:
"""An Endpoint implementation which will send a CONNECT request to an http proxy
@ -46,7 +64,7 @@ class HTTPConnectProxyEndpoint:
proxy_endpoint: the endpoint to use to connect to the proxy
host: hostname that we want to CONNECT to
port: port that we want to connect to
headers: Extra HTTP headers to include in the CONNECT request
proxy_creds: credentials to authenticate at proxy
"""
def __init__(
@ -55,20 +73,20 @@ class HTTPConnectProxyEndpoint:
proxy_endpoint: IStreamClientEndpoint,
host: bytes,
port: int,
headers: Headers,
proxy_creds: Optional[ProxyCredentials],
):
self._reactor = reactor
self._proxy_endpoint = proxy_endpoint
self._host = host
self._port = port
self._headers = headers
self._proxy_creds = proxy_creds
def __repr__(self):
return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)
def connect(self, protocolFactory: ClientFactory):
f = HTTPProxiedClientFactory(
self._host, self._port, protocolFactory, self._headers
self._host, self._port, protocolFactory, self._proxy_creds
)
d = self._proxy_endpoint.connect(f)
# once the tcp socket connects successfully, we need to wait for the
@ -87,7 +105,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
dst_host: hostname that we want to CONNECT to
dst_port: port that we want to connect to
wrapped_factory: The original Factory
headers: Extra HTTP headers to include in the CONNECT request
proxy_creds: credentials to authenticate at proxy
"""
def __init__(
@ -95,12 +113,12 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
dst_host: bytes,
dst_port: int,
wrapped_factory: ClientFactory,
headers: Headers,
proxy_creds: Optional[ProxyCredentials],
):
self.dst_host = dst_host
self.dst_port = dst_port
self.wrapped_factory = wrapped_factory
self.headers = headers
self.proxy_creds = proxy_creds
self.on_connection = defer.Deferred()
def startedConnecting(self, connector):
@ -114,7 +132,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
self.dst_port,
wrapped_protocol,
self.on_connection,
self.headers,
self.proxy_creds,
)
def clientConnectionFailed(self, connector, reason):
@ -145,7 +163,7 @@ class HTTPConnectProtocol(protocol.Protocol):
connected_deferred: a Deferred which will be callbacked with
wrapped_protocol when the CONNECT completes
headers: Extra HTTP headers to include in the CONNECT request
proxy_creds: credentials to authenticate at proxy
"""
def __init__(
@ -154,16 +172,16 @@ class HTTPConnectProtocol(protocol.Protocol):
port: int,
wrapped_protocol: Protocol,
connected_deferred: defer.Deferred,
headers: Headers,
proxy_creds: Optional[ProxyCredentials],
):
self.host = host
self.port = port
self.wrapped_protocol = wrapped_protocol
self.connected_deferred = connected_deferred
self.headers = headers
self.proxy_creds = proxy_creds
self.http_setup_client = HTTPConnectSetupClient(
self.host, self.port, self.headers
self.host, self.port, self.proxy_creds
)
self.http_setup_client.on_connected.addCallback(self.proxyConnected)
@ -205,30 +223,38 @@ class HTTPConnectSetupClient(http.HTTPClient):
Args:
host: The hostname to send in the CONNECT message
port: The port to send in the CONNECT message
headers: Extra headers to send with the CONNECT message
proxy_creds: credentials to authenticate at proxy
"""
def __init__(self, host: bytes, port: int, headers: Headers):
def __init__(
self,
host: bytes,
port: int,
proxy_creds: Optional[ProxyCredentials],
):
self.host = host
self.port = port
self.headers = headers
self.proxy_creds = proxy_creds
self.on_connected = defer.Deferred()
def connectionMade(self):
logger.debug("Connected to proxy, sending CONNECT")
self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
# Send any additional specified headers
for name, values in self.headers.getAllRawHeaders():
for value in values:
self.sendHeader(name, value)
# Determine whether we need to set Proxy-Authorization headers
if self.proxy_creds:
# Set a Proxy-Authorization header
self.sendHeader(
b"Proxy-Authorization",
self.proxy_creds.as_proxy_authorization_value(),
)
self.endHeaders()
def handleStatus(self, version: bytes, status: bytes, message: bytes):
logger.debug("Got Status: %s %s %s", status, message, version)
if status != b"200":
raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)
raise ProxyConnectError(f"Unexpected status on CONNECT: {status!s}")
def handleEndHeaders(self):
logger.debug("End Headers")

View file

@ -14,6 +14,10 @@
import logging
import urllib.parse
from typing import Any, Generator, List, Optional
from urllib.request import ( # type: ignore[attr-defined]
getproxies_environment,
proxy_bypass_environment,
)
from netaddr import AddrFormatError, IPAddress, IPSet
from zope.interface import implementer
@ -30,9 +34,12 @@ from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResponse
from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http.client import BlacklistingAgentWrapper
from synapse.http import proxyagent
from synapse.http.client import BlacklistingAgentWrapper, BlacklistingReactorWrapper
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import ISynapseReactor
from synapse.util import Clock
@ -57,6 +64,14 @@ class MatrixFederationAgent:
user_agent:
The user agent header to use for federation requests.
ip_whitelist: Allowed IP addresses.
ip_blacklist: Disallowed IP addresses.
proxy_reactor: twisted reactor to use for connections to the proxy server.
'reactor' might have some blacklisting applied (i.e. for DNS queries),
but we need unblocked access to the proxy.
_srv_resolver:
SrvResolver implementation to use for looking up SRV records. None
to use a default implementation.
@ -71,11 +86,18 @@ class MatrixFederationAgent:
reactor: ISynapseReactor,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
user_agent: bytes,
ip_whitelist: IPSet,
ip_blacklist: IPSet,
_srv_resolver: Optional[SrvResolver] = None,
_well_known_resolver: Optional[WellKnownResolver] = None,
):
self._reactor = reactor
# proxy_reactor is not blacklisted
proxy_reactor = reactor
# We need to use a DNS resolver which filters out blacklisted IP
# addresses, to prevent DNS rebinding.
reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist)
self._clock = Clock(reactor)
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
@ -83,24 +105,27 @@ class MatrixFederationAgent:
self._pool.cachedConnectionTimeout = 2 * 60
self._agent = Agent.usingEndpointFactory(
self._reactor,
reactor,
MatrixHostnameEndpointFactory(
reactor, tls_client_options_factory, _srv_resolver
reactor,
proxy_reactor,
tls_client_options_factory,
_srv_resolver,
),
pool=self._pool,
)
self.user_agent = user_agent
if _well_known_resolver is None:
# Note that the name resolver has already been wrapped in a
# IPBlacklistingResolver by MatrixFederationHttpClient.
_well_known_resolver = WellKnownResolver(
self._reactor,
reactor,
agent=BlacklistingAgentWrapper(
Agent(
self._reactor,
ProxyAgent(
reactor,
proxy_reactor,
pool=self._pool,
contextFactory=tls_client_options_factory,
use_proxy=True,
),
ip_blacklist=ip_blacklist,
),
@ -200,10 +225,12 @@ class MatrixHostnameEndpointFactory:
def __init__(
self,
reactor: IReactorCore,
proxy_reactor: IReactorCore,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
srv_resolver: Optional[SrvResolver],
):
self._reactor = reactor
self._proxy_reactor = proxy_reactor
self._tls_client_options_factory = tls_client_options_factory
if srv_resolver is None:
@ -211,9 +238,10 @@ class MatrixHostnameEndpointFactory:
self._srv_resolver = srv_resolver
def endpointForURI(self, parsed_uri):
def endpointForURI(self, parsed_uri: URI):
return MatrixHostnameEndpoint(
self._reactor,
self._proxy_reactor,
self._tls_client_options_factory,
self._srv_resolver,
parsed_uri,
@ -227,23 +255,45 @@ class MatrixHostnameEndpoint:
Args:
reactor: twisted reactor to use for underlying requests
proxy_reactor: twisted reactor to use for connections to the proxy server.
'reactor' might have some blacklisting applied (i.e. for DNS queries),
but we need unblocked access to the proxy.
tls_client_options_factory:
factory to use for fetching client tls options, or none to disable TLS.
srv_resolver: The SRV resolver to use
parsed_uri: The parsed URI that we're wanting to connect to.
Raises:
ValueError if the environment variables contain an invalid proxy specification.
RuntimeError if no tls_options_factory is given for a https connection
"""
def __init__(
self,
reactor: IReactorCore,
proxy_reactor: IReactorCore,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
srv_resolver: SrvResolver,
parsed_uri: URI,
):
self._reactor = reactor
self._parsed_uri = parsed_uri
# http_proxy is not needed because federation is always over TLS
proxies = getproxies_environment()
https_proxy = proxies["https"].encode() if "https" in proxies else None
self.no_proxy = proxies["no"] if "no" in proxies else None
# endpoint and credentials to use to connect to the outbound https proxy, if any.
(
self._https_proxy_endpoint,
self._https_proxy_creds,
) = proxyagent.http_proxy_endpoint(
https_proxy,
proxy_reactor,
tls_client_options_factory,
)
# set up the TLS connection params
#
# XXX disabling TLS is really only supported here for the benefit of the
@ -273,9 +323,33 @@ class MatrixHostnameEndpoint:
host = server.host
port = server.port
should_skip_proxy = False
if self.no_proxy is not None:
should_skip_proxy = proxy_bypass_environment(
host.decode(),
proxies={"no": self.no_proxy},
)
endpoint: IStreamClientEndpoint
try:
logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
endpoint = HostnameEndpoint(self._reactor, host, port)
if self._https_proxy_endpoint and not should_skip_proxy:
logger.debug(
"Connecting to %s:%i via %s",
host.decode("ascii"),
port,
self._https_proxy_endpoint,
)
endpoint = HTTPConnectProxyEndpoint(
self._reactor,
self._https_proxy_endpoint,
host,
port,
proxy_creds=self._https_proxy_creds,
)
else:
logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
# not using a proxy
endpoint = HostnameEndpoint(self._reactor, host, port)
if self._tls_options:
endpoint = wrapClientTLS(self._tls_options, endpoint)
result = await make_deferred_yieldable(
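Federation connections now honour the https_proxy/no_proxy environment variables; a hedged illustration of the standard-library bypass check used above, with hypothetical hostnames:

from urllib.request import proxy_bypass_environment

proxy_bypass_environment("matrix.example.org", proxies={"no": "example.org"})  # True: connect directly
proxy_bypass_environment("matrix.org", proxies={"no": "example.org"})  # False: go via the proxy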

View file

@ -59,7 +59,6 @@ from synapse.api.errors import (
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
BlacklistingAgentWrapper,
BlacklistingReactorWrapper,
BodyExceededMaxSize,
ByteWriteable,
encode_query_args,
@ -69,7 +68,7 @@ from synapse.http.federation.matrix_federation_agent import MatrixFederationAgen
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.types import ISynapseReactor, JsonDict
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
@ -325,13 +324,7 @@ class MatrixFederationHttpClient:
self.signing_key = hs.signing_key
self.server_name = hs.hostname
# We need to use a DNS resolver which filters out blacklisted IP
# addresses, to prevent DNS rebinding.
self.reactor: ISynapseReactor = BlacklistingReactorWrapper(
hs.get_reactor(),
hs.config.federation_ip_range_whitelist,
hs.config.federation_ip_range_blacklist,
)
self.reactor = hs.get_reactor()
user_agent = hs.version_string
if hs.config.user_agent_suffix:
@ -342,6 +335,7 @@ class MatrixFederationHttpClient:
self.reactor,
tls_client_options_factory,
user_agent,
hs.config.federation_ip_range_whitelist,
hs.config.federation_ip_range_blacklist,
)

View file

@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import re
from typing import Any, Dict, Optional, Tuple
@ -21,7 +20,6 @@ from urllib.request import ( # type: ignore[attr-defined]
proxy_bypass_environment,
)
import attr
from zope.interface import implementer
from twisted.internet import defer
@ -38,7 +36,7 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.types import ISynapseReactor
logger = logging.getLogger(__name__)
@ -46,22 +44,6 @@ logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
@attr.s
class ProxyCredentials:
username_password = attr.ib(type=bytes)
def as_proxy_authorization_value(self) -> bytes:
"""
Return the value for a Proxy-Authorization header (e.g. 'Basic abdef==').
Returns:
The encoded authentication string, for use as the value of a
Proxy-Authorization header.
"""
# Encode as base64 and prepend the authorization type
return b"Basic " + base64.encodebytes(self.username_password)
@implementer(IAgent)
class ProxyAgent(_AgentBase):
"""An Agent implementation which will use an HTTP proxy if one was requested
@ -95,6 +77,7 @@ class ProxyAgent(_AgentBase):
Raises:
ValueError if use_proxy is set and the environment variables
contain an invalid proxy specification.
RuntimeError if no tls_options_factory is given for an https connection
"""
def __init__(
@ -131,11 +114,11 @@ class ProxyAgent(_AgentBase):
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
self.http_proxy_endpoint, self.http_proxy_creds = _http_proxy_endpoint(
self.http_proxy_endpoint, self.http_proxy_creds = http_proxy_endpoint(
http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
)
self.https_proxy_endpoint, self.https_proxy_creds = _http_proxy_endpoint(
self.https_proxy_endpoint, self.https_proxy_creds = http_proxy_endpoint(
https_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
)
@ -224,22 +207,12 @@ class ProxyAgent(_AgentBase):
and self.https_proxy_endpoint
and not should_skip_proxy
):
connect_headers = Headers()
# Determine whether we need to set Proxy-Authorization headers
if self.https_proxy_creds:
# Set a Proxy-Authorization header
connect_headers.addRawHeader(
b"Proxy-Authorization",
self.https_proxy_creds.as_proxy_authorization_value(),
)
endpoint = HTTPConnectProxyEndpoint(
self.proxy_reactor,
self.https_proxy_endpoint,
parsed_uri.host,
parsed_uri.port,
headers=connect_headers,
self.https_proxy_creds,
)
else:
# not using a proxy
@ -268,10 +241,10 @@ class ProxyAgent(_AgentBase):
)
def _http_proxy_endpoint(
def http_proxy_endpoint(
proxy: Optional[bytes],
reactor: IReactorCore,
tls_options_factory: IPolicyForHTTPS,
tls_options_factory: Optional[IPolicyForHTTPS],
**kwargs,
) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]:
"""Parses an http proxy setting and returns an endpoint for the proxy
@ -294,6 +267,7 @@ def _http_proxy_endpoint(
Raises:
ValueError if proxy has no hostname or unsupported scheme.
RuntimeError if no tls_options_factory is given for an https connection
"""
if proxy is None:
return None, None
@ -305,8 +279,13 @@ def _http_proxy_endpoint(
proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs)
if scheme == b"https":
tls_options = tls_options_factory.creatorForNetloc(host, port)
proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
if tls_options_factory:
tls_options = tls_options_factory.creatorForNetloc(host, port)
proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
else:
raise RuntimeError(
f"No TLS options for a https connection via proxy {proxy!s}"
)
return proxy_endpoint, credentials
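# A hedged usage sketch of the renamed helper; the proxy URL below is
# illustrative and the TLS policy is one reasonable stand-in, not necessarily
# what Synapse itself passes:
from twisted.internet import reactor
from twisted.web.client import BrowserLikePolicyForHTTPS

from synapse.http.proxyagent import http_proxy_endpoint

endpoint, credentials = http_proxy_endpoint(
    b"https://user:pass@proxy.example.com:8888",  # illustrative proxy spec
    reactor,
    BrowserLikePolicyForHTTPS(),
)
if credentials is not None:
    header_value = credentials.as_proxy_authorization_value()  # b"Basic ..."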

View file

@ -14,16 +14,28 @@
""" This module contains base REST classes for constructing REST servlets. """
import logging
from typing import Iterable, List, Mapping, Optional, Sequence, overload
from typing import (
TYPE_CHECKING,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
overload,
)
from typing_extensions import Literal
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
from synapse.types import JsonDict
from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@ -663,3 +675,45 @@ class RestServlet:
else:
raise NotImplementedError("RestServlet must register something.")
class ResolveRoomIdMixin:
def __init__(self, hs: "HomeServer"):
self.room_member_handler = hs.get_room_member_handler()
async def resolve_room_id(
self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None
) -> Tuple[str, Optional[List[str]]]:
"""
Resolve a room identifier to a room ID, if necessary.
This also performs checks to ensure the room ID is of the proper form.
Args:
room_identifier: The room ID or alias.
remote_room_hosts: The potential remote room hosts to use.
Returns:
The resolved room ID.
Raises:
SynapseError if the room ID is of the wrong form.
"""
if RoomID.is_valid(room_identifier):
resolved_room_id = room_identifier
elif RoomAlias.is_valid(room_identifier):
room_alias = RoomAlias.from_string(room_identifier)
(
room_id,
remote_room_hosts,
) = await self.room_member_handler.lookup_room_alias(room_alias)
resolved_room_id = room_id.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
if not resolved_room_id:
raise SynapseError(
400, "Unknown room ID or room alias %s" % room_identifier
)
return resolved_room_id, remote_room_hosts
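# A hedged sketch of consuming the new mixin from a servlet; the servlet name
# and response shape are illustrative, not part of this change:
from synapse.http.servlet import ResolveRoomIdMixin, RestServlet

class ExampleRoomInfoServlet(ResolveRoomIdMixin, RestServlet):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)  # the mixin stores the room member handler

    async def on_GET(self, request, room_identifier: str):
        # Accepts a room ID ("!abc:server") or an alias ("#room:server")
        room_id, remote_room_hosts = await self.resolve_room_id(room_identifier)
        return 200, {"room_id": room_id}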

View file

@ -45,7 +45,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.types import JsonDict, Requester, UserID, UserInfo, create_requester
from synapse.util import Clock
from synapse.util.caches.descriptors import cached
@ -91,6 +91,7 @@ class ModuleApi:
self._state = hs.get_state_handler()
self._clock: Clock = hs.get_clock()
self._send_email_handler = hs.get_send_email_handler()
self.custom_template_dir = hs.config.server.custom_template_directory
try:
app_name = self._hs.config.email_app_name
@ -174,6 +175,16 @@ class ModuleApi:
"""The application name configured in the homeserver's configuration."""
return self._hs.config.email.email_app_name
async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
"""Get user info by user_id
Args:
user_id: Fully qualified user id.
Returns:
UserInfo object if a user was found, otherwise None
"""
return await self._store.get_userinfo_by_id(user_id)
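# A hedged usage sketch for module authors; the surrounding scaffolding is
# illustrative and `api` is the ModuleApi instance handed to a module:
#
#     async def user_exists(api: "ModuleApi", user_id: str) -> bool:
#         userinfo = await api.get_userinfo_by_id(user_id)
#         return userinfo is not None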
async def get_user_by_req(
self,
req: SynapseRequest,
@ -593,10 +604,15 @@ class ModuleApi:
msec: float,
*args,
desc: Optional[str] = None,
run_on_all_instances: bool = False,
**kwargs,
):
"""Wraps a function as a background process and calls it repeatedly.
NOTE: Will only run on the instance that is configured to run
background processes (which is the main process by default), unless
`run_on_all_instances` is set.
Waits `msec` initially before calling `f` for the first time.
Args:
@ -607,12 +623,14 @@ class ModuleApi:
msec: How long to wait between calls in milliseconds.
*args: Positional arguments to pass to function.
desc: The background task's description. Default to the function's name.
run_on_all_instances: Whether to run this on all instances, rather
than just the instance configured to run background tasks.
**kwargs: Keyword arguments to pass to function.
"""
if desc is None:
desc = f.__name__
if self._hs.config.run_background_tasks:
if self._hs.config.run_background_tasks or run_on_all_instances:
self._clock.looping_call(
run_as_background_process,
msec,
@ -667,7 +685,10 @@ class ModuleApi:
A list containing the loaded templates, in the same order as the
filenames parameter.
"""
return self._hs.config.read_templates(filenames, custom_template_directory)
return self._hs.config.read_templates(
filenames,
(td for td in (self.custom_template_dir, custom_template_directory) if td),
)
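# The generator above keeps only the directories that are actually set, so
# unset ones are skipped rather than passed through as None. A hedged, more
# explicit equivalent:
#
#     dirs = [d for d in (self.custom_template_dir, custom_template_directory) if d]
#     return self._hs.config.read_templates(filenames, dirs)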
class PublicRoomListManager:

View file

@ -1,37 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.replication.tcp.streams import PublicRoomsStream
from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.room import RoomWorkerStore
from ._base import BaseSlavedStore
from ._slaved_id_tracker import SlavedIdTracker
class RoomStore(RoomWorkerStore, BaseSlavedStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
self._public_room_id_gen = SlavedIdTracker(
db_conn, "public_room_list_stream", "stream_id"
)
def get_current_public_room_stream_id(self):
return self._public_room_id_gen.get_current_token()
def process_replication_rows(self, stream_name, instance_name, token, rows):
if stream_name == PublicRoomsStream.NAME:
self._public_room_id_gen.advance(instance_name, token)
return super().process_replication_rows(stream_name, instance_name, token, rows)

View file

@ -32,7 +32,6 @@ from synapse.replication.tcp.streams._base import (
GroupServerStream,
PresenceFederationStream,
PresenceStream,
PublicRoomsStream,
PushersStream,
PushRulesStream,
ReceiptsStream,
@ -57,7 +56,6 @@ STREAMS_MAP = {
PushRulesStream,
PushersStream,
CachesStream,
PublicRoomsStream,
DeviceListsStream,
ToDeviceStream,
FederationStream,
@ -79,7 +77,6 @@ __all__ = [
"PushRulesStream",
"PushersStream",
"CachesStream",
"PublicRoomsStream",
"DeviceListsStream",
"ToDeviceStream",
"TagAccountDataStream",

View file

@ -447,31 +447,6 @@ class CachesStream(Stream):
)
class PublicRoomsStream(Stream):
"""The public rooms list changed"""
PublicRoomsStreamRow = namedtuple(
"PublicRoomsStreamRow",
(
"room_id", # str
"visibility", # str
"appservice_id", # str, optional
"network_id", # str, optional
),
)
NAME = "public_rooms"
ROW_TYPE = PublicRoomsStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_current_public_room_stream_id),
store.get_all_new_public_rooms,
)
class DeviceListsStream(Stream):
"""Either a user has updated their devices or a remote server needs to be
told about a device update.

View file

@ -14,39 +14,36 @@
# limitations under the License.
from synapse.http.server import JsonResource
from synapse.rest import admin
from synapse.rest.client import versions
from synapse.rest.client.v1 import (
directory,
events,
initial_sync,
login as v1_login,
logout,
presence,
profile,
push_rule,
pusher,
room,
voip,
)
from synapse.rest.client.v2_alpha import (
from synapse.rest.client import (
account,
account_data,
account_validity,
auth,
capabilities,
devices,
directory,
events,
filter,
groups,
initial_sync,
keys,
knock,
login as v1_login,
logout,
notifications,
openid,
password_policy,
presence,
profile,
push_rule,
pusher,
read_marker,
receipts,
register,
relations,
report_event,
room,
room_batch,
room_keys,
room_upgrade_rest_servlet,
sendtodevice,
@ -56,6 +53,8 @@ from synapse.rest.client.v2_alpha import (
thirdparty,
tokenrefresh,
user_directory,
versions,
voip,
)
@ -84,7 +83,6 @@ class ClientRestResource(JsonResource):
# Partially deprecated in r0
events.register_servlets(hs, client_resource)
# "v1" + "r0"
room.register_servlets(hs, client_resource)
v1_login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource)
@ -94,8 +92,6 @@ class ClientRestResource(JsonResource):
pusher.register_servlets(hs, client_resource)
push_rule.register_servlets(hs, client_resource)
logout.register_servlets(hs, client_resource)
# "v2"
sync.register_servlets(hs, client_resource)
filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource)
@ -117,6 +113,7 @@ class ClientRestResource(JsonResource):
user_directory.register_servlets(hs, client_resource)
groups.register_servlets(hs, client_resource)
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
room_batch.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource)
account_validity.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource)

View file

@ -51,6 +51,7 @@ from synapse.rest.admin.rooms import (
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
from synapse.rest.admin.username_available import UsernameAvailableRestServlet
from synapse.rest.admin.users import (
AccountValidityRenewServlet,
DeactivateAccountRestServlet,
@ -60,7 +61,6 @@ from synapse.rest.admin.users import (
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
UserMediaRestServlet,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
@ -224,7 +224,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SendServerNoticeServlet(hs).register(http_server)
VersionServlet(hs).register(http_server)
UserAdminServlet(hs).register(http_server)
UserMediaRestServlet(hs).register(http_server)
UserMembershipRestServlet(hs).register(http_server)
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
@ -241,6 +240,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ForwardExtremitiesRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
RateLimitRestServlet(hs).register(http_server)
UsernameAvailableRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(

View file

@ -18,14 +18,15 @@ from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
admin_patterns,
assert_requester_is_admin,
assert_user_is_admin,
)
from synapse.types import JsonDict
from synapse.storage.databases.main.media_repository import MediaSortOrder
from synapse.types import JsonDict, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -259,7 +260,9 @@ class DeleteMediaByID(RestServlet):
logging.info("Deleting local media by ID: %s", media_id)
deleted_media, total = await self.media_repository.delete_local_media(media_id)
deleted_media, total = await self.media_repository.delete_local_media_ids(
[media_id]
)
return 200, {"deleted_media": deleted_media, "total": total}
@ -312,6 +315,165 @@ class DeleteMediaByDateSize(RestServlet):
return 200, {"deleted_media": deleted_media, "total": total}
class UserMediaRestServlet(RestServlet):
"""
Gets information about all uploaded local media for a specific `user_id`.
With a DELETE request you can delete all this media.
Example:
http://localhost:8008/_synapse/admin/v1/users/@user:server/media
Args:
The parameters `from` and `limit` are required for pagination.
By default, a `limit` of 100 is used.
Returns:
A list of media and an integer representing the total number of
media that exist for this user
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
def __init__(self, hs: "HomeServer"):
self.is_mine = hs.is_mine
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.media_repository = hs.get_media_repository()
async def on_GET(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
# This will always be set by the time Twisted calls us.
assert request.args is not None
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only look up local users")
user = await self.store.get_user_by_id(user_id)
if user is None:
raise NotFoundError("Unknown user")
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
if start < 0:
raise SynapseError(
400,
"Query parameter from must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
if limit < 0:
raise SynapseError(
400,
"Query parameter limit must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
# If neither `order_by` nor `dir` is set, default the order to
# newest media on top, for backward compatibility.
if b"order_by" not in request.args and b"dir" not in request.args:
order_by = MediaSortOrder.CREATED_TS.value
direction = "b"
else:
order_by = parse_string(
request,
"order_by",
default=MediaSortOrder.CREATED_TS.value,
allowed_values=(
MediaSortOrder.MEDIA_ID.value,
MediaSortOrder.UPLOAD_NAME.value,
MediaSortOrder.CREATED_TS.value,
MediaSortOrder.LAST_ACCESS_TS.value,
MediaSortOrder.MEDIA_LENGTH.value,
MediaSortOrder.MEDIA_TYPE.value,
MediaSortOrder.QUARANTINED_BY.value,
MediaSortOrder.SAFE_FROM_QUARANTINE.value,
),
)
direction = parse_string(
request, "dir", default="f", allowed_values=("f", "b")
)
media, total = await self.store.get_local_media_by_user_paginate(
start, limit, user_id, order_by, direction
)
ret = {"media": media, "total": total}
if (start + limit) < total:
ret["next_token"] = start + len(media)
return 200, ret
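# A hedged worked example of the pagination contract above, with illustrative
# numbers (total=250, limit=100):
#   from=0   -> 100 rows, next_token=100
#   from=100 -> 100 rows, next_token=200
#   from=200 -> 50 rows, no next_token (start + limit >= total)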
async def on_DELETE(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
# This will always be set by the time Twisted calls us.
assert request.args is not None
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only look up local users")
user = await self.store.get_user_by_id(user_id)
if user is None:
raise NotFoundError("Unknown user")
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
if start < 0:
raise SynapseError(
400,
"Query parameter from must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
if limit < 0:
raise SynapseError(
400,
"Query parameter limit must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
# If neither `order_by` nor `dir` is set, default the order to
# newest media on top, for backward compatibility.
if b"order_by" not in request.args and b"dir" not in request.args:
order_by = MediaSortOrder.CREATED_TS.value
direction = "b"
else:
order_by = parse_string(
request,
"order_by",
default=MediaSortOrder.CREATED_TS.value,
allowed_values=(
MediaSortOrder.MEDIA_ID.value,
MediaSortOrder.UPLOAD_NAME.value,
MediaSortOrder.CREATED_TS.value,
MediaSortOrder.LAST_ACCESS_TS.value,
MediaSortOrder.MEDIA_LENGTH.value,
MediaSortOrder.MEDIA_TYPE.value,
MediaSortOrder.QUARANTINED_BY.value,
MediaSortOrder.SAFE_FROM_QUARANTINE.value,
),
)
direction = parse_string(
request, "dir", default="f", allowed_values=("f", "b")
)
media, _ = await self.store.get_local_media_by_user_paginate(
start, limit, user_id, order_by, direction
)
deleted_media, total = await self.media_repository.delete_local_media_ids(
([row["media_id"] for row in media])
)
return 200, {"deleted_media": deleted_media, "total": total}
def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Media repo specific APIs.
@ -326,3 +488,4 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
ListMediaInRoom(hs).register(http_server)
DeleteMediaByID(hs).register(http_server)
DeleteMediaByDateSize(hs).register(http_server)
UserMediaRestServlet(hs).register(http_server)

View file

@ -20,6 +20,7 @@ from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
assert_params_in_dict,
parse_integer,
@ -33,7 +34,7 @@ from synapse.rest.admin._base import (
assert_user_is_admin,
)
from synapse.storage.databases.main.room import RoomSortOrder
from synapse.types import JsonDict, RoomAlias, RoomID, UserID, create_requester
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import json_decoder
if TYPE_CHECKING:
@ -45,48 +46,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
class ResolveRoomIdMixin:
def __init__(self, hs: "HomeServer"):
self.room_member_handler = hs.get_room_member_handler()
async def resolve_room_id(
self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None
) -> Tuple[str, Optional[List[str]]]:
"""
Resolve a room identifier to a room ID, if necessary.
This also performs checks to ensure the room ID is of the proper form.
Args:
room_identifier: The room ID or alias.
remote_room_hosts: The potential remote room hosts to use.
Returns:
The resolved room ID.
Raises:
SynapseError if the room ID is of the wrong form.
"""
if RoomID.is_valid(room_identifier):
resolved_room_id = room_identifier
elif RoomAlias.is_valid(room_identifier):
room_alias = RoomAlias.from_string(room_identifier)
(
room_id,
remote_room_hosts,
) = await self.room_member_handler.lookup_room_alias(room_alias)
resolved_room_id = room_id.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
if not resolved_room_id:
raise SynapseError(
400, "Unknown room ID or room alias %s" % room_identifier
)
return resolved_room_id, remote_room_hosts
class ShutdownRoomRestServlet(RestServlet):
"""Shuts down a room by removing all local users from the room and blocking
all future invites and joins to the room. Any local aliases will be repointed

View file

@ -0,0 +1,51 @@
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse.http.servlet import RestServlet, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class UsernameAvailableRestServlet(RestServlet):
"""An admin API to check if a given username is available, regardless of whether registration is enabled.
Example:
GET /_synapse/admin/v1/username_available?username=foo
200 OK
{
"available": true
}
"""
PATTERNS = admin_patterns("/username_available")
def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
self.registration_handler = hs.get_registration_handler()
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
username = parse_string(request, "username", required=True)
await self.registration_handler.check_username(username)
return HTTPStatus.OK, {"available": True}

View file

@ -34,8 +34,7 @@ from synapse.rest.admin._base import (
assert_requester_is_admin,
assert_user_is_admin,
)
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.storage.databases.main.media_repository import MediaSortOrder
from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.types import JsonDict, UserID
@ -172,7 +171,7 @@ class UserRestServletV2(RestServlet):
target_user = UserID.from_string(user_id)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only lookup local users")
raise SynapseError(400, "Can only look up local users")
ret = await self.admin_handler.get_user(target_user)
@ -196,20 +195,57 @@ class UserRestServletV2(RestServlet):
user = await self.admin_handler.get_user(target_user)
user_id = target_user.to_string()
# check for required parameters for each threepid
threepids = body.get("threepids")
if threepids is not None:
for threepid in threepids:
assert_params_in_dict(threepid, ["medium", "address"])
# check for required parameters for each external_id
external_ids = body.get("external_ids")
if external_ids is not None:
for external_id in external_ids:
assert_params_in_dict(external_id, ["auth_provider", "external_id"])
user_type = body.get("user_type", None)
if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
raise SynapseError(400, "Invalid user type")
set_admin_to = body.get("admin", False)
if not isinstance(set_admin_to, bool):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'admin' must be a boolean, if given",
Codes.BAD_JSON,
)
password = body.get("password", None)
if password is not None:
if not isinstance(password, str) or len(password) > 512:
raise SynapseError(400, "Invalid password")
deactivate = body.get("deactivated", False)
if not isinstance(deactivate, bool):
raise SynapseError(400, "'deactivated' parameter is not of type boolean")
# convert into List[Tuple[str, str]]
if external_ids is not None:
new_external_ids = []
for external_id in external_ids:
new_external_ids.append(
(external_id["auth_provider"], external_id["external_id"])
)
if user: # modify user
if "displayname" in body:
await self.profile_handler.set_displayname(
target_user, requester, body["displayname"], True
)
if "threepids" in body:
# check for required parameters for each threepid
for threepid in body["threepids"]:
assert_params_in_dict(threepid, ["medium", "address"])
if threepids is not None:
# remove old threepids from user
threepids = await self.store.user_get_threepids(user_id)
for threepid in threepids:
old_threepids = await self.store.user_get_threepids(user_id)
for threepid in old_threepids:
try:
await self.auth_handler.delete_threepid(
user_id, threepid["medium"], threepid["address"], None
@ -220,18 +256,39 @@ class UserRestServletV2(RestServlet):
# add new threepids to user
current_time = self.hs.get_clock().time_msec()
for threepid in body["threepids"]:
for threepid in threepids:
await self.auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], current_time
)
if "avatar_url" in body and type(body["avatar_url"]) == str:
if external_ids is not None:
# get changed external_ids (added and removed)
cur_external_ids = await self.store.get_external_ids_by_user(user_id)
add_external_ids = set(new_external_ids) - set(cur_external_ids)
del_external_ids = set(cur_external_ids) - set(new_external_ids)
# remove old external_ids
for auth_provider, external_id in del_external_ids:
await self.store.remove_user_external_id(
auth_provider,
external_id,
user_id,
)
# add new external_ids
for auth_provider, external_id in add_external_ids:
await self.store.record_user_external_id(
auth_provider,
external_id,
user_id,
)
if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
)
if "admin" in body:
set_admin_to = bool(body["admin"])
if set_admin_to != user["admin"]:
auth_user = requester.user
if target_user == auth_user and not set_admin_to:
@ -239,29 +296,18 @@ class UserRestServletV2(RestServlet):
await self.store.set_server_admin(target_user, set_admin_to)
if "password" in body:
if not isinstance(body["password"], str) or len(body["password"]) > 512:
raise SynapseError(400, "Invalid password")
else:
new_password = body["password"]
logout_devices = True
if password is not None:
logout_devices = True
new_password_hash = await self.auth_handler.hash(password)
new_password_hash = await self.auth_handler.hash(new_password)
await self.set_password_handler.set_password(
target_user.to_string(),
new_password_hash,
logout_devices,
requester,
)
await self.set_password_handler.set_password(
target_user.to_string(),
new_password_hash,
logout_devices,
requester,
)
if "deactivated" in body:
deactivate = body["deactivated"]
if not isinstance(deactivate, bool):
raise SynapseError(
400, "'deactivated' parameter is not of type boolean"
)
if deactivate and not user["deactivated"]:
await self.deactivate_account_handler.deactivate_account(
target_user.to_string(), False, requester, by_admin=True
@ -285,36 +331,24 @@ class UserRestServletV2(RestServlet):
return 200, user
else: # create user
password = body.get("password")
password_hash = None
if password is not None:
if not isinstance(password, str) or len(password) > 512:
raise SynapseError(400, "Invalid password")
password_hash = await self.auth_handler.hash(password)
admin = body.get("admin", None)
user_type = body.get("user_type", None)
displayname = body.get("displayname", None)
if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
raise SynapseError(400, "Invalid user type")
password_hash = None
if password is not None:
password_hash = await self.auth_handler.hash(password)
user_id = await self.registration_handler.register_user(
localpart=target_user.localpart,
password_hash=password_hash,
admin=bool(admin),
admin=set_admin_to,
default_display_name=displayname,
user_type=user_type,
by_admin=True,
)
if "threepids" in body:
# check for required parameters for each threepid
for threepid in body["threepids"]:
assert_params_in_dict(threepid, ["medium", "address"])
if threepids is not None:
current_time = self.hs.get_clock().time_msec()
for threepid in body["threepids"]:
for threepid in threepids:
await self.auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], current_time
)
@ -334,6 +368,14 @@ class UserRestServletV2(RestServlet):
data={},
)
if external_ids is not None:
for auth_provider, external_id in new_external_ids:
await self.store.record_user_external_id(
auth_provider,
external_id,
user_id,
)
if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
@ -461,7 +503,7 @@ class UserRegisterServlet(RestServlet):
raise SynapseError(403, "HMAC incorrect")
# Reuse the parts of RegisterRestServlet to reduce code duplication
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.register import RegisterRestServlet
register = RegisterRestServlet(self.hs)
@ -796,7 +838,7 @@ class PushersRestServlet(RestServlet):
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only lookup local users")
raise SynapseError(400, "Can only look up local users")
if not await self.store.get_user_by_id(user_id):
raise NotFoundError("User not found")
@ -808,97 +850,6 @@ class PushersRestServlet(RestServlet):
return 200, {"pushers": filtered_pushers, "total": len(filtered_pushers)}
class UserMediaRestServlet(RestServlet):
"""
Gets information about all uploaded local media for a specific `user_id`.
Example:
http://localhost:8008/_synapse/admin/v1/users/@user:server/media
Args:
The parameters `from` and `limit` are required for pagination.
By default, a `limit` of 100 is used.
Returns:
A list of media and an integer representing the total number of
media that exist for this user
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
def __init__(self, hs: "HomeServer"):
self.is_mine = hs.is_mine
self.auth = hs.get_auth()
self.store = hs.get_datastore()
async def on_GET(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
# This will always be set by the time Twisted calls us.
assert request.args is not None
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only lookup local users")
user = await self.store.get_user_by_id(user_id)
if user is None:
raise NotFoundError("Unknown user")
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
if start < 0:
raise SynapseError(
400,
"Query parameter from must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
if limit < 0:
raise SynapseError(
400,
"Query parameter limit must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
# If neither `order_by` nor `dir` is set, default the order to
# newest media on top, for backward compatibility.
if b"order_by" not in request.args and b"dir" not in request.args:
order_by = MediaSortOrder.CREATED_TS.value
direction = "b"
else:
order_by = parse_string(
request,
"order_by",
default=MediaSortOrder.CREATED_TS.value,
allowed_values=(
MediaSortOrder.MEDIA_ID.value,
MediaSortOrder.UPLOAD_NAME.value,
MediaSortOrder.CREATED_TS.value,
MediaSortOrder.LAST_ACCESS_TS.value,
MediaSortOrder.MEDIA_LENGTH.value,
MediaSortOrder.MEDIA_TYPE.value,
MediaSortOrder.QUARANTINED_BY.value,
MediaSortOrder.SAFE_FROM_QUARANTINE.value,
),
)
direction = parse_string(
request, "dir", default="f", allowed_values=("f", "b")
)
media, total = await self.store.get_local_media_by_user_paginate(
start, limit, user_id, order_by, direction
)
ret = {"media": media, "total": total}
if (start + limit) < total:
ret["next_token"] = start + len(media)
return 200, ret
class UserTokenRestServlet(RestServlet):
"""An admin API for logging in as a user.
@ -1017,7 +968,7 @@ class RateLimitRestServlet(RestServlet):
await assert_requester_is_admin(self.auth, request)
if not self.hs.is_mine_id(user_id):
raise SynapseError(400, "Can only lookup local users")
raise SynapseError(400, "Can only look up local users")
if not await self.store.get_user_by_id(user_id):
raise NotFoundError("User not found")

View file

@ -1,4 +1,4 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2014-2016 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View file

@ -23,7 +23,7 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.types import RoomAlias
logger = logging.getLogger(__name__)

View file

@ -17,7 +17,7 @@ import logging
from synapse.api.errors import SynapseError
from synapse.http.servlet import RestServlet
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.streams.config import PaginationConfig
logger = logging.getLogger(__name__)

View file

@ -14,7 +14,7 @@
from synapse.http.servlet import RestServlet, parse_boolean
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.streams.config import PaginationConfig

View file

@ -34,7 +34,7 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.rest.well_known import WellKnownBuilder
from synapse.types import JsonDict, UserID

View file

@ -15,7 +15,7 @@
import logging
from synapse.http.servlet import RestServlet
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
logger = logging.getLogger(__name__)

View file

@ -19,7 +19,7 @@ import logging
from synapse.api.errors import AuthError, SynapseError
from synapse.handlers.presence import format_user_presence_state
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.types import UserID
logger = logging.getLogger(__name__)

View file

@ -16,7 +16,7 @@
from synapse.api.errors import Codes, SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.types import UserID

View file

@ -26,7 +26,7 @@ from synapse.http.servlet import (
from synapse.push.baserules import BASE_RULE_IDS, NEW_RULE_IDS
from synapse.push.clientformat import format_push_rules_for_user
from synapse.push.rulekinds import PRIORITY_CLASS_MAP
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException

View file

@ -23,7 +23,7 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.push import PusherConfigException
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
logger = logging.getLogger(__name__)

View file

@ -115,7 +115,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
# For emails, canonicalise the address.
# We store all email addresses canonicalised in the DB.
# (See on_POST in EmailThreepidRequestTokenRestServlet
# in synapse/rest/client/v2_alpha/account.py)
# in synapse/rest/client/account.py)
try:
email = validate_email(body["email"])
except ValueError as e:
@ -631,7 +631,7 @@ class RegisterRestServlet(RestServlet):
# For emails, canonicalise the address.
# We store all email addresses canonicalised in the DB.
# (See on_POST in EmailThreepidRequestTokenRestServlet
# in synapse/rest/client/v2_alpha/account.py)
# in synapse/rest/client/account.py)
if medium == "email":
try:
address = canonicalise_email(address)

View file

@ -19,19 +19,19 @@ import re
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from urllib import parse as urlparse
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
HttpResponseException,
InvalidClientCredentialsError,
MissingClientTokenError,
ShadowBanError,
SynapseError,
)
from synapse.api.filtering import Filter
from synapse.appservice import ApplicationService
from synapse.events.utils import format_event_for_client_v2
from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
assert_params_in_dict,
parse_boolean,
@ -42,20 +42,11 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import (
JsonDict,
Requester,
RoomAlias,
RoomID,
StreamToken,
ThirdPartyInstanceID,
UserID,
create_requester,
)
from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
from synapse.util import json_decoder
from synapse.util.stringutils import parse_and_validate_server_name, random_string
@ -272,405 +263,11 @@ class RoomSendEventRestServlet(TransactionRestServlet):
)
class RoomBatchSendEventRestServlet(TransactionRestServlet):
"""
API endpoint which can insert a chunk of events historically back in time
next to the given `prev_event`.
`chunk_id` comes from `next_chunk_id` in the response of the batch send
endpoint and is derived from the "insertion" events added to each chunk.
It's not required for the first batch send.
`state_events_at_start` is used to define the historical state events
needed to auth the events like join events. These events will float
outside of the normal DAG as outliers and won't be visible in the chat
history, which also allows us to insert multiple chunks without having a bunch
of `@mxid joined the room` noise between each chunk.
`events` is a chronological chunk/list of events you want to insert.
There is a reverse-chronological constraint on chunks so once you insert
some messages, you can only insert older ones after that.
tldr; Insert chunks from your most recent history -> oldest history.
POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event=<eventID>&chunk_id=<chunkID>
{
"events": [ ... ],
"state_events_at_start": [ ... ]
}
"""
PATTERNS = (
re.compile(
"^/_matrix/client/unstable/org.matrix.msc2716"
"/rooms/(?P<room_id>[^/]*)/batch_send$"
),
)
def __init__(self, hs):
super().__init__(hs)
self.hs = hs
self.store = hs.get_datastore()
self.state_store = hs.get_storage().state
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
async def _inherit_depth_from_prev_ids(self, prev_event_ids) -> int:
(
most_recent_prev_event_id,
most_recent_prev_event_depth,
) = await self.store.get_max_depth_of(prev_event_ids)
# We want to insert the historical event after the `prev_event` but before the successor event
#
# We inherit depth from the successor event instead of the `prev_event`
# because events returned from `/messages` are first sorted by `topological_ordering`
# which is just the `depth` and then tie-break with `stream_ordering`.
#
# We mark these inserted historical events as "backfilled" which gives them a
# negative `stream_ordering`. If we use the same depth as the `prev_event`,
# then our historical event will tie-break and be sorted before the `prev_event`
# when it should come after.
#
# We want to use the successor event depth so they appear after `prev_event` because
# it has a larger `depth` but before the successor event because the `stream_ordering`
# is negative before the successor event.
successor_event_ids = await self.store.get_successor_events(
[most_recent_prev_event_id]
)
# If we can't find any successor events, then it's a forward extremity of
# historical messages and we can just inherit from the previous historical
# event which we can already assume has the correct depth where we want
# to insert into.
if not successor_event_ids:
depth = most_recent_prev_event_depth
else:
(
_,
oldest_successor_depth,
) = await self.store.get_min_depth_of(successor_event_ids)
depth = oldest_successor_depth
return depth
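# A hedged worked example of the ordering rule above, with illustrative
# numbers. /messages sorts by (depth, stream_ordering), and backfilled events
# get a negative stream_ordering:
#   prev_event:  (depth=10, stream_ordering=5)
#   successor:   (depth=11, stream_ordering=6)
#   a historical event with depth=10 would sort at (10, -1), i.e. *before*
#   prev_event; with the successor's depth it sorts at (11, -1), landing
#   after prev_event and before the successor, as intended.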
def _create_insertion_event_dict(
self, sender: str, room_id: str, origin_server_ts: int
):
"""Creates an event dict for an "insertion" event with the proper fields
and a random chunk ID.
Args:
sender: The event author MXID
room_id: The room ID that the event belongs to
origin_server_ts: Timestamp when the event was sent
Returns:
Tuple of event ID and stream ordering position
"""
next_chunk_id = random_string(8)
insertion_event = {
"type": EventTypes.MSC2716_INSERTION,
"sender": sender,
"room_id": room_id,
"content": {
EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
EventContentFields.MSC2716_HISTORICAL: True,
},
"origin_server_ts": origin_server_ts,
}
return insertion_event
async def _create_requester_for_user_id_from_app_service(
self, user_id: str, app_service: ApplicationService
) -> Requester:
"""Creates a new requester for the given user_id
and validates that the app service is allowed to control
the given user.
Args:
user_id: The author MXID that the app service is controlling
app_service: The app service that controls the user
Returns:
Requester object
"""
await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
return create_requester(user_id, app_service=app_service)
async def on_POST(self, request, room_id):
requester = await self.auth.get_user_by_req(request, allow_guest=False)
if not requester.app_service:
raise AuthError(
403,
"Only application services can use the /batchsend endpoint",
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["state_events_at_start", "events"])
prev_events_from_query = parse_strings_from_args(request.args, "prev_event")
chunk_id_from_query = parse_string(request, "chunk_id")
if prev_events_from_query is None:
raise SynapseError(
400,
"prev_event query parameter is required when inserting historical messages back in time",
errcode=Codes.MISSING_PARAM,
)
# For the event we are inserting next to (`prev_events_from_query`),
# find the most recent auth events (derived from state events) that
# allowed that message to be sent. We will use that as a base
# to auth our historical messages against.
(
most_recent_prev_event_id,
_,
) = await self.store.get_max_depth_of(prev_events_from_query)
# mapping from (type, state_key) -> state_event_id
prev_state_map = await self.state_store.get_state_ids_for_event(
most_recent_prev_event_id
)
# List of state event ID's
prev_state_ids = list(prev_state_map.values())
auth_event_ids = prev_state_ids
for state_event in body["state_events_at_start"]:
assert_params_in_dict(
state_event, ["type", "origin_server_ts", "content", "sender"]
)
logger.debug(
"RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
state_event,
auth_event_ids,
)
event_dict = {
"type": state_event["type"],
"origin_server_ts": state_event["origin_server_ts"],
"content": state_event["content"],
"room_id": room_id,
"sender": state_event["sender"],
"state_key": state_event["state_key"],
}
# Make the state events float off on their own
fake_prev_event_id = "$" + random_string(43)
# TODO: This is pretty much the same as some other code to handle inserting state in this file
if event_dict["type"] == EventTypes.Member:
membership = event_dict["content"].get("membership", None)
event_id, _ = await self.room_member_handler.update_membership(
await self._create_requester_for_user_id_from_app_service(
state_event["sender"], requester.app_service
),
target=UserID.from_string(event_dict["state_key"]),
room_id=room_id,
action=membership,
content=event_dict["content"],
outlier=True,
prev_event_ids=[fake_prev_event_id],
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and also update in the event when we append later.
auth_event_ids=auth_event_ids.copy(),
)
else:
# TODO: Add some complement tests that adds state that is not member joins
# and will use this code path. Maybe we only want to support join state events
# and can get rid of this `else`?
(
event,
_,
) = await self.event_creation_handler.create_and_send_nonmember_event(
await self._create_requester_for_user_id_from_app_service(
state_event["sender"], requester.app_service
),
event_dict,
outlier=True,
prev_event_ids=[fake_prev_event_id],
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and also update in the event when we append later.
auth_event_ids=auth_event_ids.copy(),
)
event_id = event.event_id
auth_event_ids.append(event_id)
events_to_create = body["events"]
inherited_depth = await self._inherit_depth_from_prev_ids(
prev_events_from_query
)
# Figure out which chunk to connect to. If they passed in
# chunk_id_from_query let's use it. The chunk ID passed in comes
# from the chunk_id in the "insertion" event from the previous chunk.
last_event_in_chunk = events_to_create[-1]
chunk_id_to_connect_to = chunk_id_from_query
base_insertion_event = None
if chunk_id_from_query:
# All but the first base insertion event should point at a fake
# event, which causes the HS to ask for the state at the start of
# the chunk later.
prev_event_ids = [fake_prev_event_id]
# TODO: Verify the chunk_id_from_query corresponds to an insertion event
pass
# Otherwise, create an insertion event to act as a starting point.
#
# We don't always have an insertion event to start hanging more history
# off of (ideally there would be one in the main DAG, but that's not the
# case if we're wanting to add history to e.g. existing rooms without
# an insertion event), in which case we just create a new insertion event
# that can then get pointed to by a "marker" event later.
else:
prev_event_ids = prev_events_from_query
base_insertion_event_dict = self._create_insertion_event_dict(
sender=requester.user.to_string(),
room_id=room_id,
origin_server_ts=last_event_in_chunk["origin_server_ts"],
)
base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
(
base_insertion_event,
_,
) = await self.event_creation_handler.create_and_send_nonmember_event(
await self._create_requester_for_user_id_from_app_service(
base_insertion_event_dict["sender"],
requester.app_service,
),
base_insertion_event_dict,
prev_event_ids=base_insertion_event_dict.get("prev_events"),
auth_event_ids=auth_event_ids,
historical=True,
depth=inherited_depth,
)
chunk_id_to_connect_to = base_insertion_event["content"][
EventContentFields.MSC2716_NEXT_CHUNK_ID
]
# Connect this current chunk to the insertion event from the previous chunk
chunk_event = {
"type": EventTypes.MSC2716_CHUNK,
"sender": requester.user.to_string(),
"room_id": room_id,
"content": {EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to},
# Since the chunk event is put at the end of the chunk,
# where the newest-in-time event is, copy the origin_server_ts from
# the last event we're inserting
"origin_server_ts": last_event_in_chunk["origin_server_ts"],
}
# Add the chunk event to the end of the chunk (newest-in-time)
events_to_create.append(chunk_event)
# Add an "insertion" event to the start of each chunk (next to the oldest-in-time
# event in the chunk) so the next chunk can be connected to this one.
insertion_event = self._create_insertion_event_dict(
sender=requester.user.to_string(),
room_id=room_id,
# Since the insertion event is put at the start of the chunk,
# where the oldest-in-time event is, copy the origin_server_ts from
# the first event we're inserting
origin_server_ts=events_to_create[0]["origin_server_ts"],
)
# Prepend the insertion event to the start of the chunk (oldest-in-time)
events_to_create = [insertion_event] + events_to_create
event_ids = []
events_to_persist = []
for ev in events_to_create:
assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
# Mark all events as historical
# This has important semantics within the Synapse internals to backfill properly
ev["content"][EventContentFields.MSC2716_HISTORICAL] = True
event_dict = {
"type": ev["type"],
"origin_server_ts": ev["origin_server_ts"],
"content": ev["content"],
"room_id": room_id,
"sender": ev["sender"], # requester.user.to_string(),
"prev_events": prev_event_ids.copy(),
}
event, context = await self.event_creation_handler.create_event(
await self._create_requester_for_user_id_from_app_service(
ev["sender"], requester.app_service
),
event_dict,
prev_event_ids=event_dict.get("prev_events"),
auth_event_ids=auth_event_ids,
historical=True,
depth=inherited_depth,
)
logger.debug(
"RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
event,
prev_event_ids,
auth_event_ids,
)
assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
event.sender,
)
events_to_persist.append((event, context))
event_id = event.event_id
event_ids.append(event_id)
prev_event_ids = [event_id]
# Persist events in reverse-chronological order so they have the
# correct stream_ordering as they are backfilled (which decrements).
# Events are sorted by (topological_ordering, stream_ordering)
# where topological_ordering is just depth.
for (event, context) in reversed(events_to_persist):
ev = await self.event_creation_handler.handle_new_client_event(
await self._create_requester_for_user_id_from_app_service(
event["sender"], requester.app_service
),
event=event,
context=context,
)
# Add the base_insertion_event to the bottom of the list we return
if base_insertion_event is not None:
event_ids.append(base_insertion_event.event_id)
return 200, {
"state_events": auth_event_ids,
"events": event_ids,
"next_chunk_id": insertion_event["content"][
EventContentFields.MSC2716_NEXT_CHUNK_ID
],
}
def on_GET(self, request, room_id):
return 501, "Not implemented"
def on_PUT(self, request, room_id):
return self.txns.fetch_or_execute_request(
request, self.on_POST, request, room_id
)
# TODO: Needs unit testing for room ID + alias joins
class JoinRoomAliasServlet(TransactionRestServlet):
class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
def __init__(self, hs):
super().__init__(hs)
self.room_member_handler = hs.get_room_member_handler()
super(ResolveRoomIdMixin, self).__init__(hs) # ensure the Mixin is set up
self.auth = hs.get_auth()
def register(self, http_server):
@ -693,24 +290,13 @@ class JoinRoomAliasServlet(TransactionRestServlet):
# cheekily send invalid bodies.
content = {}
if RoomID.is_valid(room_identifier):
room_id = room_identifier
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
remote_room_hosts = parse_strings_from_args(
args, "server_name", required=False
)
elif RoomAlias.is_valid(room_identifier):
handler = self.room_member_handler
room_alias = RoomAlias.from_string(room_identifier)
room_id_obj, remote_room_hosts = await handler.lookup_room_alias(room_alias)
room_id = room_id_obj.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
remote_room_hosts = parse_strings_from_args(args, "server_name", required=False)
room_id, remote_room_hosts = await self.resolve_room_id(
room_identifier,
remote_room_hosts,
)
await self.room_member_handler.update_membership(
requester=requester,
@ -781,12 +367,9 @@ class PublicRoomListRestServlet(TransactionRestServlet):
Codes.INVALID_PARAM,
)
try:
data = await handler.get_remote_public_room_list(
server, limit=limit, since_token=since_token
)
except HttpResponseException as e:
raise e.to_synapse_error()
data = await handler.get_remote_public_room_list(
server, limit=limit, since_token=since_token
)
else:
data = await handler.get_local_public_room_list(
limit=limit, since_token=since_token
@ -834,17 +417,15 @@ class PublicRoomListRestServlet(TransactionRestServlet):
Codes.INVALID_PARAM,
)
try:
data = await handler.get_remote_public_room_list(
server,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
except HttpResponseException as e:
raise e.to_synapse_error()
data = await handler.get_remote_public_room_list(
server,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
else:
data = await handler.get_local_public_room_list(
limit=limit,
@ -1408,18 +989,26 @@ class RoomSpaceSummaryRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self._auth = hs.get_auth()
self._space_summary_handler = hs.get_space_summary_handler()
self._room_summary_handler = hs.get_room_summary_handler()
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request, allow_guest=True)
return 200, await self._space_summary_handler.get_space_summary(
max_rooms_per_space = parse_integer(request, "max_rooms_per_space")
if max_rooms_per_space is not None and max_rooms_per_space < 0:
raise SynapseError(
400,
"Value for 'max_rooms_per_space' must be a non-negative integer",
Codes.BAD_JSON,
)
return 200, await self._room_summary_handler.get_space_summary(
requester.user.to_string(),
room_id,
suggested_only=parse_boolean(request, "suggested_only", default=False),
max_rooms_per_space=parse_integer(request, "max_rooms_per_space"),
max_rooms_per_space=max_rooms_per_space,
)
# TODO When switching to the stable endpoint, remove the POST handler.
@ -1436,12 +1025,19 @@ class RoomSpaceSummaryRestServlet(RestServlet):
)
max_rooms_per_space = content.get("max_rooms_per_space")
if max_rooms_per_space is not None and not isinstance(max_rooms_per_space, int):
raise SynapseError(
400, "'max_rooms_per_space' must be an integer", Codes.BAD_JSON
)
if max_rooms_per_space is not None:
if not isinstance(max_rooms_per_space, int):
raise SynapseError(
400, "'max_rooms_per_space' must be an integer", Codes.BAD_JSON
)
if max_rooms_per_space < 0:
raise SynapseError(
400,
"Value for 'max_rooms_per_space' must be a non-negative integer",
Codes.BAD_JSON,
)
return 200, await self._space_summary_handler.get_space_summary(
return 200, await self._room_summary_handler.get_space_summary(
requester.user.to_string(),
room_id,
suggested_only=suggested_only,
@ -1449,9 +1045,85 @@ class RoomSpaceSummaryRestServlet(RestServlet):
)
def register_servlets(hs: "HomeServer", http_server, is_worker=False):
msc2716_enabled = hs.config.experimental.msc2716_enabled
class RoomHierarchyRestServlet(RestServlet):
PATTERNS = (
re.compile(
"^/_matrix/client/unstable/org.matrix.msc2946"
"/rooms/(?P<room_id>[^/]*)/hierarchy$"
),
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self._auth = hs.get_auth()
self._room_summary_handler = hs.get_room_summary_handler()
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request, allow_guest=True)
max_depth = parse_integer(request, "max_depth")
if max_depth is not None and max_depth < 0:
raise SynapseError(
400, "'max_depth' must be a non-negative integer", Codes.BAD_JSON
)
limit = parse_integer(request, "limit")
if limit is not None and limit <= 0:
raise SynapseError(
400, "'limit' must be a positive integer", Codes.BAD_JSON
)
return 200, await self._room_summary_handler.get_room_hierarchy(
requester.user.to_string(),
room_id,
suggested_only=parse_boolean(request, "suggested_only", default=False),
max_depth=max_depth,
limit=limit,
from_token=parse_string(request, "from"),
)
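# A hypothetical request this servlet serves (room ID made up):
#
#   GET /_matrix/client/unstable/org.matrix.msc2946/rooms/!space:example.org/hierarchy
#       ?suggested_only=false&max_depth=2&limit=50
#
# A negative `max_depth` or a non-positive `limit` is rejected with a 400
# before the handler is reached, per the checks above.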
class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
PATTERNS = (
re.compile(
"^/_matrix/client/unstable/im.nheko.summary"
"/rooms/(?P<room_identifier>[^/]*)/summary$"
),
)
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._auth = hs.get_auth()
self._room_summary_handler = hs.get_room_summary_handler()
async def on_GET(
self, request: SynapseRequest, room_identifier: str
) -> Tuple[int, JsonDict]:
try:
requester = await self._auth.get_user_by_req(request, allow_guest=True)
requester_user_id: Optional[str] = requester.user.to_string()
except MissingClientTokenError:
# auth is optional
requester_user_id = None
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
remote_room_hosts = parse_strings_from_args(args, "via", required=False)
room_id, remote_room_hosts = await self.resolve_room_id(
room_identifier,
remote_room_hosts,
)
return 200, await self._room_summary_handler.get_room_summary(
requester_user_id,
room_id,
remote_room_hosts,
)
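# A hypothetical unauthenticated request (names made up):
#
#   GET /_matrix/client/unstable/im.nheko.summary/rooms/%23room%3Aexample.org/summary?via=example.org
#
# A missing access token takes the MissingClientTokenError branch above, so
# the summary is computed with requester_user_id=None.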
def register_servlets(hs: "HomeServer", http_server, is_worker=False):
RoomStateEventRestServlet(hs).register(http_server)
RoomMemberListRestServlet(hs).register(http_server)
JoinedRoomMemberListRestServlet(hs).register(http_server)
@ -1459,22 +1131,23 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False):
JoinRoomAliasServlet(hs).register(http_server)
RoomMembershipRestServlet(hs).register(http_server)
RoomSendEventRestServlet(hs).register(http_server)
if msc2716_enabled:
RoomBatchSendEventRestServlet(hs).register(http_server)
PublicRoomListRestServlet(hs).register(http_server)
RoomStateRestServlet(hs).register(http_server)
RoomRedactEventRestServlet(hs).register(http_server)
RoomTypingRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
RoomSpaceSummaryRestServlet(hs).register(http_server)
RoomHierarchyRestServlet(hs).register(http_server)
if hs.config.experimental.msc3266_enabled:
RoomSummaryRestServlet(hs).register(http_server)
RoomEventServlet(hs).register(http_server)
JoinedRoomsRestServlet(hs).register(http_server)
RoomAliasListServlet(hs).register(http_server)
SearchRestServlet(hs).register(http_server)
RoomCreateRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process.
if not is_worker:
RoomCreateRestServlet(hs).register(http_server)
RoomForgetRestServlet(hs).register(http_server)

View file

@ -0,0 +1,441 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from synapse.api.constants import EventContentFields, EventTypes
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.appservice import ApplicationService
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
parse_strings_from_args,
)
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import Requester, UserID, create_requester
from synapse.util.stringutils import random_string
logger = logging.getLogger(__name__)
class RoomBatchSendEventRestServlet(RestServlet):
"""
API endpoint which can insert a chunk of events historically back in time
next to the given `prev_event`.
`chunk_id` comes from `next_chunk_id` in the response of the batch send
endpoint and is derived from the "insertion" events added to each chunk.
It's not required for the first batch send.
`state_events_at_start` is used to define the historical state events
needed to auth the events like join events. These events will float
outside of the normal DAG as outliers and won't be visible in the chat
history, which also allows us to insert multiple chunks without having a bunch
of `@mxid joined the room` noise between each chunk.
`events` is a chronological chunk/list of events you want to insert.
There is a reverse-chronological constraint on chunks: once you insert
some messages, you can only insert older ones after that.
tl;dr: insert chunks from your most recent history -> oldest history.
POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event=<eventID>&chunk_id=<chunkID>
{
"events": [ ... ],
"state_events_at_start": [ ... ]
}
"""
PATTERNS = (
re.compile(
"^/_matrix/client/unstable/org.matrix.msc2716"
"/rooms/(?P<room_id>[^/]*)/batch_send$"
),
)
def __init__(self, hs):
super().__init__()
self.hs = hs
self.store = hs.get_datastore()
self.state_store = hs.get_storage().state
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
self.txns = HttpTransactionCache(hs)
async def _inherit_depth_from_prev_ids(self, prev_event_ids) -> int:
(
most_recent_prev_event_id,
most_recent_prev_event_depth,
) = await self.store.get_max_depth_of(prev_event_ids)
# We want to insert the historical event after the `prev_event` but before the successor event
#
# We inherit depth from the successor event instead of the `prev_event`
# because events returned from `/messages` are first sorted by `topological_ordering`
# which is just the `depth` and then tie-break with `stream_ordering`.
#
# We mark these inserted historical events as "backfilled" which gives them a
# negative `stream_ordering`. If we use the same depth as the `prev_event`,
# then our historical event will tie-break and be sorted before the `prev_event`
# when it should come after.
#
# We want to use the successor event's depth so the historical events
# appear after `prev_event` (their `depth` is larger) but before the
# successor event (their negative `stream_ordering` tie-breaks first).
successor_event_ids = await self.store.get_successor_events(
[most_recent_prev_event_id]
)
# If we can't find any successor events, then it's a forward extremity of
# historical messages and we can just inherit from the previous historical
# event, which we can already assume has the correct depth for where we
# want to insert.
if not successor_event_ids:
depth = most_recent_prev_event_depth
else:
(
_,
oldest_successor_depth,
) = await self.store.get_min_depth_of(successor_event_ids)
depth = oldest_successor_depth
return depth
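# A minimal worked example (hypothetical depths) of why the successor's depth
# is inherited: with `prev_event` at depth 5 and its successor at depth 6,
# backfilled events inserted at depth 6 with negative stream orderings sort as
#
#   (5, p)   prev_event
#   (6, -2)  historical event   <- backfilled, negative stream_ordering
#   (6, -1)  historical event
#   (6, s)   successor event    <- s > 0, so the successor still sorts last
#
# Reusing depth 5 instead would tie-break the historical events *before*
# `prev_event`, which is exactly the failure mode described above.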
def _create_insertion_event_dict(
self, sender: str, room_id: str, origin_server_ts: int
):
"""Creates an event dict for an "insertion" event with the proper fields
and a random chunk ID.
Args:
sender: The event author MXID
room_id: The room ID that the event belongs to
origin_server_ts: Timestamp when the event was sent
Returns:
The event dictionary for the "insertion" event.
"""
next_chunk_id = random_string(8)
insertion_event = {
"type": EventTypes.MSC2716_INSERTION,
"sender": sender,
"room_id": room_id,
"content": {
EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
EventContentFields.MSC2716_HISTORICAL: True,
},
"origin_server_ts": origin_server_ts,
}
return insertion_event
async def _create_requester_for_user_id_from_app_service(
self, user_id: str, app_service: ApplicationService
) -> Requester:
"""Creates a new requester for the given user_id
and validates that the app service is allowed to control
the given user.
Args:
user_id: The author MXID that the app service is controlling
app_service: The app service that controls the user
Returns:
Requester object
"""
await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
return create_requester(user_id, app_service=app_service)
async def on_POST(self, request, room_id):
requester = await self.auth.get_user_by_req(request, allow_guest=False)
if not requester.app_service:
raise AuthError(
403,
"Only application services can use the /batchsend endpoint",
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["state_events_at_start", "events"])
prev_events_from_query = parse_strings_from_args(request.args, "prev_event")
chunk_id_from_query = parse_string(request, "chunk_id")
if prev_events_from_query is None:
raise SynapseError(
400,
"prev_event query parameter is required when inserting historical messages back in time",
errcode=Codes.MISSING_PARAM,
)
# For the event we are inserting next to (`prev_events_from_query`),
# find the most recent auth events (derived from state events) that
# allowed that message to be sent. We will use that as a base
# to auth our historical messages against.
(
most_recent_prev_event_id,
_,
) = await self.store.get_max_depth_of(prev_events_from_query)
# mapping from (type, state_key) -> state_event_id
prev_state_map = await self.state_store.get_state_ids_for_event(
most_recent_prev_event_id
)
# List of state event IDs
prev_state_ids = list(prev_state_map.values())
auth_event_ids = prev_state_ids
state_events_at_start = []
for state_event in body["state_events_at_start"]:
assert_params_in_dict(
state_event, ["type", "origin_server_ts", "content", "sender"]
)
logger.debug(
"RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
state_event,
auth_event_ids,
)
event_dict = {
"type": state_event["type"],
"origin_server_ts": state_event["origin_server_ts"],
"content": state_event["content"],
"room_id": room_id,
"sender": state_event["sender"],
"state_key": state_event["state_key"],
}
# Mark all events as historical
event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
# Make the state events float off on their own
fake_prev_event_id = "$" + random_string(43)
# TODO: This is pretty much the same as some other code to handle inserting state in this file
if event_dict["type"] == EventTypes.Member:
membership = event_dict["content"].get("membership", None)
event_id, _ = await self.room_member_handler.update_membership(
await self._create_requester_for_user_id_from_app_service(
state_event["sender"], requester.app_service
),
target=UserID.from_string(event_dict["state_key"]),
room_id=room_id,
action=membership,
content=event_dict["content"],
outlier=True,
prev_event_ids=[fake_prev_event_id],
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and also update in the event when we append later.
auth_event_ids=auth_event_ids.copy(),
)
else:
# TODO: Add some complement tests that adds state that is not member joins
# and will use this code path. Maybe we only want to support join state events
# and can get rid of this `else`?
(
event,
_,
) = await self.event_creation_handler.create_and_send_nonmember_event(
await self._create_requester_for_user_id_from_app_service(
state_event["sender"], requester.app_service
),
event_dict,
outlier=True,
prev_event_ids=[fake_prev_event_id],
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and also update in the event when we append later.
auth_event_ids=auth_event_ids.copy(),
)
event_id = event.event_id
state_events_at_start.append(event_id)
auth_event_ids.append(event_id)
events_to_create = body["events"]
inherited_depth = await self._inherit_depth_from_prev_ids(
prev_events_from_query
)
# Figure out which chunk to connect to. If they passed in
# chunk_id_from_query let's use it. The chunk ID passed in comes
# from the chunk_id in the "insertion" event from the previous chunk.
last_event_in_chunk = events_to_create[-1]
chunk_id_to_connect_to = chunk_id_from_query
base_insertion_event = None
if chunk_id_from_query:
# All but the first base insertion event should point at a fake
# event, which causes the HS to ask for the state at the start of
# the chunk later.
prev_event_ids = [fake_prev_event_id]
# TODO: Verify the chunk_id_from_query corresponds to an insertion event
pass
# Otherwise, create an insertion event to act as a starting point.
#
# We don't always have an insertion event to start hanging more history
# off of (ideally there would be one in the main DAG, but that's not the
# case if we're wanting to add history to e.g. existing rooms without
# an insertion event), in which case we just create a new insertion event
# that can then get pointed to by a "marker" event later.
else:
prev_event_ids = prev_events_from_query
base_insertion_event_dict = self._create_insertion_event_dict(
sender=requester.user.to_string(),
room_id=room_id,
origin_server_ts=last_event_in_chunk["origin_server_ts"],
)
base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
(
base_insertion_event,
_,
) = await self.event_creation_handler.create_and_send_nonmember_event(
await self._create_requester_for_user_id_from_app_service(
base_insertion_event_dict["sender"],
requester.app_service,
),
base_insertion_event_dict,
prev_event_ids=base_insertion_event_dict.get("prev_events"),
auth_event_ids=auth_event_ids,
historical=True,
depth=inherited_depth,
)
chunk_id_to_connect_to = base_insertion_event["content"][
EventContentFields.MSC2716_NEXT_CHUNK_ID
]
# Connect this current chunk to the insertion event from the previous chunk
chunk_event = {
"type": EventTypes.MSC2716_CHUNK,
"sender": requester.user.to_string(),
"room_id": room_id,
"content": {
EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to,
EventContentFields.MSC2716_HISTORICAL: True,
},
# Since the chunk event is put at the end of the chunk,
# where the newest-in-time event is, copy the origin_server_ts from
# the last event we're inserting
"origin_server_ts": last_event_in_chunk["origin_server_ts"],
}
# Add the chunk event to the end of the chunk (newest-in-time)
events_to_create.append(chunk_event)
# Add an "insertion" event to the start of each chunk (next to the oldest-in-time
# event in the chunk) so the next chunk can be connected to this one.
insertion_event = self._create_insertion_event_dict(
sender=requester.user.to_string(),
room_id=room_id,
# Since the insertion event is put at the start of the chunk,
# where the oldest-in-time event is, copy the origin_server_ts from
# the first event we're inserting
origin_server_ts=events_to_create[0]["origin_server_ts"],
)
# Prepend the insertion event to the start of the chunk (oldest-in-time)
events_to_create = [insertion_event] + events_to_create
event_ids = []
events_to_persist = []
for ev in events_to_create:
assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
event_dict = {
"type": ev["type"],
"origin_server_ts": ev["origin_server_ts"],
"content": ev["content"],
"room_id": room_id,
"sender": ev["sender"], # requester.user.to_string(),
"prev_events": prev_event_ids.copy(),
}
# Mark all events as historical
event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
event, context = await self.event_creation_handler.create_event(
await self._create_requester_for_user_id_from_app_service(
ev["sender"], requester.app_service
),
event_dict,
prev_event_ids=event_dict.get("prev_events"),
auth_event_ids=auth_event_ids,
historical=True,
depth=inherited_depth,
)
logger.debug(
"RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
event,
prev_event_ids,
auth_event_ids,
)
assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
event.sender,
)
events_to_persist.append((event, context))
event_id = event.event_id
event_ids.append(event_id)
prev_event_ids = [event_id]
# Persist events in reverse-chronological order so they have the
# correct stream_ordering as they are backfilled (which decrements).
# Events are sorted by (topological_ordering, stream_ordering)
# where topological_ordering is just depth.
for (event, context) in reversed(events_to_persist):
ev = await self.event_creation_handler.handle_new_client_event(
await self._create_requester_for_user_id_from_app_service(
event["sender"], requester.app_service
),
event=event,
context=context,
)
# Add the base_insertion_event to the bottom of the list we return
if base_insertion_event is not None:
event_ids.append(base_insertion_event.event_id)
return 200, {
"state_events": state_events_at_start,
"events": event_ids,
"next_chunk_id": insertion_event["content"][
EventContentFields.MSC2716_NEXT_CHUNK_ID
],
}
def on_GET(self, request, room_id):
return 501, "Not implemented"
def on_PUT(self, request, room_id):
return self.txns.fetch_or_execute_request(
request, self.on_POST, request, room_id
)
def register_servlets(hs, http_server):
msc2716_enabled = hs.config.experimental.msc2716_enabled
if msc2716_enabled:
RoomBatchSendEventRestServlet(hs).register(http_server)
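# A hypothetical call sequence (room ID, event ID and chunk IDs made up):
#
#   POST .../rooms/!room:example.org/batch_send?prev_event=$abc123
#     -> {"next_chunk_id": "aBcDeFgH", "events": [...], "state_events": [...]}
#
#   POST .../rooms/!room:example.org/batch_send?prev_event=$abc123&chunk_id=aBcDeFgH
#     -> {"next_chunk_id": "iJkLmNoP", ...}
#
# Each response's `next_chunk_id` becomes the `chunk_id` of the next (older)
# batch, matching the reverse-chronological constraint in the class docstring.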

View file

@ -259,10 +259,11 @@ class SyncRestServlet(RestServlet):
# Corresponding synapse issue: https://github.com/matrix-org/synapse/issues/10456
response["device_one_time_keys_count"] = sync_result.device_one_time_keys_count
if sync_result.device_unused_fallback_key_types:
response[
"org.matrix.msc2732.device_unused_fallback_key_types"
] = sync_result.device_unused_fallback_key_types
# https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
# states that this field should always be included, as long as the server supports the feature.
response[
"org.matrix.msc2732.device_unused_fallback_key_types"
] = sync_result.device_unused_fallback_key_types
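# A hypothetical fragment of the resulting sync response (the algorithm name
# is illustrative):
#
#   "org.matrix.msc2732.device_unused_fallback_key_types": ["signed_curve25519"]
#
# The field is now emitted unconditionally (even as an empty list), so clients
# can tell "no unused fallback keys" apart from "feature not supported".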
if joined:
response["rooms"][Membership.JOIN] = joined

View file

@ -1,13 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -1,13 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -17,7 +17,7 @@ import hashlib
import hmac
from synapse.http.servlet import RestServlet
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client._base import client_patterns
class VoipRestServlet(RestServlet):

View file

@ -836,7 +836,9 @@ class MediaRepository:
return {"deleted": deleted}
async def delete_local_media(self, media_id: str) -> Tuple[List[str], int]:
async def delete_local_media_ids(
self, media_ids: List[str]
) -> Tuple[List[str], int]:
"""
Delete the given local media IDs from this server
@ -845,7 +847,7 @@ class MediaRepository:
Returns:
A tuple of (list of deleted media IDs, total deleted media IDs).
"""
return await self._remove_local_media_from_disk([media_id])
return await self._remove_local_media_from_disk(media_ids)
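# A minimal usage sketch (media IDs made up); the plural method forwards the
# whole list where the old single-ID method wrapped its argument in a list:
#
#   deleted_ids, total = await media_repository.delete_local_media_ids(
#       ["abcdefghijklmnop", "qrstuvwxyz012345"]
#   )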
async def delete_old_local_media(
self,

Some files were not shown because too many files have changed in this diff.