Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2024-12-25 20:49:24 -05:00)

Merge branch 'develop' of github.com:matrix-org/synapse into erikj/require_format_version
Commit: efb8ed1d45
.coveragerc
@@ -1,11 +1,7 @@
 [run]
 branch = True
 parallel = True
-source = synapse
+include = synapse/*
 
-[paths]
-source=
-    coverage
-
 [report]
 precision = 2
.gitignore (vendored, 6 lines changed)
@@ -25,9 +25,9 @@ homeserver*.pid
 *.tls.dh
 *.tls.key
 
-.coverage
-.coverage.*
-!.coverage.rc
+.coverage*
+coverage.*
+!.coveragerc
 htmlcov
 
 demo/*/*.db
changelog.d/4306.misc (new file)
@@ -0,0 +1 @@
+Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+.

changelog.d/4435.bugfix (new file)
@@ -0,0 +1 @@
+Fix None guard in calling config.server.is_threepid_reserved

changelog.d/4444.misc (new file)
@@ -0,0 +1 @@
+Generate the debian config during build

changelog.d/4452.bugfix (new file)
@@ -0,0 +1 @@
+Don't send IP addresses as SNI

changelog.d/4458.misc (new file)
@@ -0,0 +1 @@
+Clarify documentation for the `public_baseurl` config param

changelog.d/4459.misc (new file)
@@ -0,0 +1 @@
+Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+.

changelog.d/4460.bugfix (new file)
@@ -0,0 +1 @@
+Fix UnboundLocalError in post_urlencoded_get_json

changelog.d/4461.bugfix (new file)
@@ -0,0 +1 @@
+Add a timeout to filtered room directory queries.

changelog.d/4464.misc (new file)
@@ -0,0 +1 @@
+Move SRV logic into the Agent layer
debian/build_virtualenv (vendored, 34 lines changed)
@@ -45,6 +45,10 @@ dh_virtualenv \
     --extra-pip-arg="--compile" \
     --extras="all"
 
+PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
+VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
+TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
+
 # we copy the tests to a temporary directory so that we can put them on the
 # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
 tmpdir=`mktemp -d`
@@ -53,8 +57,34 @@ trap "rm -r $tmpdir" EXIT
 cp -r tests "$tmpdir"
 
 PYTHONPATH="$tmpdir" \
-    debian/matrix-synapse-py3/opt/venvs/matrix-synapse/bin/python \
-    -B -m twisted.trial --reporter=text -j2 tests
+    "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
+
+# build the config file
+"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
+        --config-dir="/etc/matrix-synapse" \
+        --data-dir="/var/lib/matrix-synapse" |
+    perl -pe '
+        # tweak the paths to the tls certs and signing keys
+        /^tls_.*_path:/ and s/SERVERNAME/homeserver/;
+        /^signing_key_path:/ and s/SERVERNAME/homeserver/;
+
+        # tweak the pid file location
+        /^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;
+
+        # tweak the path to the log config
+        /^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;
+
+        # tweak the path to the media store
+        /^media_store_path:/ and s#/media_store#/media#;
+
+        # remove the server_name setting, which is set in a separate file
+        /^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";
+
+        # remove the report_stats setting, which is set in a separate file
+        /^# report_stats:/ and $_ = "";
+    ' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
+
 
 # add a dependency on the right version of python to substvars.
 PYPKG=`basename $SNAKE`
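For readers who don't speak perl, the filter above is a line-oriented rewrite of the generated YAML. A rough Python equivalent of the path-tweaking rules only (a sketch, not part of the build script, which uses perl; the server_name/report_stats removals are omitted here):

import re
import sys

def tweak(line):
    # point the TLS cert and signing key paths at the "homeserver" prefix
    if re.match(r'^(tls_.*_path|signing_key_path):', line):
        return line.replace('SERVERNAME', 'homeserver')
    # pin the pid file to the Debian location
    if line.startswith('pid_file:'):
        return 'pid_file: "/var/run/matrix-synapse.pid"\n'
    # point the log config and media store at their Debian paths
    if line.startswith('log_config:'):
        return line.replace('SERVERNAME.log.config', 'log.yaml')
    if line.startswith('media_store_path:'):
        return line.replace('/media_store', '/media')
    return line

for line in sys.stdin:
    sys.stdout.write(tweak(line))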
debian/homeserver.yaml (vendored): entire file deleted, 614 lines. The static config is replaced by one generated at package build time (see debian/build_virtualenv above); the removed file read:

# vim:ft=yaml

# PEM encoded X509 certificate for TLS.
# You can replace the self-signed certificate that synapse
# autogenerates on launch with your own SSL certificate + key pair
# if you like. Any required intermediary certificates can be
# appended after the primary certificate in hierarchical order.
tls_certificate_path: "/etc/matrix-synapse/homeserver.tls.crt"

# PEM encoded private key for TLS
tls_private_key_path: "/etc/matrix-synapse/homeserver.tls.key"

# Don't bind to the https port
no_tls: False

# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the loadbalancers to this list if they are different to the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# key. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
#   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
tls_fingerprints: []
# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]


## Server ##

# When running as a daemon, the file to store the pid in
pid_file: "/var/run/matrix-synapse.pid"

# CPU affinity mask. Setting this restricts the CPUs on which the
# process will be scheduled. It is represented as a bitmask, with the
# lowest order bit corresponding to the first logical CPU and the
# highest order bit corresponding to the last logical CPU. Not all CPUs
# may exist on a given system but a mask may specify more CPUs than are
# present.
#
# For example:
#    0x00000001 is processor #0,
#    0x00000003 is processors #0 and #1,
#    0xFFFFFFFF is all processors (#0 through #31).
#
# Pinning a Python process to a single CPU is desirable, because Python
# is inherently single-threaded due to the GIL, and can suffer a
# 30-40% slowdown due to cache blow-out and thread context switching
# if the scheduler happens to schedule the underlying threads across
# different cores. See
# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
#
# cpu_affinity: 0xFFFFFFFF

# The path to the web client which will be served at /_matrix/client/
# if 'webclient' is configured under the 'listeners' configuration.
#
# web_client_location: "/path/to/web/root"

# The public-facing base URL for the client API (not including _matrix/...)
# public_baseurl: https://example.com:8448/

# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
soft_file_limit: 0

# The GC threshold parameters to pass to `gc.set_threshold`, if defined
# gc_thresholds: [700, 10, 10]

# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is -1, means no upper limit.
# filter_timeline_limit: 5000

# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
# block_non_admin_invites: True

# Restrict federation to the following whitelist of domains.
# N.B. we recommend also firewalling your federation listener to limit
# inbound federation traffic as early as possible, rather than relying
# purely on this application-layer restriction. If not specified, the
# default is to whitelist everything.
#
# federation_domain_whitelist:
#  - lon.example.com
#  - nyc.example.com
#  - syd.example.com

# List of ports that Synapse should listen on, their purpose and their
# configuration.
listeners:
  # Main HTTPS listener
  # For when matrix traffic is sent directly to synapse.
  -
    # The port to listen for HTTPS requests on.
    port: 8448

    # Local addresses to listen on.
    # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
    # addresses by default. For most other OSes, this will only listen
    # on IPv6.
    bind_addresses:
      - '::'
      - '0.0.0.0'

    # This is a 'http' listener, allows us to specify 'resources'.
    type: http

    tls: true

    # Use the X-Forwarded-For (XFF) header as the client IP and not the
    # actual client IP.
    x_forwarded: false

    # List of HTTP resources to serve on this listener.
    resources:
      -
        # List of resources to host on this listener.
        names:
          - client     # The client-server APIs, both v1 and v2
          - webclient  # The bundled webclient.

        # Should synapse compress HTTP responses to clients that support it?
        # This should be disabled if running synapse behind a load balancer
        # that can do automatic compression.
        compress: true

      - names: [federation]  # Federation APIs
        compress: false

    # optional list of additional endpoints which can be loaded via
    # dynamic modules
    # additional_resources:
    #   "/_matrix/my/custom/endpoint":
    #     module: my_module.CustomRequestHandler
    #     config: {}

  # Unsecure HTTP listener,
  # For when matrix traffic passes through loadbalancer that unwraps TLS.
  - port: 8008
    tls: false
    bind_addresses: ['::', '0.0.0.0']
    type: http

    x_forwarded: false

    resources:
      - names: [client, webclient]
        compress: true
      - names: [federation]
        compress: false

  # Turn on the twisted ssh manhole service on localhost on the given
  # port.
  # - port: 9000
  #   bind_addresses: ['::1', '127.0.0.1']
  #   type: manhole


# Database configuration
database:
  # The database engine name
  name: "sqlite3"
  # Arguments to pass to the engine
  args:
    # Path to the database
    database: "/var/lib/matrix-synapse/homeserver.db"

# Number of events to cache in memory.
event_cache_size: "10K"


# A yaml python logging config file
log_config: "/etc/matrix-synapse/log.yaml"


## Ratelimiting ##

# Number of messages a client can send per second
rc_messages_per_second: 0.2

# Number of message a client can send before being throttled
rc_message_burst_count: 10.0

# The federation window size in milliseconds
federation_rc_window_size: 1000

# The number of federation requests from a single server in a window
# before the server will delay processing the request.
federation_rc_sleep_limit: 10

# The duration in milliseconds to delay processing events from
# remote servers by if they go over the sleep limit.
federation_rc_sleep_delay: 500

# The maximum number of concurrent federation requests allowed
# from a single server
federation_rc_reject_limit: 50

# The number of federation requests to concurrently process from a
# single server
federation_rc_concurrent: 3


# Directory where uploaded images and attachments are stored.
media_store_path: "/var/lib/matrix-synapse/media"

# Media storage providers allow media to be stored in different
# locations.
# media_storage_providers:
# - module: file_system
#   # Whether to write new local files.
#   store_local: false
#   # Whether to write new remote media
#   store_remote: false
#   # Whether to block upload requests waiting for write to this
#   # provider to complete
#   store_synchronous: false
#   config:
#     directory: /mnt/some/other/directory

# Directory where in-progress uploads are stored.
uploads_path: "/var/lib/matrix-synapse/uploads"

# The largest allowed upload size in bytes
max_upload_size: "10M"

# Maximum number of pixels that will be thumbnailed
max_image_pixels: "32M"

# Whether to generate new thumbnails on the fly to precisely match
# the resolution requested by the client. If true then whenever
# a new resolution is requested by the client the server will
# generate a new thumbnail. If false the server will pick a thumbnail
# from a precalculated list.
dynamic_thumbnails: false

# List of thumbnail to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale

# Is the preview URL API enabled? If enabled, you *must* specify
# an explicit url_preview_ip_range_blacklist of IPs that the spider is
# denied from accessing.
url_preview_enabled: False

# List of IP address CIDR ranges that the URL preview spider is denied
# from accessing. There are no defaults: you must explicitly
# specify a list for URL previewing to work. You should specify any
# internal services in your network that you do not want synapse to try
# to connect to, otherwise anyone in any Matrix room could cause your
# synapse to issue arbitrary GET requests to your internal services,
# causing serious security issues.
#
# url_preview_ip_range_blacklist:
# - '127.0.0.0/8'
# - '10.0.0.0/8'
# - '172.16.0.0/12'
# - '192.168.0.0/16'
# - '100.64.0.0/10'
# - '169.254.0.0/16'
#
# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.
# This is useful for specifying exceptions to wide-ranging blacklisted
# target IP ranges - e.g. for enabling URL previews for a specific private
# website only visible in your network.
#
# url_preview_ip_range_whitelist:
# - '192.168.1.1'

# Optional list of URL matches that the URL preview spider is
# denied from accessing. You should use url_preview_ip_range_blacklist
# in preference to this, otherwise someone could define a public DNS
# entry that points to a private IP address and circumvent the blacklist.
# This is more useful if you know there is an entire shape of URL that
# you know that will never want synapse to try to spider.
#
# Each list entry is a dictionary of url component attributes as returned
# by urlparse.urlsplit as applied to the absolute form of the URL. See
# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
# The values of the dictionary are treated as an filename match pattern
# applied to that component of URLs, unless they start with a ^ in which
# case they are treated as a regular expression match. If all the
# specified component matches for a given list item succeed, the URL is
# blacklisted.
#
# url_preview_url_blacklist:
# # blacklist any URL with a username in its URI
# - username: '*'
#
# # blacklist all *.google.com URLs
# - netloc: 'google.com'
# - netloc: '*.google.com'
#
# # blacklist all plain HTTP URLs
# - scheme: 'http'
#
# # blacklist http(s)://www.acme.com/foo
# - netloc: 'www.acme.com'
#   path: '/foo'
#
# # blacklist any URL with a literal IPv4 address
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

# The largest allowed URL preview spidering size in bytes
max_spider_size: "10M"


## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.

# This Home Server's ReCAPTCHA public key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"

# This Home Server's ReCAPTCHA private key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"

# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha
# public/private key.
enable_registration_captcha: False

# A secret key used to bypass the captcha test entirely.
#captcha_bypass_secret: "YOUR_SECRET_HERE"

# The API endpoint to use for verifying m.login.recaptcha responses.
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"


## Turn ##

# The public URIs of the TURN server to give to clients
turn_uris: []

# The shared secret used to compute passwords for the TURN server
turn_shared_secret: "YOUR_SHARED_SECRET"

# The Username and password if the TURN server needs them and
# does not use a token
#turn_username: "TURNSERVER_USERNAME"
#turn_password: "TURNSERVER_PASSWORD"

# How long generated TURN credentials last
turn_user_lifetime: "1h"

# Whether guests should be allowed to use the TURN server.
# This defaults to True, otherwise VoIP will be unreliable for guests.
# However, it does introduce a slight security risk as it allows users to
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
turn_allow_guests: False


## Registration ##

# Enable registration for new users.
enable_registration: False

# The user must provide all of the below types of 3PID when registering.
#
# registrations_require_3pid:
#     - email
#     - msisdn

# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# allowed_local_3pids:
#     - medium: email
#       pattern: ".*@matrix\.org"
#     - medium: email
#       pattern: ".*@vector\.im"
#     - medium: msisdn
#       pattern: "\+44"

# If set, allows registration by anyone who also has the shared
# secret, even if registration is otherwise disabled.
# registration_shared_secret: <PRIVATE STRING>

# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number is 12 (which equates to 2^12 rounds).
# N.B. that increasing this will exponentially increase the time required
# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
bcrypt_rounds: 12

# Allows users to register as guests without a password/email/etc, and
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
allow_guest_access: False

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
    - matrix.org
    - vector.im
    - riot.im

# Users who register on this homeserver will automatically be joined
# to these rooms
#auto_join_rooms:
#    - "#example:example.com"


## Metrics ###

# Enable collection and rendering of performance metrics
enable_metrics: False

## API Configuration ##

# A list of event types that will be included in the room_invite_state
room_invite_state_types:
    - "m.room.join_rules"
    - "m.room.canonical_alias"
    - "m.room.avatar"
    - "m.room.name"


# A list of application service config file to use
app_service_config_files: []


# macaroon_secret_key: <PRIVATE STRING>

# Used to enable access token expiration.
expire_access_token: False

## Signing Keys ##

# Path to the signing key to sign messages with
signing_key_path: "/etc/matrix-synapse/homeserver.signing.key"

# The keys that the server used to sign messages with but won't use
# to sign new messages. E.g. it has lost its private key
old_signing_keys: {}
#  "ed25519:auto":
#    # Base64 encoded public key
#    key: "The public part of your old signing key."
#    # Millisecond POSIX timestamp when the key expired.
#    expired_ts: 123456789123

# How long key response published by this server is valid for.
# Used to set the valid_until_ts in /key/v2 APIs.
# Determines how quickly servers will query to check which keys
# are still valid.
key_refresh_interval: "1d" # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"


# Enable SAML2 for registration and login. Uses pysaml2
# config_path:      Path to the sp_conf.py configuration file
# idp_redirect_url: Identity provider URL which will redirect
#                   the user back to /login/saml2 with proper info.
# See pysaml2 docs for format of config.
#saml2_config:
#   enabled: true
#   config_path: "/home/erikj/git/synapse/sp_conf.py"
#   idp_redirect_url: "http://test/idp"


# Enable CAS for registration and login.
#cas_config:
#   enabled: true
#   server_url: "https://cas-server.com"
#   service_url: "https://homeserver.domain.com:8448"
#   #required_attributes:
#   #    name: value


# The JWT needs to contain a globally unique "sub" (subject) claim.
#
# jwt_config:
#    enabled: true
#    secret: "a secret"
#    algorithm: "HS256"


# Enable password for login.
password_config:
   enabled: true
   # Uncomment and change to a secret random string for extra security.
   # DO NOT CHANGE THIS AFTER INITIAL SETUP!
   #pepper: ""


# Enable sending emails for notification events
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
#
#email:
#   enable_notifs: false
#   smtp_host: "localhost"
#   smtp_port: 25
#   smtp_user: "exampleusername"
#   smtp_pass: "examplepassword"
#   require_transport_security: False
#   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
#   app_name: Matrix
#   template_dir: res/templates
#   notif_template_html: notif_mail.html
#   notif_template_text: notif_mail.txt
#   notif_for_new_users: True
#   riot_base_url: "http://localhost/riot"


# password_providers:
#    - module: "ldap_auth_provider.LdapAuthProvider"
#      config:
#        enabled: true
#        uri: "ldap://ldap.example.com:389"
#        start_tls: true
#        base: "ou=users,dc=example,dc=com"
#        attributes:
#           uid: "cn"
#           mail: "email"
#           name: "givenName"
#        #bind_dn:
#        #bind_password:
#        #filter: "(objectClass=posixAccount)"


# Clients requesting push notifications can either have the body of
# the message sent in the notification poke along with other details
# like the sender, or just the event ID and room ID (`event_id_only`).
# If clients choose the former, this option controls whether the
# notification request includes the content of the event (other details
# like the sender are still included). For `event_id_only` push, it
# has no effect.

# For modern android devices the notification content will still appear
# because it is loaded by the app. iPhone, however will send a
# notification saying only that a message arrived and who it came from.
#
#push:
#   include_content: true


# spam_checker:
#     module: "my_custom_project.SuperSpamChecker"
#     config:
#         example_option: 'things'


# Whether to allow non server admins to create groups on this server
enable_group_creation: false

# If enabled, non server admins can only create groups with local parts
# starting with this prefix
# group_creation_prefix: "unofficial/"


# User Directory configuration
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to run
# UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
#   search_all_users: false
debian/install (vendored, 1 line changed)
@@ -1,2 +1 @@
-debian/homeserver.yaml etc/matrix-synapse
 debian/log.yaml etc/matrix-synapse
synapse/api/auth.py
@@ -819,7 +819,9 @@ class Auth(object):
         elif threepid:
             # If the user does not exist yet, but is signing up with a
             # reserved threepid then pass auth check
-            if is_threepid_reserved(self.hs.config, threepid):
+            if is_threepid_reserved(
+                self.hs.config.mau_limits_reserved_threepids, threepid
+            ):
                 return
         # Else if there is no room in the MAU bucket, bail
         current_mau = yield self.store.get_monthly_active_count()
synapse/config/server.py
@@ -256,7 +256,11 @@ class ServerConfig(Config):
        #
        # web_client_location: "/path/to/web/root"

-       # The public-facing base URL for the client API (not including _matrix/...)
+       # The public-facing base URL that clients use to access this HS
+       # (not including _matrix/...). This is the same URL a user would
+       # enter into the 'custom HS URL' field on their client. If you
+       # use synapse with a reverse proxy, this should be the URL to reach
+       # synapse via the proxy.
        # public_baseurl: https://example.com:8448/

        # Set the soft limit on the number of file descriptors synapse can use
@@ -420,19 +424,18 @@ class ServerConfig(Config):
                          " service on the given port.")


-def is_threepid_reserved(config, threepid):
+def is_threepid_reserved(reserved_threepids, threepid):
     """Check the threepid against the reserved threepid config
     Args:
-        config(ServerConfig) - to access server config attributes
+        reserved_threepids([dict]) - list of reserved threepids
         threepid(dict) - The threepid to test for

     Returns:
         boolean Is the threepid undertest reserved_user
     """

-    for tp in config.mau_limits_reserved_threepids:
-        if (threepid['medium'] == tp['medium']
-                and threepid['address'] == tp['address']):
+    for tp in reserved_threepids:
+        if (threepid['medium'] == tp['medium'] and threepid['address'] == tp['address']):
             return True
     return False
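With the helper now depending only on the reserved list rather than the whole config object, it is trivial to exercise in isolation; a hypothetical call (data purely illustrative):

reserved = [{'medium': 'email', 'address': 'admin@example.com'}]
assert is_threepid_reserved(reserved, {'medium': 'email', 'address': 'admin@example.com'})
assert not is_threepid_reserved(reserved, {'medium': 'email', 'address': 'user@example.com'})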
synapse/crypto/context_factory.py
@@ -17,6 +17,7 @@ from zope.interface import implementer

 from OpenSSL import SSL, crypto
 from twisted.internet._sslverify import _defaultCurveName
+from twisted.internet.abstract import isIPAddress, isIPv6Address
 from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
 from twisted.internet.ssl import CertificateOptions, ContextFactory
 from twisted.python.failure import Failure
@@ -98,8 +99,14 @@ class ClientTLSOptions(object):

     def __init__(self, hostname, ctx):
         self._ctx = ctx
-        self._hostname = hostname
-        self._hostnameBytes = _idnaBytes(hostname)
+
+        if isIPAddress(hostname) or isIPv6Address(hostname):
+            self._hostnameBytes = hostname.encode('ascii')
+            self._sendSNI = False
+        else:
+            self._hostnameBytes = _idnaBytes(hostname)
+            self._sendSNI = True
+
         ctx.set_info_callback(
             _tolerateErrors(self._identityVerifyingInfoCallback)
         )
@@ -111,7 +118,9 @@ class ClientTLSOptions(object):
         return connection

     def _identityVerifyingInfoCallback(self, connection, where, ret):
-        if where & SSL.SSL_CB_HANDSHAKE_START:
+        # Literal IPv4 and IPv6 addresses are not permitted
+        # as host names according to the RFCs
+        if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
             connection.set_tlsext_host_name(self._hostnameBytes)
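The rule being enforced comes from RFC 6066: literal IP addresses are not legal values for the TLS server_name extension. The same classification can be tried standalone with twisted's address helpers (a quick illustration, assuming a standard twisted install):

from twisted.internet.abstract import isIPAddress, isIPv6Address

for host in ['matrix.org', '10.0.0.1', '2001:db8::1']:
    send_sni = not (isIPAddress(host) or isIPv6Address(host))
    print(host, 'sends SNI' if send_sni else 'sends no SNI')
# matrix.org sends SNI; both IP literals are encoded as plain ASCII
# and the server_name extension is skipped for them.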
synapse/events/__init__.py
@@ -46,12 +46,13 @@ class _EventInternalMetadata(object):
     def is_outlier(self):
         return getattr(self, "outlier", False)

-    def is_new_remote_event(self):
-        """Whether this is a new remote event, like an invite or an invite
+    def is_out_of_band_membership(self):
+        """Whether this is an out of band membership, like an invite or an invite
         rejection. This is needed as those events are marked as outliers, but
-        they still need to be processed.
+        they still need to be processed as if they're new events (e.g. updating
+        invite state in the database, relaying to clients, etc).
         """
-        return getattr(self, "new_remote_event", False)
+        return getattr(self, "out_of_band_membership", False)

     def get_send_on_behalf_of(self):
         """Whether this server should send the event on behalf of another server.
@ -550,6 +550,8 @@ class FederationClient(FederationBase):
|
|||||||
Does so by asking one of the already participating servers to create an
|
Does so by asking one of the already participating servers to create an
|
||||||
event with proper context.
|
event with proper context.
|
||||||
|
|
||||||
|
Returns a fully signed and hashed event.
|
||||||
|
|
||||||
Note that this does not append any events to any graphs.
|
Note that this does not append any events to any graphs.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@ -703,7 +705,8 @@ class FederationClient(FederationBase):
|
|||||||
break
|
break
|
||||||
|
|
||||||
if room_version is None:
|
if room_version is None:
|
||||||
# We use this error has that is what
|
# If the state doesn't have a create event then the room is
|
||||||
|
# invalid, and it would fail auth checks anyway.
|
||||||
raise SynapseError(400, "No create event in state")
|
raise SynapseError(400, "No create event in state")
|
||||||
|
|
||||||
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
|
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
|
||||||
|
synapse/handlers/federation.py
@@ -1084,8 +1084,6 @@ class FederationHandler(BaseHandler):
         handled_events = set()

         try:
-            event.internal_metadata.outlier = False
-
             # Try the host we successfully got a response to /make_join/
             # request first.
             try:
@@ -1296,7 +1294,7 @@ class FederationHandler(BaseHandler):
         )

         event.internal_metadata.outlier = True
-        event.internal_metadata.new_remote_event = True
+        event.internal_metadata.out_of_band_membership = True

         event.signatures.update(
             compute_event_signature(
@@ -1322,7 +1320,7 @@ class FederationHandler(BaseHandler):
         # Mark as outlier as we don't have any state for this event; we're not
         # even in the room.
         event.internal_metadata.outlier = True
-        event.internal_metadata.new_remote_event = True
+        event.internal_metadata.out_of_band_membership = True

         # Try the host that we succesfully called /make_leave/ on first for
         # the /send_leave/ request.
@@ -1649,6 +1647,11 @@ class FederationHandler(BaseHandler):
                 create_event = e
                 break

+        if create_event is None:
+            # If the state doesn't have a create event then the room is
+            # invalid, and it would fail auth checks anyway.
+            raise SynapseError(400, "No create event in state")
+
         room_version = create_event.content.get("room_version", RoomVersions.V1)

         missing_auth_events = set()
synapse/handlers/room_list.py
@@ -73,8 +73,14 @@ class RoomListHandler(BaseHandler):
             # We explicitly don't bother caching searches or requests for
             # appservice specific lists.
             logger.info("Bypassing cache as search request.")
+
+            # XXX: Quick hack to stop room directory queries taking too long.
+            # Timeout request after 60s. Probably want a more fundamental
+            # solution at some point
+            timeout = self.clock.time() + 60
             return self._get_public_room_list(
-                limit, since_token, search_filter, network_tuple=network_tuple,
+                limit, since_token, search_filter,
+                network_tuple=network_tuple, timeout=timeout,
             )

         key = (limit, since_token, network_tuple)
@@ -87,7 +93,8 @@ class RoomListHandler(BaseHandler):
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
-                              network_tuple=EMPTY_THIRD_PARTY_ID,):
+                              network_tuple=EMPTY_THIRD_PARTY_ID,
+                              timeout=None,):
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -202,6 +209,9 @@ class RoomListHandler(BaseHandler):

         chunk = []
         for i in range(0, len(rooms_to_scan), step):
+            if timeout and self.clock.time() > timeout:
+                raise Exception("Timed out searching room directory")
+
             batch = rooms_to_scan[i:i + step]
             logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(
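Note the timeout is threaded through as an absolute deadline rather than a duration, so the nested call compares against the same wall-clock value the caller computed. The pattern in isolation (a sketch, with time.time() standing in for synapse's Clock):

import time

def scan_with_deadline(batches, deadline, process):
    # Compare against the absolute deadline between batches, so a slow
    # directory scan aborts promptly instead of running unbounded.
    for batch in batches:
        if deadline is not None and time.time() > deadline:
            raise Exception('Timed out searching room directory')
        process(batch)

scan_with_deadline([[1, 2], [3, 4]], time.time() + 60, print)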
synapse/http/client.py
@@ -333,9 +333,10 @@ class SimpleHttpClient(object):
             "POST", uri, headers=Headers(actual_headers), data=query_bytes
         )

+        body = yield make_deferred_yieldable(readBody(response))
+
         if 200 <= response.code < 300:
-            body = yield make_deferred_yieldable(treq.json_content(response))
-            defer.returnValue(body)
+            defer.returnValue(json.loads(body))
         else:
             raise HttpResponseException(response.code, response.phrase, body)
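The bug fixed here (changelog 4460) was a conditional binding: body was assigned only on the 2xx path, so the error branch's reference to body raised UnboundLocalError instead of the intended HttpResponseException. Reading the body unconditionally before branching closes that hole; a stripped-down illustration of the fixed shape:

def handle_response(code, read_body):
    body = read_body()  # bound on every path, mirroring the fix above
    if 200 <= code < 300:
        return body
    raise RuntimeError('HTTP %d: %r' % (code, body))  # body is always defined here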
synapse/http/federation/matrix_federation_agent.py
@@ -101,7 +101,8 @@ class MatrixFederationAgent(object):
         if port is not None:
             target = (host, port)
         else:
-            server_list = yield self._srv_resolver.resolve_service(server_name_bytes)
+            service_name = b"_matrix._tcp.%s" % (server_name_bytes, )
+            server_list = yield self._srv_resolver.resolve_service(service_name)
             if not server_list:
                 target = (host, 8448)
                 logger.debug("No SRV record for %s, using %s", host, target)
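With the prefixing moved into the agent, the resolver now receives the fully qualified SRV service name; concretely (value illustrative):

server_name_bytes = b'example.com'
service_name = b'_matrix._tcp.%s' % (server_name_bytes,)
assert service_name == b'_matrix._tcp.example.com'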
synapse/rest/client/v2_alpha/register.py
@@ -416,8 +416,11 @@ class RegisterRestServlet(RestServlet):
             )
             # Necessary due to auth checks prior to the threepid being
             # written to the db
-            if is_threepid_reserved(self.hs.config, threepid):
-                yield self.store.upsert_monthly_active_user(registered_user_id)
+            if threepid:
+                if is_threepid_reserved(
+                    self.hs.config.mau_limits_reserved_threepids, threepid
+                ):
+                    yield self.store.upsert_monthly_active_user(registered_user_id)

             # remember that we've now registered that user account, and with
             # what user ID (since the user may not have specified)
synapse/storage/_base.py
@@ -26,6 +26,7 @@ from prometheus_client import Histogram
 from twisted.internet import defer

 from synapse.api.errors import StoreError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import Cache
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
@@ -192,6 +193,51 @@ class SQLBaseStore(object):

         self.database_engine = hs.database_engine

+        # A set of tables that are not safe to use native upserts in.
+        self._unsafe_to_upsert_tables = {"user_ips"}
+
+        if self.database_engine.can_native_upsert:
+            # Check ASAP (and then later, every 1s) to see if we have finished
+            # background updates of tables that aren't safe to update.
+            self._clock.call_later(
+                0.0,
+                run_as_background_process,
+                "upsert_safety_check",
+                self._check_safe_to_upsert
+            )
+
+    @defer.inlineCallbacks
+    def _check_safe_to_upsert(self):
+        """
+        Is it safe to use native UPSERT?
+
+        If there are background updates, we will need to wait, as they may be
+        the addition of indexes that set the UNIQUE constraint that we require.
+
+        If the background updates have not completed, wait 15 sec and check again.
+        """
+        updates = yield self._simple_select_list(
+            "background_updates",
+            keyvalues=None,
+            retcols=["update_name"],
+            desc="check_background_updates",
+        )
+        updates = [x["update_name"] for x in updates]
+
+        # The User IPs table in schema #53 was missing a unique index, which we
+        # run as a background update.
+        if "user_ips_device_unique_index" not in updates:
+            self._unsafe_to_upsert_tables.discard("user_ips")
+
+        # If there's any tables left to check, reschedule to run.
+        if self._unsafe_to_upsert_tables:
+            self._clock.call_later(
+                15.0,
+                run_as_background_process,
+                "upsert_safety_check",
+                self._check_safe_to_upsert
+            )
+
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
@@ -494,8 +540,15 @@ class SQLBaseStore(object):
             txn.executemany(sql, vals)

     @defer.inlineCallbacks
-    def _simple_upsert(self, table, keyvalues, values,
-                       insertion_values={}, desc="_simple_upsert", lock=True):
+    def _simple_upsert(
+        self,
+        table,
+        keyvalues,
+        values,
+        insertion_values={},
+        desc="_simple_upsert",
+        lock=True
+    ):
         """

         `lock` should generally be set to True (the default), but can be set
@@ -516,16 +569,21 @@ class SQLBaseStore(object):
             inserting
             lock (bool): True to lock the table when doing the upsert.
         Returns:
-            Deferred(bool): True if a new entry was created, False if an
-                existing one was updated.
+            Deferred(None or bool): Native upserts always return None. Emulated
+            upserts return True if a new entry was created, False if an existing
+            one was updated.
         """
         attempts = 0
         while True:
             try:
                 result = yield self.runInteraction(
                     desc,
-                    self._simple_upsert_txn, table, keyvalues, values, insertion_values,
-                    lock=lock
+                    self._simple_upsert_txn,
+                    table,
+                    keyvalues,
+                    values,
+                    insertion_values,
+                    lock=lock,
                 )
                 defer.returnValue(result)
             except self.database_engine.module.IntegrityError as e:
@@ -537,12 +595,71 @@ class SQLBaseStore(object):

                 # presumably we raced with another transaction: let's retry.
                 logger.warn(
-                    "IntegrityError when upserting into %s; retrying: %s",
-                    table, e
+                    "%s when upserting into %s; retrying: %s", e.__name__, table, e
                 )

-    def _simple_upsert_txn(self, txn, table, keyvalues, values, insertion_values={},
-                           lock=True):
+    def _simple_upsert_txn(
+        self,
+        txn,
+        table,
+        keyvalues,
+        values,
+        insertion_values={},
+        lock=True,
+    ):
+        """
+        Pick the UPSERT method which works best on the platform. Either the
+        native one (Pg9.5+, recent SQLites), or fall back to an emulated method.
+
+        Args:
+            txn: The transaction to use.
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key tables and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+            lock (bool): True to lock the table when doing the upsert.
+        Returns:
+            None or bool: Native upserts always return None. Emulated
+            upserts return True if a new entry was created, False if an existing
+            one was updated.
+        """
+        if (
+            self.database_engine.can_native_upsert
+            and table not in self._unsafe_to_upsert_tables
+        ):
+            return self._simple_upsert_txn_native_upsert(
+                txn,
+                table,
+                keyvalues,
+                values,
+                insertion_values=insertion_values,
+            )
+        else:
+            return self._simple_upsert_txn_emulated(
+                txn,
+                table,
+                keyvalues,
+                values,
+                insertion_values=insertion_values,
+                lock=lock,
+            )
+
+    def _simple_upsert_txn_emulated(
+        self, txn, table, keyvalues, values, insertion_values={}, lock=True
+    ):
+        """
+        Args:
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key tables and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+            lock (bool): True to lock the table when doing the upsert.
+        Returns:
+            bool: Return True if a new entry was created, False if an existing
+            one was updated.
+        """
         # We need to lock the table :(, unless we're *really* careful
         if lock:
             self.database_engine.lock_table(txn, table)
@@ -577,12 +694,44 @@ class SQLBaseStore(object):
             sql = "INSERT INTO %s (%s) VALUES (%s)" % (
                 table,
                 ", ".join(k for k in allvalues),
-                ", ".join("?" for _ in allvalues)
+                ", ".join("?" for _ in allvalues),
             )
             txn.execute(sql, list(allvalues.values()))
             # successfully inserted
             return True

+    def _simple_upsert_txn_native_upsert(
+        self, txn, table, keyvalues, values, insertion_values={}
+    ):
+        """
+        Use the native UPSERT functionality in recent PostgreSQL versions.
+
+        Args:
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key tables and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+        Returns:
+            None
+        """
+        allvalues = {}
+        allvalues.update(keyvalues)
+        allvalues.update(values)
+        allvalues.update(insertion_values)
+
+        sql = (
+            "INSERT INTO %s (%s) VALUES (%s) "
+            "ON CONFLICT (%s) DO UPDATE SET %s"
+        ) % (
+            table,
+            ", ".join(k for k in allvalues),
+            ", ".join("?" for _ in allvalues),
+            ", ".join(k for k in keyvalues),
+            ", ".join(k + "=EXCLUDED." + k for k in values),
+        )
+        txn.execute(sql, list(allvalues.values()))
+
     def _simple_select_one(self, table, keyvalues, retcols,
                            allow_none=False, desc="_simple_select_one"):
         """Executes a SELECT query on the named table, which is expected to
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _update_client_ips_batch_txn(self, txn, to_update):
|
def _update_client_ips_batch_txn(self, txn, to_update):
|
||||||
self.database_engine.lock_table(txn, "user_ips")
|
if "user_ips" in self._unsafe_to_upsert_tables or (
|
||||||
|
not self.database_engine.can_native_upsert
|
||||||
|
):
|
||||||
|
self.database_engine.lock_table(txn, "user_ips")
|
||||||
|
|
||||||
for entry in iteritems(to_update):
|
for entry in iteritems(to_update):
|
||||||
(user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
|
(user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
|
||||||
|
synapse/storage/engines/__init__.py
@@ -18,7 +18,7 @@ import platform

 from ._base import IncorrectDatabaseSetup
 from .postgres import PostgresEngine
-from .sqlite3 import Sqlite3Engine
+from .sqlite import Sqlite3Engine

 SUPPORTED_MODULE = {
     "sqlite3": Sqlite3Engine,
@ -38,6 +38,13 @@ class PostgresEngine(object):
|
|||||||
return sql.replace("?", "%s")
|
return sql.replace("?", "%s")
|
||||||
|
|
||||||
def on_new_connection(self, db_conn):
|
def on_new_connection(self, db_conn):
|
||||||
|
|
||||||
|
# Get the version of PostgreSQL that we're using. As per the psycopg2
|
||||||
|
# docs: The number is formed by converting the major, minor, and
|
||||||
|
# revision numbers into two-decimal-digit numbers and appending them
|
||||||
|
# together. For example, version 8.1.5 will be returned as 80105
|
||||||
|
self._version = db_conn.server_version
|
||||||
|
|
||||||
db_conn.set_isolation_level(
|
db_conn.set_isolation_level(
|
||||||
self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
|
self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
|
||||||
)
|
)
|
||||||
@ -54,6 +61,13 @@ class PostgresEngine(object):
|
|||||||
|
|
||||||
cursor.close()
|
cursor.close()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def can_native_upsert(self):
|
||||||
|
"""
|
||||||
|
Can we use native UPSERTs? This requires PostgreSQL 9.5+.
|
||||||
|
"""
|
||||||
|
return self._version >= 90500
|
||||||
|
|
||||||
def is_deadlock(self, error):
|
def is_deadlock(self, error):
|
||||||
if isinstance(error, self.module.DatabaseError):
|
if isinstance(error, self.module.DatabaseError):
|
||||||
# https://www.postgresql.org/docs/current/static/errcodes-appendix.html
|
# https://www.postgresql.org/docs/current/static/errcodes-appendix.html
|
||||||
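The 90500 threshold follows directly from that encoding. A small sketch of decoding it (valid for pre-10 servers, which pack major/minor/revision as two-digit pairs; PostgreSQL 10+ changed the scheme):

def decode_pg_version(v):
    # e.g. 80105 -> (8, 1, 5), per the psycopg2 docs quoted above
    return (v // 10000, (v // 100) % 100, v % 100)

assert decode_pg_version(80105) == (8, 1, 5)
assert decode_pg_version(90500) == (9, 5, 0)  # first release with ON CONFLICT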

@ -15,6 +15,7 @@

import struct
import threading
from sqlite3 import sqlite_version_info

from synapse.storage.prepare_database import prepare_database

@ -30,6 +31,14 @@ class Sqlite3Engine(object):
self._current_state_group_id = None
self._current_state_group_id_lock = threading.Lock()

@property
def can_native_upsert(self):
"""
Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
more work we haven't done yet to tell what was inserted vs updated.
"""
return sqlite_version_info >= (3, 24, 0)

def check_database(self, txn):
pass
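A quick way to check the same gate on a given machine; note the stdlib module exposes two version tuples, and it is the SQLite library's version, not the Python binding's, that decides upsert support:

import sqlite3

print(sqlite3.sqlite_version_info)                # the SQLite library, e.g. (3, 31, 1)
print(sqlite3.version_info)                       # the sqlite3 binding itself
print(sqlite3.sqlite_version_info >= (3, 24, 0))  # native upsert available?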

@ -215,7 +215,7 @@ class PusherStore(PusherWorkerStore):
with self._pushers_id_gen.get_next() as stream_id:
# no need to lock because `pushers` has a unique key on
# (app_id, pushkey, user_name) so _simple_upsert will retry
newly_inserted = yield self._simple_upsert(
yield self._simple_upsert(
table="pushers",
keyvalues={
"app_id": app_id,
@ -238,7 +238,12 @@ class PusherStore(PusherWorkerStore):
lock=False,
)

if newly_inserted:
user_has_pusher = self.get_if_user_has_pusher.cache.get(
(user_id,), None, update_metrics=False
)

if user_has_pusher is not True:
# invalidate, since the user might not have had a pusher before
yield self.runInteraction(
"add_pusher",
self._invalidate_cache_and_stream,
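The peek-then-invalidate idiom above is worth calling out: the cache is read without touching hit/miss metrics, and the comparatively expensive invalidation broadcast only happens when the cached value could now be stale. A hedged sketch with hypothetical names:

def refresh_after_upsert(cache, key, invalidate):
    # None here means "not cached"; True means "already known to have a
    # pusher", in which case the upsert cannot have changed the answer.
    cached = cache.get(key, None, update_metrics=False)
    if cached is not True:
        invalidate(key)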

@ -589,11 +589,11 @@ class RoomMemberStore(RoomMemberWorkerStore):

# We update the local_invites table only if the event is "current",
# i.e., it's something that has just happened. If the event is an
# outlier it is only current if its a "new remote event", like a
# remote invite or a rejection of a remote invite.
# outlier it is only current if it's an "out of band membership",
# like a remote invite or a rejection of a remote invite.
is_new_state = not backfilled and (
not event.internal_metadata.is_outlier()
or event.internal_metadata.is_new_remote_event()
or event.internal_metadata.is_out_of_band_membership()
)
is_mine = self.hs.is_mine_id(event.state_key)
if is_new_state and is_mine:

@ -168,14 +168,14 @@ class UserDirectoryStore(SQLBaseStore):
if isinstance(self.database_engine, PostgresEngine):
# We weight the localpart most highly, then display name and finally
# server name
if new_entry:
if self.database_engine.can_native_upsert:
sql = """
INSERT INTO user_directory_search(user_id, vector)
VALUES (?,
setweight(to_tsvector('english', ?), 'A')
|| setweight(to_tsvector('english', ?), 'D')
|| setweight(to_tsvector('english', COALESCE(?, '')), 'B')
)
) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
"""
txn.execute(
sql,
@ -185,20 +185,45 @@ class UserDirectoryStore(SQLBaseStore):
)
)
else:
sql = """
UPDATE user_directory_search
SET vector = setweight(to_tsvector('english', ?), 'A')
|| setweight(to_tsvector('english', ?), 'D')
|| setweight(to_tsvector('english', COALESCE(?, '')), 'B')
WHERE user_id = ?
"""
txn.execute(
sql,
(
get_localpart_from_id(user_id), get_domain_from_id(user_id),
display_name, user_id,
# TODO: Remove this code after we've bumped the minimum version
# of postgres to always support upserts, so we can get rid of
# `new_entry` usage
if new_entry is True:
sql = """
INSERT INTO user_directory_search(user_id, vector)
VALUES (?,
setweight(to_tsvector('english', ?), 'A')
|| setweight(to_tsvector('english', ?), 'D')
|| setweight(to_tsvector('english', COALESCE(?, '')), 'B')
)
"""
txn.execute(
sql,
(
user_id, get_localpart_from_id(user_id),
get_domain_from_id(user_id), display_name,
)
)
elif new_entry is False:
sql = """
UPDATE user_directory_search
SET vector = setweight(to_tsvector('english', ?), 'A')
|| setweight(to_tsvector('english', ?), 'D')
|| setweight(to_tsvector('english', COALESCE(?, '')), 'B')
WHERE user_id = ?
"""
txn.execute(
sql,
(
get_localpart_from_id(user_id),
get_domain_from_id(user_id),
display_name, user_id,
)
)
else:
raise RuntimeError(
"upsert returned None when 'can_native_upsert' is False"
)
)
)

elif isinstance(self.database_engine, Sqlite3Engine):
value = "%s %s" % (user_id, display_name,) if display_name else user_id
self._simple_upsert_txn(
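For reference, a hedged psycopg2 sketch of the statement the native-upsert branch issues (Synapse writes ? placeholders and its engine rewrites them to %s; plain psycopg2 takes %s directly). The DSN and values are illustrative; note the parameter order is user_id, localpart, domain, display name:

import psycopg2

conn = psycopg2.connect("dbname=synapse")  # illustrative DSN
with conn, conn.cursor() as cur:
    cur.execute(
        """
        INSERT INTO user_directory_search(user_id, vector)
        VALUES (%s,
            setweight(to_tsvector('english', %s), 'A')
            || setweight(to_tsvector('english', %s), 'D')
            || setweight(to_tsvector('english', COALESCE(%s, '')), 'B')
        ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
        """,
        ("@alice:example.com", "alice", "example.com", "Alice"),
    )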

@ -26,6 +26,7 @@ from twisted.web.http import HTTPChannel

from synapse.crypto.context_factory import ClientTLSOptionsFactory
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.http.federation.srv_resolver import Server
from synapse.util.logcontext import LoggingContext

from tests.server import FakeTransport, ThreadedMemoryReactorClock
@ -46,7 +47,7 @@ class MatrixFederationAgentTests(TestCase):
_srv_resolver=self.mock_resolver,
)

def _make_connection(self, client_factory):
def _make_connection(self, client_factory, expected_sni):
"""Builds a test server, and completes the outgoing client connection

Returns:
@ -69,9 +70,17 @@ class MatrixFederationAgentTests(TestCase):
# tell the server tls protocol to send its stuff back to the client, too
server_tls_protocol.makeConnection(FakeTransport(client_protocol, self.reactor))

# finally, give the reactor a pump to get the TLS juices flowing.
# give the reactor a pump to get the TLS juices flowing.
self.reactor.pump((0.1,))

# check the SNI
server_name = server_tls_protocol._tlsConnection.get_servername()
self.assertEqual(
server_name,
expected_sni,
"Expected SNI %s but got %s" % (expected_sni, server_name),
)

# fish the test server back out of the server-side TLS protocol.
return server_tls_protocol.wrappedProtocol
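The new assertion peeks at the server side of the handshake to see what SNI the client offered. For anyone unfamiliar with that API, a minimal pyOpenSSL sketch of the server-side read: the callback fires during the handshake, and get_servername() returns the name the client sent, or None when no SNI was offered — which RFC 6066 mandates for bare IP addresses, the behaviour the IP-literal test below asserts:

from OpenSSL import SSL

def sni_callback(connection):
    # None here means the client sent no server_name extension at all.
    print("client offered SNI:", connection.get_servername())

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
ctx.set_tlsext_servername_callback(sni_callback)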
@ -97,7 +106,7 @@ class MatrixFederationAgentTests(TestCase):

def test_get(self):
"""
happy-path test of a GET request
happy-path test of a GET request with an explicit port
"""
self.reactor.lookups["testserv"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
@ -113,16 +122,15 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(port, 8448)

# make a test server, and wire up the client
http_server = self._make_connection(client_factory)
http_server = self._make_connection(
client_factory,
expected_sni=b"testserv",
)

self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b'GET')
self.assertEqual(request.path, b'/foo/bar')
self.assertEqual(
request.requestHeaders.getRawHeaders(b'host'),
[b'testserv:8448']
)
content = request.content.read()
self.assertEqual(content, b'')

@ -150,6 +158,130 @@ class MatrixFederationAgentTests(TestCase):
json = self.successResultOf(treq.json_content(response))
self.assertEqual(json, {"a": 1})

def test_get_ip_address(self):
"""
Test the behaviour when the server name contains an explicit IP (with no port)
"""

# the SRV lookup will return an empty list (XXX: why do we even do an SRV lookup?)
self.mock_resolver.resolve_service.side_effect = lambda _: []

# then there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.4"] = "1.2.3.4"

test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar")

# Nothing happened yet
self.assertNoResult(test_d)

self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.1.2.3.4",
)

# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, '1.2.3.4')
self.assertEqual(port, 8448)

# make a test server, and wire up the client
http_server = self._make_connection(
client_factory,
expected_sni=None,
)

self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b'GET')
self.assertEqual(request.path, b'/foo/bar')

# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)

def test_get_hostname_no_srv(self):
"""
Test the behaviour when the server name has no port, and no SRV record
"""

self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv"] = "1.2.3.4"

test_d = self._make_get_request(b"matrix://testserv/foo/bar")

# Nothing happened yet
self.assertNoResult(test_d)

self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv",
)

# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, '1.2.3.4')
self.assertEqual(port, 8448)

# make a test server, and wire up the client
http_server = self._make_connection(
client_factory,
expected_sni=b'testserv',
)

self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b'GET')
self.assertEqual(request.path, b'/foo/bar')

# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)

def test_get_hostname_srv(self):
"""
Test the behaviour when there is a single SRV record
"""
self.mock_resolver.resolve_service.side_effect = lambda _: [
Server(host="srvtarget", port=8443)
]
self.reactor.lookups["srvtarget"] = "1.2.3.4"

test_d = self._make_get_request(b"matrix://testserv/foo/bar")

# Nothing happened yet
self.assertNoResult(test_d)

self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv",
)

# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, '1.2.3.4')
self.assertEqual(port, 8443)

# make a test server, and wire up the client
http_server = self._make_connection(
client_factory,
expected_sni=b'testserv',
)

self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b'GET')
self.assertEqual(request.path, b'/foo/bar')

# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)


def _check_logcontext(context):
current = LoggingContext.current_context()
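Taken together, the three new tests pin down the routing rules: an explicit port wins, an IP literal gets no SNI and the default port, a bare hostname falls back to port 8448, and an SRV answer redirects both host and port. A hedged sketch of that decision table (not Synapse's real resolver; IPv6 literals would need more care than this ':' check):

from collections import namedtuple

Srv = namedtuple("Srv", ["host", "port"])

def pick_endpoint(server_name, srv_records):
    if ":" in server_name:               # explicit port wins outright
        host, port = server_name.rsplit(":", 1)
        return host, int(port)
    if srv_records:                      # honour _matrix._tcp SRV answers
        return srv_records[0].host, srv_records[0].port
    return server_name, 8448             # default federation port

assert pick_endpoint("testserv:8448", []) == ("testserv", 8448)
assert pick_endpoint("1.2.3.4", []) == ("1.2.3.4", 8448)
assert pick_endpoint("testserv", [Srv("srvtarget", 8443)]) == ("srvtarget", 8443)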

@ -49,6 +49,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
self.db_pool.runWithConnection = runWithConnection

config = Mock()
config._enable_native_upserts = False
config.event_cache_size = 1
config.database_config = {"name": "sqlite3"}
hs = TestHomeServer(

@ -19,7 +19,7 @@ from six import StringIO

from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.test.proto_helpers import AccumulatingProtocol
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET

@ -30,12 +30,18 @@ from synapse.util import Clock
from synapse.util.logcontext import make_deferred_yieldable

from tests import unittest
from tests.server import FakeTransport, make_request, render, setup_test_homeserver
from tests.server import (
FakeTransport,
ThreadedMemoryReactorClock,
make_request,
render,
setup_test_homeserver,
)


class JsonResourceTests(unittest.TestCase):
def setUp(self):
self.reactor = MemoryReactorClock()
self.reactor = ThreadedMemoryReactorClock()
self.hs_clock = Clock(self.reactor)
self.homeserver = setup_test_homeserver(
self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor

@ -96,7 +96,7 @@ class TestCase(unittest.TestCase):

method = getattr(self, methodName)

level = getattr(method, "loglevel", getattr(self, "loglevel", logging.ERROR))
level = getattr(method, "loglevel", getattr(self, "loglevel", logging.WARNING))

@around(self)
def setUp(orig):
@ -333,7 +333,15 @@ class HomeserverTestCase(TestCase):
"""
kwargs = dict(kwargs)
kwargs.update(self._hs_args)
return setup_test_homeserver(self.addCleanup, *args, **kwargs)
hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
stor = hs.get_datastore()

# Run the database background updates.
if hasattr(stor, "do_next_background_update"):
while not self.get_success(stor.has_completed_background_updates()):
self.get_success(stor.do_next_background_update(1))

return hs

def pump(self, by=0.0):
"""
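The hasattr guard matters because some test homeservers are built around bare Mocks with no real datastore. A hedged restatement of the drain loop as a standalone helper, with hypothetical names:

def drain_background_updates(get_success, stor):
    # Step the updater one batch at a time until the store reports done.
    if not hasattr(stor, "do_next_background_update"):
        return
    while not get_success(stor.has_completed_background_updates()):
        get_success(stor.do_next_background_update(1))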

@ -154,7 +154,9 @@ def default_config(name):
config.update_user_directory = False

def is_threepid_reserved(threepid):
return ServerConfig.is_threepid_reserved(config, threepid)
return ServerConfig.is_threepid_reserved(
config.mau_limits_reserved_threepids, threepid
)

config.is_threepid_reserved.side_effect = is_threepid_reserved
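The fix passes the reserved-threepid list itself rather than the whole config object. For reference, a hedged sketch of what the check amounts to, assuming mau_limits_reserved_threepids entries carry medium and address fields:

def is_threepid_reserved(reserved_threepids, threepid):
    # The None guard is the point of the related bugfix: unauthenticated
    # registrations can reach this with no threepid at all.
    if threepid is None:
        return False
    return any(
        threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]
        for tp in reserved_threepids
    )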