Merge branch 'develop' of github.com:matrix-org/synapse into erikj/stop_fed_not_in_room

This commit is contained in:
Erik Johnston 2019-03-04 11:54:58 +00:00
commit fbc047f2a5
101 changed files with 1458 additions and 531 deletions

13
.buildkite/.env Normal file

@ -0,0 +1,13 @@
CI
BUILDKITE
BUILDKITE_BUILD_NUMBER
BUILDKITE_BRANCH
BUILDKITE_BUILD_NUMBER
BUILDKITE_JOB_ID
BUILDKITE_BUILD_URL
BUILDKITE_PROJECT_SLUG
BUILDKITE_COMMIT
BUILDKITE_PULL_REQUEST
BUILDKITE_TAG
CODECOV_TOKEN
TRIAL_FLAGS

21
.buildkite/docker-compose.py27.pg94.yaml Normal file

@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.4
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:2.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py27.pg95.yaml Normal file

@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:2.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py35.pg94.yaml Normal file

@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.4
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.5
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py35.pg95.yaml Normal file

@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.5
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py37.pg11.yaml Normal file

@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:11
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py37.pg95.yaml Normal file

@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

149
.buildkite/pipeline.yml Normal file

@ -0,0 +1,149 @@
env:
CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
steps:
- command:
- "python -m pip install tox"
- "tox -e pep8"
label: "\U0001F9F9 PEP-8"
plugins:
- docker#v3.0.1:
image: "python:3.6"
- command:
- "python -m pip install tox"
- "tox -e packaging"
label: "\U0001F9F9 packaging"
plugins:
- docker#v3.0.1:
image: "python:3.6"
- command:
- "python -m pip install tox"
- "tox -e check_isort"
label: "\U0001F9F9 isort"
plugins:
- docker#v3.0.1:
image: "python:3.6"
- command:
- "python -m pip install tox"
- "scripts-dev/check-newsfragment"
label: ":newspaper: Newsfile"
branches: "!master !develop !release-*"
plugins:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
- wait
- command:
- "python -m pip install tox"
- "tox -e py27,codecov"
label: ":python: 2.7 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
- command:
- "python -m pip install tox"
- "tox -e py35,codecov"
label: ":python: 3.5 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.5"
propagate-environment: true
- command:
- "python -m pip install tox"
- "tox -e py36,codecov"
label: ":python: 3.6 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
- command:
- "python -m pip install tox"
- "tox -e py37,codecov"
label: ":python: 3.7 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.7"
propagate-environment: true
- label: ":python: 2.7 / :postgres: 9.4"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py27.pg94.yaml
- label: ":python: 2.7 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py27.pg95.yaml
- label: ":python: 3.5 / :postgres: 9.4"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py35.pg94.yaml
- label: ":python: 3.5 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py35.pg95.yaml
- label: ":python: 3.7 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py37.pg95.yaml
- label: ":python: 3.7 / :postgres: 11"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py37.pg11.yaml

.travis.yml

@ -1,101 +0,0 @@
dist: xenial
language: python
cache:
directories:
# we only bother to cache the wheels; parts of the http cache get
# invalidated every build (because they get served with a max-age of 600
# seconds), which means that we end up re-uploading the whole cache for
# every build, which is time-consuming. In any case, it's not obvious that
# downloading the cache from S3 would be much faster than downloading the
# originals from pypi.
#
- $HOME/.cache/pip/wheels
# don't clone the whole repo history, one commit will do
git:
depth: 1
# only build branches we care about (PRs are built separately)
branches:
only:
- master
- develop
- /^release-v/
- rav/pg95
# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests
# (SQLite) and 4 for I/O bound tests (PostgreSQL).
matrix:
fast_finish: true
include:
- name: "pep8"
python: 3.6
env: TOX_ENV="pep8,check_isort,packaging"
- name: "py2.7 / sqlite"
python: 2.7
env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
- name: "py2.7 / sqlite / olddeps"
python: 2.7
env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
- name: "py2.7 / postgres9.5"
python: 2.7
addons:
postgresql: "9.5"
env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
services:
- postgresql
- name: "py3.5 / sqlite"
python: 3.5
env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
- name: "py3.7 / sqlite"
python: 3.7
env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"
- name: "py3.7 / postgres9.4"
python: 3.7
addons:
postgresql: "9.4"
env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
services:
- postgresql
- name: "py3.7 / postgres9.5"
python: 3.7
addons:
postgresql: "9.5"
env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
services:
- postgresql
- # we only need to check for the newsfragment if it's a PR build
if: type = pull_request
name: "check-newsfragment"
python: 3.6
env: TOX_ENV=check-newsfragment
script:
- git remote set-branches --add origin develop
- git fetch origin develop
- tox -e $TOX_ENV
install:
# this just logs the postgres version we will be testing against (if any)
- psql -At -U postgres -c 'select version();' || true
- pip install tox
# if we don't have python3.6 in this environment, travis unhelpfully gives us
# a `python3.6` on our path which does nothing but spit out a warning. Tox
# tries to run it (even if we're not running a py36 env), so the build logs
# then have warnings which look like errors. To reduce the noise, remove the
# non-functional python3.6.
- ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
script:
- tox -e $TOX_ENV

CHANGES.md

@ -1,3 +1,62 @@
Synapse 0.99.2 (2019-03-01)
===========================
Features
--------
- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))
Bugfixes
--------
- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
- The dependency checker now correctly reports a version mismatch for optional
dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
- Fix an issue in the database migration script where the
`e2e_room_keys.is_verified` column wasn't considered as
a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))
Internal Changes
----------------
- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))
Synapse 0.99.1.1 (2019-02-14)
=============================

CONTRIBUTING.rst

@ -74,16 +74,39 @@ entry. These are managed by Towncrier
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
deprecations), or ``misc`` (for internal-only changes).
The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.
Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!
For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".
Matrix.".
Debian changelog
----------------
Changes which affect the debian packaging files (in ``debian``) are an
exception.
In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::
dch
This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name is
corrected for the release.)
If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
Attribution
~~~~~~~~~~~

MANIFEST.in

@ -39,6 +39,7 @@ prune .circleci
prune .coveragerc
prune debian
prune .codecov.yml
prune .buildkite
exclude jenkins*
recursive-exclude jenkins *.sh

README.rst

@ -199,6 +199,8 @@ by installing the ``libjemalloc1`` package and adding this line to
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.
Upgrading an existing Synapse
=============================

changelog.d/4450.bugfix

@ -1,2 +0,0 @@
The dependency checker now correctly reports a version mismatch for optional
dependencies, instead of reporting the dependency missing.

changelog.d/4632.feature

@ -1 +0,0 @@
Add basic optional sentry integration

changelog.d/4635.misc

@ -1 +0,0 @@
Run `black` to reformat user directory code.

changelog.d/4642.feature

@ -1 +0,0 @@
Transfer bans on room upgrade.

changelog.d/4643.misc

@ -1 +0,0 @@
Reduce number of exceptions we log

changelog.d/4644.misc

@ -1 +0,0 @@
Introduce upsert batching functionality in the database layer.

changelog.d/4647.feature

@ -1 +0,0 @@
Add configurable room list publishing rules

changelog.d/4651.bugfix

@ -1 +0,0 @@
Set CORS headers on .well-known requests

changelog.d/4652.feature

@ -1 +0,0 @@
Support .well-known delegation when issuing certificates through ACME.

changelog.d/4657.misc

@ -1 +0,0 @@
Fix various spelling mistakes.

changelog.d/4666.feature

@ -1 +0,0 @@
Allow registration and login to be handled by a worker instance.

changelog.d/4667.bugfix

@ -1 +0,0 @@
Fix kicking guest users on guest access revocation in worker mode.

changelog.d/4668.misc

@ -1 +0,0 @@
Reduce number of exceptions we log

changelog.d/4669.misc

@ -1 +0,0 @@
Cleanup request exception logging

changelog.d/4670.feature

@ -1 +0,0 @@
Allow registration and login to be handled by a worker instance.

changelog.d/4671.misc

@ -1 +0,0 @@
Improve replication performance by reducing cache invalidation traffic.

changelog.d/4674.feature

@ -1 +0,0 @@
Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options.

changelog.d/4676.misc

@ -1 +0,0 @@
Test against Postgres 9.5 as well as 9.4

changelog.d/4677.misc

@ -1 +0,0 @@
Run unit tests against python 3.7.

changelog.d/4681.misc

@ -1 +0,0 @@
Attempt to clarify installation instructions/config

changelog.d/4682.feature

@ -1 +0,0 @@
Allow registration and login to be handled by a worker instance.

changelog.d/4688.misc

@ -1 +0,0 @@
Clean up gitignores

changelog.d/4690.bugfix

@ -1 +0,0 @@
Fix TaskStopped exceptions in logs when outbound requests time out.

changelog.d/4691.misc

@ -1 +0,0 @@
Improve the logging in the pusher process.

changelog.d/4694.feature

@ -1 +0,0 @@
Add basic optional sentry integration

changelog.d/4695.feature

@ -1 +0,0 @@
Add prometheus metrics for number of outgoing EDUs, by type.

1
changelog.d/4740.bugfix Normal file

@ -0,0 +1 @@
'event_id' is now a required parameter in federated state requests, as per the matrix spec.

1
changelog.d/4749.bugfix Normal file

@ -0,0 +1 @@
Fix tightloop over connecting to replication server.

1
changelog.d/4752.misc Normal file

@ -0,0 +1 @@
Change from TravisCI to Buildkite for CI.

1
changelog.d/4757.feature Normal file

@ -0,0 +1 @@
Move server key queries to federation reader.

1
changelog.d/4757.misc Normal file

@ -0,0 +1 @@
When presence is disabled don't send over replication.

1
changelog.d/4759.feature Normal file

@ -0,0 +1 @@
Add support for /account/3pid REST endpoint to client_reader worker.

1
changelog.d/4763.bugfix Normal file

@ -0,0 +1 @@
Fix parsing of Content-Disposition headers on remote media requests and URL previews.

1
changelog.d/4765.misc Normal file

@ -0,0 +1 @@
Minor docstring fixes for MatrixFederationAgent.

1
changelog.d/4771.misc Normal file

@ -0,0 +1 @@
Update test_typing to use HomeserverTestCase.

1
changelog.d/4776.bugfix Normal file

@ -0,0 +1 @@
Fix incorrect log about not persisting duplicate state event.

7
debian/changelog vendored

@ -1,3 +1,10 @@
matrix-synapse-py3 (0.99.2) stable; urgency=medium
* Fix overwriting of config settings on upgrade.
* New synapse release 0.99.2.
-- Synapse Packaging team <packages@matrix.org> Fri, 01 Mar 2019 10:55:08 +0000
matrix-synapse-py3 (0.99.1.1) stable; urgency=medium
* New synapse release 0.99.1.1

1
debian/install vendored

@ -1 +1,2 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/

130
debian/manage_debconf.pl vendored Executable file

@ -0,0 +1,130 @@
#!/usr/bin/perl
#
# Interface between our config files and the debconf database.
#
# Usage:
#
# manage_debconf.pl <action>
#
# where <action> can be:
#
# read: read the configuration from the yaml into debconf
# update: update the yaml config according to the debconf database
use strict;
use warnings;
use Debconf::Client::ConfModule (qw/get set/);
# map from the name of a setting in our .yaml file to the relevant debconf
# setting.
my %MAPPINGS=(
server_name => 'matrix-synapse/server-name',
report_stats => 'matrix-synapse/report-stats',
);
# enable debug if dpkg --debug
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};
sub read_config {
my @files = @_;
foreach my $file (@files) {
print STDERR "reading $file\n" if $DEBUG;
open my $FH, "<", $file or next;
# rudimentary parsing which (a) avoids having to depend on a yaml library,
# and (b) is tolerant of yaml errors
while($_ = <$FH>) {
while (my ($setting, $debconf) = each %MAPPINGS) {
$setting = quotemeta $setting;
if(/^${setting}\s*:(.*)$/) {
my $val = $1;
# remove leading/trailing whitespace
$val =~ s/^\s*//;
$val =~ s/\s*$//;
# remove surrounding quotes
if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
$val = $1;
}
print STDERR ">> $debconf = $val\n" if $DEBUG;
set($debconf, $val);
}
}
}
close $FH;
}
}
sub update_config {
my @files = @_;
my %substs = ();
while (my ($setting, $debconf) = each %MAPPINGS) {
my @res = get($debconf);
$substs{$setting} = $res[1] if $res[0] == 0;
}
foreach my $file (@files) {
print STDERR "checking $file\n" if $DEBUG;
open my $FH, "<", $file or next;
my $updated = 0;
# read the whole file into memory
my @lines = <$FH>;
while (my ($setting, $val) = each %substs) {
$setting = quotemeta $setting;
map {
if (/^${setting}\s*:\s*(.*)\s*$/) {
my $current = $1;
if ($val ne $current) {
$_ = "${setting}: $val\n";
$updated = 1;
}
}
} @lines;
}
close $FH;
next unless $updated;
print STDERR "updating $file\n" if $DEBUG;
open $FH, ">", $file or die "unable to update $file";
print $FH @lines;
close $FH;
}
}
my $cmd = $ARGV[0];
my $read = 0;
my $update = 0;
if (not $cmd) {
die "must specify a command to perform\n";
} elsif ($cmd eq 'read') {
$read = 1;
} elsif ($cmd eq 'update') {
$update = 1;
} else {
die "unknown command '$cmd'\n";
}
my @files = (
"/etc/matrix-synapse/homeserver.yaml",
glob("/etc/matrix-synapse/conf.d/*.yaml"),
);
if ($read) {
read_config(@files);
} elsif ($update) {
update_config(@files);
}

debian/matrix-synapse-py3.config

@ -4,6 +4,9 @@ set -e
. /usr/share/debconf/confmodule
# try to update the debconf db according to whatever is in the config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true
db_input high matrix-synapse/server-name || true
db_input high matrix-synapse/report-stats || true
db_go

debian/matrix-synapse-py3.postinst

@ -8,19 +8,36 @@ USER="matrix-synapse"
case "$1" in
configure|reconfigure)
# Set server name in config file
# generate template config files if they don't exist
mkdir -p "/etc/matrix-synapse/conf.d/"
db_get matrix-synapse/server-name
if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
cat > "$CONFIGFILE_SERVERNAME" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
if [ "$RET" ]; then
echo "server_name: $RET" > $CONFIGFILE_SERVERNAME
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: ''
EOF
fi
db_get matrix-synapse/report-stats
if [ "$RET" ]; then
echo "report_stats: $RET" > $CONFIGFILE_REPORTSTATS
if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
cat > "$CONFIGFILE_REPORTSTATS" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
fi
# update the config files according to whatever is in the debconf database
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
if ! getent passwd $USER >/dev/null; then
adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
fi

docs/ACME.md

@ -12,11 +12,12 @@ In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
used between client and federation ports without issue.
For a sample configuration, please inspect the new ACME section in the example
generated config by running the `generate-config` executable. For example:
If your configuration file does not already have an `acme` section, you can
generate an example config by running the `generate_config` executable. For
example:
```
~/synapse/env3/bin/generate-config
~/synapse/env3/bin/generate_config
```
You will need to provide Let's Encrypt (or another ACME provider) access to
@ -27,10 +28,9 @@ like `authbind` to allow Synapse to listen on port 80 without root access.
(Do not run Synapse with root permissions!) Detailed instructions are
available under "ACME setup" below.
If you are already using self-signed certificates, you will need to back up
or delete them (files `example.com.tls.crt` and `example.com.tls.key` in
Synapse's root directory), Synapse's ACME implementation will not overwrite
them.
If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.
You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
@ -87,7 +87,6 @@ acme:
port: 8009
```
#### Authbind
`authbind` allows a program which does not run as root to bind to

docs/reverse_proxy.rst

@ -79,12 +79,30 @@ Let's assume that we expect clients to connect to our server at
SSLEngine on
ServerName example.com;
<Location />
<Location /_matrix>
ProxyPass http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse http://127.0.0.1:8008/_matrix
</Location>
</VirtualHost>
* HAProxy::
frontend https
bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
# Matrix client traffic
acl matrix hdr(host) -i matrix.example.com
use_backend matrix if matrix
frontend matrix-federation
bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
default_backend matrix
backend matrix
server matrix 127.0.0.1:8008
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.

docs/tcp_replication.rst

@ -188,7 +188,9 @@ RDATA (S)
A single update in a stream
POSITION (S)
The position of the stream has been updated
The position of the stream has been updated. Sent to the client after all
missing updates for a stream have been sent to the client and they're now
up to date.
ERROR (S, C)
There was an error
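
For example, a client catching up on the ``events`` stream might see the
following exchange (illustrative tokens only; ``>`` is client to server,
``<`` is server to client)::

    > REPLICATE events 23
    < RDATA events 24 <json>
    < RDATA events 25 <json>
    < POSITION events 25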

docs/workers.rst

@ -182,6 +182,7 @@ endpoints matching the following regular expressions::
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/send/
^/_matrix/key/v2/query
The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.
@ -223,6 +224,7 @@ following regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
^/_matrix/client/(api/v1|r0|unstable)/login$
^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance::

41
scripts-dev/check-newsfragment Executable file

@ -0,0 +1,41 @@
#!/bin/bash
#
# A script which checks that an appropriate news file has been added on this
# branch.
set -e
# make sure that origin/develop is up to date
git remote set-branches --add origin develop
git fetch --depth=1 origin develop
UPSTREAM=origin/develop
# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet $UPSTREAM... -- debian; then
if git diff --quiet $UPSTREAM... -- debian/changelog; then
echo "Updates to debian directory, but no update to the changelog." >&2
exit 1
fi
fi
# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if git diff --name-only $UPSTREAM... | grep -qv '^debian/'; then
tox -e check-newsfragment
fi
echo
echo "--------------------------"
echo
# check that any new newsfiles on this branch end with a full stop.
for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
lastchar=`tr -d '\n' < $f | tail -c 1`
if [ $lastchar != '.' ]; then
echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
exit 1
fi
done

scripts/synapse_port_db

@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
"group_summary_users": ["is_public"],
"group_roles": ["is_public"],
"local_group_membership": ["is_publicised", "is_admin"],
"e2e_room_keys": ["is_verified"],
}
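
For context: the port script reads rows out of SQLite and writes them to
Postgres, and uses this mapping to decide which integer columns need coercing
to real booleans. A minimal sketch of that conversion (hypothetical helper,
not the script's actual code):

```python
BOOLEAN_COLUMNS = {
    "e2e_room_keys": ["is_verified"],
    # ... one entry per table, as in the hunk above
}

def coerce_booleans(table, row):
    # SQLite stores booleans as 0/1 integers; a Postgres BOOLEAN column
    # rejects bare integers, so convert the listed columns per table.
    for col in BOOLEAN_COLUMNS.get(table, ()):
        if row.get(col) is not None:
            row[col] = bool(row[col])
    return row
```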

synapse/__init__.py

@ -27,4 +27,4 @@ try:
except ImportError:
pass
__version__ = "0.99.1.1"
__version__ = "0.99.2"

synapse/app/client_reader.py

@ -48,6 +48,7 @@ from synapse.rest.client.v1.room import (
RoomMemberListRestServlet,
RoomStateRestServlet,
)
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@ -96,6 +97,7 @@ class ClientReaderServer(HomeServer):
RoomEventContextServlet(self).register(resource)
RegisterRestServlet(self).register(resource)
LoginRestServlet(self).register(resource)
ThreepidRestServlet(self).register(resource)
resources.update({
"/_matrix/client/r0": resource,

synapse/app/federation_reader.py

@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
@ -44,6 +44,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@ -99,6 +100,9 @@ class FederationReaderServer(HomeServer):
),
})
if name in ["keys", "federation"]:
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(

synapse/app/frontend_proxy.py

@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
@ -66,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
headers = {
"Authorization": auth_headers,
}
try:
result = yield self.http_client.get_json(
self.main_uri + request.uri.decode('ascii'),
headers=headers,
)
except HttpResponseException as e:
raise e.to_synapse_error()
defer.returnValue((200, result))
@defer.inlineCallbacks

synapse/app/homeserver.py

@ -555,6 +555,9 @@ def run(hs):
stats["memory_rss"] += process.memory_info().rss
stats["cpu_average"] += int(process.cpu_percent(interval=None))
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
logger.info("Reporting stats to matrix.org: %s" % (stats,))
try:
yield hs.get_simple_http_client().put_json(

synapse/config/captcha.py

@ -47,5 +47,5 @@ class CaptchaConfig(Config):
#captcha_bypass_secret: "YOUR_SECRET_HERE"
# The API endpoint to use for verifying m.login.recaptcha responses.
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
"""

synapse/config/tls.py

@ -19,6 +19,8 @@ import warnings
from datetime import datetime
from hashlib import sha256
import six
from unpaddedbase64 import encode_base64
from OpenSSL import crypto
@ -36,9 +38,11 @@ class TlsConfig(Config):
acme_config = {}
self.acme_enabled = acme_config.get("enabled", False)
self.acme_url = acme_config.get(
# hyperlink complains on py2 if this is not a Unicode
self.acme_url = six.text_type(acme_config.get(
"url", u"https://acme-v01.api.letsencrypt.org/directory"
)
))
self.acme_port = acme_config.get("port", 80)
self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
@ -55,7 +59,7 @@ class TlsConfig(Config):
)
if not self.tls_private_key_file:
raise ConfigError(
"tls_certificate_path must be specified if TLS-enabled listeners are "
"tls_private_key_path must be specified if TLS-enabled listeners are "
"configured."
)

synapse/crypto/keyring.py

@ -17,6 +17,7 @@
import logging
from collections import namedtuple
from six import raise_from
from six.moves import urllib
from signedjson.key import (
@ -35,7 +36,12 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse.api.errors import Codes, RequestSendFailed, SynapseError
from synapse.api.errors import (
Codes,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
LoggingContext,
@ -44,6 +50,7 @@ from synapse.util.logcontext import (
run_in_background,
)
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination
logger = logging.getLogger(__name__)
@ -367,12 +374,17 @@ class Keyring(object):
server_name_and_key_ids, perspective_name, perspective_keys
)
defer.returnValue(result)
except KeyLookupError as e:
logger.warning(
"Key lookup failed from %r: %s", perspective_name, e,
)
except Exception as e:
logger.exception(
"Unable to get key from %r: %s %s",
perspective_name,
type(e).__name__, str(e),
)
defer.returnValue({})
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
@ -421,6 +433,7 @@ class Keyring(object):
# TODO(mark): Set the minimum_valid_until_ts to that needed by
# the events being validated or the current time if validating
# an incoming request.
try:
query_response = yield self.client.post_json(
destination=perspective_name,
path="/_matrix/key/v2/query",
@ -436,6 +449,14 @@ class Keyring(object):
},
long_retries=True,
)
except (NotRetryingDestination, RequestSendFailed) as e:
raise_from(
KeyLookupError("Failed to connect to remote server"), e,
)
except HttpResponseException as e:
raise_from(
KeyLookupError("Remote server returned an error"), e,
)
keys = {}
@ -502,11 +523,20 @@ class Keyring(object):
if requested_key_id in keys:
continue
try:
response = yield self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
ignore_backoff=True,
)
except (NotRetryingDestination, RequestSendFailed) as e:
raise_from(
KeyLookupError("Failed to connect to remote server"), e,
)
except HttpResponseException as e:
raise_from(
KeyLookupError("Remote server returned an error"), e,
)
if (u"signatures" not in response
or server_name not in response[u"signatures"]):
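
Both lookup paths now use the same translate-and-chain idiom: transport-level
failures become ``KeyLookupError`` while ``raise_from`` preserves the original
cause. Reduced to a standalone sketch (hypothetical transport call, not the
keyring's actual code):

```python
from six import raise_from

class KeyLookupError(Exception):
    pass

def fetch_keys(client, server_name):
    try:
        # hypothetical transport call standing in for post_json/get_json
        return client.get(server_name)
    except OSError as e:
        # Chain the low-level error onto a domain-level one: callers only
        # need to handle KeyLookupError, but the original cause survives
        # in logs.
        raise_from(KeyLookupError("Failed to connect to remote server"), e)
```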

synapse/federation/federation_client.py

@ -33,6 +33,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import (
CodeMessageException,
Codes,
FederationDeniedError,
HttpResponseException,
SynapseError,
@ -792,10 +793,25 @@ class FederationClient(FederationBase):
defer.returnValue(content)
except HttpResponseException as e:
if e.code in [400, 404]:
err = e.to_synapse_error()
# If we receive an error response that isn't a generic error, we
# assume that the remote understands the v2 invite API and this
# is a legitimate error.
if err.errcode != Codes.UNKNOWN:
raise err
# Otherwise, we assume that the remote server doesn't understand
# the v2 invite API.
if room_version in (RoomVersions.V1, RoomVersions.V2):
pass # We'll fall through
else:
raise Exception("Remote server is too old")
raise SynapseError(
400,
"User's homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
elif e.code == 403:
raise e.to_synapse_error()
else:

synapse/federation/federation_server.py

@ -25,9 +25,10 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
FederationError,
IncompatibleRoomVersionError,
NotFoundError,
@ -239,8 +240,9 @@ class FederationServer(FederationBase):
f = failure.Failure()
pdu_results[event_id] = {"error": str(e)}
logger.error(
"Failed to handle PDU %s: %s",
event_id, f.getTraceback().rstrip(),
"Failed to handle PDU %s",
event_id,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
yield concurrently_execute(
@ -386,6 +388,13 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
def on_invite_request(self, origin, content, room_version):
if room_version not in KNOWN_ROOM_VERSIONS:
raise SynapseError(
400,
"Homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
@ -877,6 +886,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
def on_edu(self, edu_type, origin, content):
"""Overrides FederationHandlerRegistry
"""
if not self.config.use_presence and edu_type == "m.presence":
return
handler = self.edu_handlers.get(edu_type)
if handler:
return super(ReplicationFederationHandlerRegistry, self).on_edu(

synapse/federation/transport/server.py

@ -393,7 +393,7 @@ class FederationStateServlet(BaseFederationServlet):
return self.handler.on_context_state_request(
origin,
context,
parse_string_from_args(query, "event_id", None),
parse_string_from_args(query, "event_id", None, required=True),
)
@ -404,7 +404,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
return self.handler.on_state_ids_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None),
parse_string_from_args(query, "event_id", None, required=True),
)
@ -736,7 +736,8 @@ class PublicRoomList(BaseFederationServlet):
data = yield self.handler.get_local_public_room_list(
limit, since_token,
network_tuple=network_tuple
network_tuple=network_tuple,
from_federation=True,
)
defer.returnValue((200, data))
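
Passing ``required=True`` makes a missing ``event_id`` fail the request up
front instead of silently passing ``None`` through. A simplified sketch of
what such a helper does (this is not the real
``synapse.http.servlet.parse_string_from_args`` implementation):

```python
from synapse.api.errors import Codes, SynapseError

def parse_string_from_args(args, name, default=None, required=False):
    # args maps bytes keys to lists of bytes values, as twisted's
    # request.args provides them.
    values = args.get(name.encode("ascii"))
    if not values:
        if required:
            raise SynapseError(
                400, "Missing required query parameter: %s" % (name,),
                errcode=Codes.MISSING_PARAM,
            )
        return default
    return values[0].decode("ascii")
```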

synapse/groups/groups_server.py

@ -113,8 +113,7 @@ class GroupsServerHandler(object):
room_id = room_entry["room_id"]
joined_users = yield self.store.get_users_in_room(room_id)
entry = yield self.room_list_handler.generate_room_entry(
room_id, len(joined_users),
with_alias=False, allow_private=True,
room_id, len(joined_users), with_alias=False, allow_private=True,
)
entry = dict(entry) # so we don't change what's cached
entry.pop("room_id", None)
@ -544,8 +543,7 @@ class GroupsServerHandler(object):
joined_users = yield self.store.get_users_in_room(room_id)
entry = yield self.room_list_handler.generate_room_entry(
room_id, len(joined_users),
with_alias=False, allow_private=True,
room_id, len(joined_users), with_alias=False, allow_private=True,
)
if not entry:

synapse/handlers/federation.py

@ -770,10 +770,26 @@ class FederationHandler(BaseHandler):
set(auth_events.keys()) | set(state_events.keys())
)
# We now have a chunk of events plus associated state and auth chain to
# persist. We do the persistence in two steps:
# 1. Auth events and state get persisted as outliers, plus the
# backward extremities get persisted (as non-outliers).
# 2. The rest of the events in the chunk get persisted one by one, as
# each one depends on the previous event for its state.
#
# The important thing is that events in the chunk get persisted as
# non-outliers, including when those events are also in the state or
# auth chain. Caution must therefore be taken to ensure that they are
# not accidentally marked as outliers.
# Step 1a: persist auth events that *don't* appear in the chunk
ev_infos = []
for a in auth_events.values():
if a.event_id in seen_events:
# We only want to persist auth events as outliers that we haven't
# seen and aren't about to persist as part of the backfilled chunk.
if a.event_id in seen_events or a.event_id in event_map:
continue
a.internal_metadata.outlier = True
ev_infos.append({
"event": a,
@ -785,14 +801,21 @@ class FederationHandler(BaseHandler):
}
})
# Step 1b: persist the events in the chunk we fetched state for (i.e.
# the backwards extremities) as non-outliers.
for e_id in events_to_state:
# For paranoia we ensure that these events are marked as
# non-outliers
ev = event_map[e_id]
assert(not ev.internal_metadata.is_outlier())
ev_infos.append({
"event": event_map[e_id],
"event": ev,
"state": events_to_state[e_id],
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id in event_map[e_id].auth_event_ids()
for a_id in ev.auth_event_ids()
if a_id in auth_events
}
})
@ -802,12 +825,17 @@ class FederationHandler(BaseHandler):
backfilled=True,
)
# Step 2: Persist the rest of the events in the chunk one by one
events.sort(key=lambda e: e.depth)
for event in events:
if event in events_to_state:
continue
# For paranoia we ensure that these events are marked as
# non-outliers
assert(not event.internal_metadata.is_outlier())
# We store these one at a time since each event depends on the
# previous to work out the state.
# TODO: We can probably do something more clever here.

synapse/handlers/message.py

@ -436,10 +436,11 @@ class EventCreationHandler(object):
if event.is_state():
prev_state = yield self.deduplicate_state_event(event, context)
logger.info(
"Not bothering to persist duplicate state event %s", event.event_id,
)
if prev_state is not None:
logger.info(
"Not bothering to persist state event %s duplicated by %s",
event.event_id, prev_state.event_id,
)
defer.returnValue(prev_state)
yield self.handle_new_client_event(

synapse/handlers/pagination.py

@ -136,7 +136,11 @@ class PaginationHandler(object):
logger.info("[purge] complete")
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
except Exception:
logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
f = Failure()
logger.error(
"[purge] failed",
exc_info=(f.type, f.value, f.getTracebackObject()),
)
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
finally:
self._purges_in_progress_by_room.discard(room_id)
@ -254,7 +258,7 @@ class PaginationHandler(object):
})
state = None
if event_filter and event_filter.lazy_load_members():
if event_filter and event_filter.lazy_load_members() and len(events) > 0:
# TODO: remove redundant members
# FIXME: we also care about invite targets etc.
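
The same Failure-to-``exc_info`` idiom appears in several hunks of this commit
(here, in ``federation_server.py``, and in ``http/server.py``): instead of
interpolating a pre-formatted traceback string into the message, the logger is
handed an exc_info tuple. A minimal standalone sketch (illustrative names):

```python
import logging

from twisted.python.failure import Failure

logger = logging.getLogger(__name__)

def run_job(job):
    try:
        job()
    except Exception:
        f = Failure()  # captures the active exception and its traceback
        # logging renders the traceback itself from the exc_info tuple,
        # which plays better with log aggregation than embedding
        # f.getTraceback() text in the message.
        logger.error(
            "job failed", exc_info=(f.type, f.value, f.getTracebackObject()),
        )
```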

synapse/handlers/receipts.py

@ -16,8 +16,8 @@ import logging
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util import logcontext
from ._base import BaseHandler
@ -59,7 +59,9 @@ class ReceiptsHandler(BaseHandler):
if is_new:
# fire off a process in the background to send the receipt to
# remote servers
self._push_remotes([receipt])
run_as_background_process(
'push_receipts_to_remotes', self._push_remotes, receipt
)
@defer.inlineCallbacks
def _received_remote_receipt(self, origin, content):
@ -125,15 +127,13 @@ class ReceiptsHandler(BaseHandler):
defer.returnValue(True)
@logcontext.preserve_fn # caller should not yield on this
@defer.inlineCallbacks
def _push_remotes(self, receipts):
"""Given a list of receipts, works out which remote servers should be
def _push_remotes(self, receipt):
"""Given a receipt, works out which remote servers should be
poked and pokes them.
"""
try:
# TODO: Some of this stuff should be coalesced.
for receipt in receipts:
# TODO: optimise this to move some of the work to the workers.
room_id = receipt["room_id"]
receipt_type = receipt["receipt_type"]
user_id = receipt["user_id"]

synapse/handlers/register.py

@ -460,7 +460,7 @@ class RegistrationHandler(BaseHandler):
lines = response.split('\n')
json = {
"valid": lines[0] == 'true',
"error_url": "http://www.google.com/recaptcha/api/challenge?" +
"error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
"error=%s" % lines[1]
}
defer.returnValue(json)
@ -471,7 +471,7 @@ class RegistrationHandler(BaseHandler):
Used only by c/s api v1
"""
data = yield self.captcha_client.post_urlencoded_get_raw(
"http://www.google.com:80/recaptcha/api/verify",
"http://www.recaptcha.net:80/recaptcha/api/verify",
args={
'privatekey': private_key,
'remoteip': ip_addr,

synapse/handlers/room_list.py

@ -50,16 +50,17 @@ class RoomListHandler(BaseHandler):
def get_local_public_room_list(self, limit=None, since_token=None,
search_filter=None,
network_tuple=EMPTY_THIRD_PARTY_ID,):
network_tuple=EMPTY_THIRD_PARTY_ID,
from_federation=False):
"""Generate a local public room list.
There are multiple different lists: the main one plus one per third
party network. A client can ask for a specific list or to return all.
Args:
limit (int)
since_token (str)
search_filter (dict)
limit (int|None)
since_token (str|None)
search_filter (dict|None)
network_tuple (ThirdPartyInstanceID): Which public list to use.
This can be (None, None) to indicate the main list, or a particular
appservice and network id to use an appservice specific one.
@ -87,14 +88,30 @@ class RoomListHandler(BaseHandler):
return self.response_cache.wrap(
key,
self._get_public_room_list,
limit, since_token, network_tuple=network_tuple,
limit, since_token,
network_tuple=network_tuple, from_federation=from_federation,
)
@defer.inlineCallbacks
def _get_public_room_list(self, limit=None, since_token=None,
search_filter=None,
network_tuple=EMPTY_THIRD_PARTY_ID,
from_federation=False,
timeout=None,):
"""Generate a public room list.
Args:
limit (int|None): Maximum amount of rooms to return.
since_token (str|None)
search_filter (dict|None): Dictionary to filter rooms by.
network_tuple (ThirdPartyInstanceID): Which public list to use.
This can be (None, None) to indicate the main list, or a particular
appservice and network id to use an appservice specific one.
Setting to None returns all public rooms across all lists.
from_federation (bool): Whether this request originated from a
federating server or a client. Used for room filtering.
timeout (int|None): Amount of seconds to wait for a response before
timing out.
"""
if since_token and since_token != "END":
since_token = RoomListNextBatch.from_token(since_token)
else:
@ -217,7 +234,8 @@ class RoomListHandler(BaseHandler):
yield concurrently_execute(
lambda r: self._append_room_entry_to_chunk(
r, rooms_to_num_joined[r],
chunk, limit, search_filter
chunk, limit, search_filter,
from_federation=from_federation,
),
batch, 5,
)
@ -288,23 +306,51 @@ class RoomListHandler(BaseHandler):
@defer.inlineCallbacks
def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk, limit,
search_filter):
search_filter, from_federation=False):
"""Generate the entry for a room in the public room list and append it
to the `chunk` if it matches the search filter
Args:
room_id (str): The ID of the room.
num_joined_users (int): The number of joined users in the room.
chunk (list)
limit (int|None): Maximum amount of rooms to display. Function will
return if length of chunk is greater than limit + 1.
search_filter (dict|None)
from_federation (bool): Whether this request originated from a
federating server or a client. Used for room filtering.
"""
if limit and len(chunk) > limit + 1:
# We've already got enough, so lets just drop it.
return
result = yield self.generate_room_entry(room_id, num_joined_users)
if not result:
return
if result and _matches_room_entry(result, search_filter):
if from_federation and not result.get("m.federate", True):
# This is a room that other servers cannot join. Do not show them
# this room.
return
if _matches_room_entry(result, search_filter):
chunk.append(result)
@cachedInlineCallbacks(num_args=1, cache_context=True)
def generate_room_entry(self, room_id, num_joined_users, cache_context,
with_alias=True, allow_private=False):
"""Returns the entry for a room
Args:
room_id (str): The room's ID.
num_joined_users (int): Number of users in the room.
cache_context: Information for cached responses.
with_alias (bool): Whether to return the room's aliases in the result.
allow_private (bool): Whether invite-only rooms should be shown.
Returns:
Deferred[dict|None]: Returns a room entry as a dictionary, or None if this
room was determined not to be shown publicly.
"""
result = {
"room_id": room_id,
@ -318,6 +364,7 @@ class RoomListHandler(BaseHandler):
event_map = yield self.store.get_events([
event_id for key, event_id in iteritems(current_state_ids)
if key[0] in (
EventTypes.Create,
EventTypes.JoinRules,
EventTypes.Name,
EventTypes.Topic,
@ -334,12 +381,17 @@ class RoomListHandler(BaseHandler):
}
# Double check that this is actually a public room.
join_rules_event = current_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
join_rule = join_rules_event.content.get("join_rule", None)
if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
defer.returnValue(None)
# Return whether this room is open to federation users or not
create_event = current_state.get((EventTypes.Create, ""))
result["m.federate"] = create_event.content.get("m.federate", True)
if with_alias:
aliases = yield self.store.get_aliases_for_room(
room_id, on_invalidate=cache_context.invalidate
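
Condensed, the new federation filter amounts to the following predicate
(simplified sketch of the logic above, not the handler's actual structure):

```python
def should_list_room(entry, from_federation):
    # A create event with "m.federate": false means remote servers can
    # never join the room, so it is hidden from federated room-list
    # requests while remaining visible to local clients.
    if from_federation and not entry.get("m.federate", True):
        return False
    return True
```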

synapse/http/federation/matrix_federation_agent.py

@ -68,9 +68,13 @@ class MatrixFederationAgent(object):
TLS policy to use for fetching .well-known files. None to use a default
(browser-like) implementation.
srv_resolver (SrvResolver|None):
_srv_resolver (SrvResolver|None):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
_well_known_cache (TTLCache|None):
TTLCache impl for storing cached well-known lookups. None to use a default
implementation.
"""
def __init__(

synapse/http/server.py

@ -169,18 +169,18 @@ def _return_html_error(f, request):
)
else:
logger.error(
"Failed handle request %r: %s",
"Failed handle request %r",
request,
f.getTraceback().rstrip(),
exc_info=(f.type, f.value, f.getTracebackObject()),
)
else:
code = http_client.INTERNAL_SERVER_ERROR
msg = "Internal server error"
logger.error(
"Failed handle request %r: %s",
"Failed handle request %r",
request,
f.getTraceback().rstrip(),
exc_info=(f.type, f.value, f.getTracebackObject()),
)
body = HTML_ERROR_TEMPLATE.format(

synapse/push/httppusher.py

@ -32,9 +32,25 @@ if six.PY3:
logger = logging.getLogger(__name__)
http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
http_push_processed_counter = Counter(
"synapse_http_httppusher_http_pushes_processed",
"Number of push notifications successfully sent",
)
http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "")
http_push_failed_counter = Counter(
"synapse_http_httppusher_http_pushes_failed",
"Number of push notifications which failed",
)
http_badges_processed_counter = Counter(
"synapse_http_httppusher_badge_updates_processed",
"Number of badge updates successfully sent",
)
http_badges_failed_counter = Counter(
"synapse_http_httppusher_badge_updates_failed",
"Number of badge updates which failed",
)
class HttpPusher(object):
@ -81,6 +97,11 @@ class HttpPusher(object):
pusherdict['pushkey'],
)
if self.data is None:
raise PusherConfigException(
"data can not be null for HTTP pusher"
)
if 'url' not in self.data:
raise PusherConfigException(
"'url' required in data for HTTP pusher"
@ -346,6 +367,10 @@ class HttpPusher(object):
@defer.inlineCallbacks
def _send_badge(self, badge):
"""
Args:
badge (int): number of unread messages
"""
logger.info("Sending updated badge count %d to %s", badge, self.name)
d = {
'notification': {
@ -366,14 +391,11 @@ class HttpPusher(object):
}
}
try:
resp = yield self.http_client.post_json_get_json(self.url, d)
yield self.http_client.post_json_get_json(self.url, d)
http_badges_processed_counter.inc()
except Exception as e:
logger.warning(
"Failed to send badge count to %s: %s %s",
self.name, type(e), e,
)
defer.returnValue(False)
rejected = []
if 'rejected' in resp:
rejected = resp['rejected']
defer.returnValue(rejected)
http_badges_failed_counter.inc()
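
The new counters follow the standard ``prometheus_client`` pattern; a minimal
sketch of defining and incrementing one (illustrative metric name, not
Synapse's registry wiring):

```python
from prometheus_client import Counter

badge_updates_processed = Counter(
    "example_badge_updates_processed",
    "Number of badge updates successfully sent",
)

def record_badge_send(send):
    send()
    # Counters only go up; rates are derived at query time, e.g. with
    # rate(example_badge_updates_processed[5m]) in PromQL.
    badge_updates_processed.inc()
```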

synapse/push/pusher.py

@ -56,7 +56,7 @@ class PusherFactory(object):
f = self.pusher_types.get(kind, None)
if not f:
return None
logger.info("creating %s pusher for %r", kind, pusherdict)
logger.debug("creating %s pusher for %r", kind, pusherdict)
return f(self.hs, pusherdict)
def _create_email_pusher(self, _hs, pusherdict):

synapse/push/pusherpool.py

@ -19,6 +19,7 @@ import logging
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
from synapse.push.pusher import PusherFactory
logger = logging.getLogger(__name__)
@ -140,6 +141,10 @@ class PusherPool:
@defer.inlineCallbacks
def on_new_notifications(self, min_stream_id, max_stream_id):
if not self.pushers:
# nothing to do here.
return
try:
users_affected = yield self.store.get_push_action_users_in_range(
min_stream_id, max_stream_id
@ -155,6 +160,10 @@ class PusherPool:
@defer.inlineCallbacks
def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
if not self.pushers:
# nothing to do here.
return
try:
# Need to subtract 1 from the minimum because the lower bound here
# is not inclusive
@ -214,6 +223,15 @@ class PusherPool:
"""
try:
p = self.pusher_factory.create_pusher(pusherdict)
except PusherConfigException as e:
logger.warning(
"Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
pusherdict.get('user_name'),
pusherdict.get('app_id'),
pusherdict.get('pushkey'),
e,
)
return
except Exception:
logger.exception("Couldn't start a pusher: caught Exception")
return

synapse/replication/slave/storage/_base.py

@ -59,12 +59,7 @@ class BaseSlavedStore(SQLBaseStore):
members_changed = set(row.keys[1:])
self._invalidate_state_caches(room_id, members_changed)
else:
try:
getattr(self, row.cache_func).invalidate(tuple(row.keys))
except AttributeError:
# We probably haven't pulled in the cache in this worker,
# which is fine.
pass
self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))
def _invalidate_cache_and_stream(self, txn, cache_func, keys):
txn.call_after(cache_func.invalidate, keys)
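
The deleted try/except has presumably moved into a shared
``_attempt_to_invalidate_cache`` helper; based on the removed lines, a sketch
of what it does:

```python
def _attempt_to_invalidate_cache(self, cache_name, key):
    # Invalidate a named cache if this process actually has it; a worker
    # that never pulled the cache in can safely skip the invalidation.
    try:
        getattr(self, cache_name).invalidate(key)
    except AttributeError:
        pass
```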

synapse/replication/slave/storage/presence.py

@ -54,8 +54,11 @@ class SlavedPresenceStore(BaseSlavedStore):
def stream_positions(self):
result = super(SlavedPresenceStore, self).stream_positions()
if self.hs.config.use_presence:
position = self._presence_id_gen.get_current_token()
result["presence"] = position
return result
def process_replication_rows(self, stream_name, token, rows):

synapse/replication/tcp/client.py

@ -39,7 +39,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
Accepts a handler that will be called when new data is available or data
is required.
"""
maxDelay = 5 # Try at least once every N seconds
maxDelay = 30 # Try at least once every N seconds
def __init__(self, hs, client_name, handler):
self.client_name = client_name
@ -54,7 +54,6 @@ class ReplicationClientFactory(ReconnectingClientFactory):
def buildProtocol(self, addr):
logger.info("Connected to replication: %r", addr)
self.resetDelay()
return ClientReplicationStreamProtocol(
self.client_name, self.server_name, self._clock, self.handler
)
@ -90,15 +89,18 @@ class ReplicationClientHandler(object):
# Used for tests.
self.awaiting_syncs = {}
# The factory used to create connections.
self.factory = None
def start_replication(self, hs):
"""Helper method to start a replication connection to the remote server
using TCP.
"""
client_name = hs.config.worker_name
factory = ReplicationClientFactory(hs, client_name, self)
self.factory = ReplicationClientFactory(hs, client_name, self)
host = hs.config.worker_replication_host
port = hs.config.worker_replication_port
hs.get_reactor().connectTCP(host, port, factory)
hs.get_reactor().connectTCP(host, port, self.factory)
def on_rdata(self, stream_name, token, rows):
"""Called when we get new replication data. By default this just pokes
@ -140,6 +142,7 @@ class ReplicationClientHandler(object):
args["account_data"] = user_account_data
elif room_account_data:
args["account_data"] = room_account_data
return args
def get_currently_syncing_users(self):
@ -204,3 +207,14 @@ class ReplicationClientHandler(object):
for cmd in self.pending_commands:
connection.send_command(cmd)
self.pending_commands = []
def finished_connecting(self):
"""Called when we have successfully subscribed and caught up to all
streams we're interested in.
"""
logger.info("Finished connecting to server")
# We don't reset the delay any earlier: otherwise, if there is a problem
# during startup, we'll end up tight-looping reconnecting to the server.
self.factory.resetDelay()
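The two changes above interact: maxDelay caps ReconnectingClientFactory's exponential backoff at 30 seconds, and resetDelay() now runs only from finished_connecting() rather than from buildProtocol(). A minimal self-contained sketch of that pattern (assumed names, not Synapse's actual factory):

from twisted.internet.protocol import Protocol, ReconnectingClientFactory

class Echo(Protocol):
    def connectionMade(self):
        # We cheat here and treat connectionMade as "caught up"; the real
        # code waits until all streams have received a POSITION.
        self.factory.finished_connecting()

class ExampleFactory(ReconnectingClientFactory):
    maxDelay = 30  # retry at most every 30s after repeated failures

    def buildProtocol(self, addr):
        # Deliberately do NOT call self.resetDelay() here.
        p = Echo()
        p.factory = self
        return p

    def finished_connecting(self):
        # Resetting here (not on mere TCP connect) avoids tight-looping when
        # the server accepts connections but drops us during startup.
        self.resetDelay()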

View File

@ -127,8 +127,11 @@ class RdataCommand(Command):
class PositionCommand(Command):
"""Sent by the client to tell the client the stream postition without
"""Sent by the server to tell the client the stream postition without
needing to send an RDATA.
Sent to the client after all missing updates for a stream have been sent,
at which point the client is up to date.
"""
NAME = "POSITION"

View File

@ -268,7 +268,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
if "\n" in string:
raise Exception("Unexpected newline in command: %r", string)
self.sendLine(string.encode("utf-8"))
encoded_string = string.encode("utf-8")
if len(encoded_string) > self.MAX_LENGTH:
raise Exception(
"Failed to send command %s as too long (%d > %d)" % (
cmd.NAME,
len(encoded_string), self.MAX_LENGTH,
)
)
self.sendLine(encoded_string)
self.last_sent_command = self.clock.time_msec()
@ -361,6 +371,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
def id(self):
return "%s-%s" % (self.name, self.conn_id)
def lineLengthExceeded(self, line):
"""Called when we receive a line that is above the maximum line length
"""
self.send_error("Line length exceeded")
class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
@ -511,6 +526,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.server_name = server_name
self.handler = handler
# Set of stream names that have been subscribe to, but haven't yet
# caught up with. This is used to track when the client has been fully
# connected to the remote.
self.streams_connecting = set()
# Map of stream to batched updates. See RdataCommand for info on how
# batching works.
self.pending_batches = {}
@ -533,6 +553,10 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
# We've now finished connecting, so inform the client handler
self.handler.update_connection(self)
# This will happen if we don't actually subscribe to any streams
if not self.streams_connecting:
self.handler.finished_connecting()
def on_SERVER(self, cmd):
if cmd.data != self.server_name:
logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
@ -562,6 +586,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
return self.handler.on_rdata(stream_name, cmd.token, rows)
def on_POSITION(self, cmd):
# When we get a `POSITION` command it means we've finished getting
# missing updates for the given stream, and are now up to date.
self.streams_connecting.discard(cmd.stream_name)
if not self.streams_connecting:
self.handler.finished_connecting()
return self.handler.on_position(cmd.stream_name, cmd.token)
def on_SYNC(self, cmd):
@ -578,6 +608,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.id(), stream_name, token
)
self.streams_connecting.add(stream_name)
self.send_command(ReplicateCommand(stream_name, token))
def on_connection_closed(self):
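Putting the hunks above together: subscribing (REPLICATE) adds the stream name to streams_connecting, each POSITION discards one, and an empty set triggers finished_connecting(). A self-contained sketch (assumed names) of that catch-up tracking:

class CatchupTracker:
    def __init__(self, on_done):
        self.streams_connecting = set()
        self.on_done = on_done

    def subscribe(self, stream_name):
        self.streams_connecting.add(stream_name)

    def on_position(self, stream_name):
        # POSITION means the stream has no more missing updates
        self.streams_connecting.discard(stream_name)
        if not self.streams_connecting:
            self.on_done()

tracker = CatchupTracker(lambda: print("finished connecting"))
tracker.subscribe("events")
tracker.subscribe("typing")
tracker.on_position("events")
tracker.on_position("typing")  # prints "finished connecting"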

View File

@ -33,7 +33,7 @@ RECAPTCHA_TEMPLATE = """
<title>Authentication</title>
<meta name='viewport' content='width=device-width, initial-scale=1,
user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
<script src="https://www.google.com/recaptcha/api.js"
<script src="https://www.recaptcha.net/recaptcha/api.js"
async defer></script>
<script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">

View File

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -133,8 +134,15 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
logger.debug("Responding to media request with responder %s")
add_file_headers(request, media_type, file_size, upload_name)
try:
with responder:
yield responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
finish_request(request)
@ -206,8 +214,7 @@ def get_filename_from_headers(headers):
Content-Disposition HTTP header.
Args:
headers (twisted.web.http_headers.Headers): The HTTP
request headers.
headers (dict[bytes, list[bytes]]): The HTTP request headers.
Returns:
A Unicode string of the filename, or None.
@ -218,23 +225,12 @@ def get_filename_from_headers(headers):
if not content_disposition[0]:
return
# dict of unicode: bytes, corresponding to the key value sections of the
# Content-Disposition header.
params = {}
parts = content_disposition[0].split(b";")
for i in parts:
# Split into key-value pairs, if able
# We don't care about things like `inline`, so throw it out
if b"=" not in i:
continue
key, value = i.strip().split(b"=")
params[key.decode('ascii')] = value
_, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get("filename*", None)
upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
@ -260,12 +256,68 @@ def get_filename_from_headers(headers):
# If there isn't check for an ascii name.
if not upload_name:
upload_name_ascii = params.get("filename", None)
upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
# Make sure there are no %-quoted bytes. If there are, reject it as
# invalid ASCII.
if b"%" not in upload_name_ascii:
upload_name = upload_name_ascii.decode('ascii')
# This may be None here, indicating we did not find a matching name.
return upload_name
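The filename* branch implements RFC 5987 decoding: strip the utf-8'' prefix, percent-decode, then decode as UTF-8. A Python 3 sketch using urllib.parse rather than Synapse's helpers:

from urllib.parse import unquote_to_bytes

raw = b"utf-8''foo%C2%A3bar"
assert raw.lower().startswith(b"utf-8''")
# drop the 7-byte "utf-8''" prefix, then percent-decode and decode UTF-8
name = unquote_to_bytes(raw[7:]).decode("utf-8")
assert name == u"foo\u00a3bar"  # "foo£bar"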
def _parse_header(line):
"""Parse a Content-type like header.
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
line (bytes): header to be parsed
Returns:
Tuple[bytes, dict[bytes, bytes]]:
the main content-type, followed by the parameter dictionary
"""
parts = _parseparam(b';' + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find(b'=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
# strip double-quotes
if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
def _parseparam(s):
"""Generator which splits the input on ;, respecting double-quoted sequences
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
s (bytes): header to be parsed
Returns:
Iterable[bytes]: the split input
"""
while s[:1] == b';':
s = s[1:]
# look for the next ;
end = s.find(b';')
# if there is an odd number of " marks between here and the next ;, skip to the
# next ; instead
while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
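A usage example for the two helpers above (assuming they are importable), exercising the same kind of input as the unit tests further down; note the quoted semicolon survives the split:

ctype, params = _parse_header(b'attachment; filename="foo;bar.txt"')
assert ctype == b"attachment"
assert params == {b"filename": b"foo;bar.txt"}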

View File

@ -7,9 +7,9 @@ import synapse.handlers.auth
import synapse.handlers.deactivate_account
import synapse.handlers.device
import synapse.handlers.e2e_keys
import synapse.handlers.message
import synapse.handlers.room
import synapse.handlers.room_member
import synapse.handlers.message
import synapse.handlers.set_password
import synapse.rest.media.v1.media_repository
import synapse.server_notices.server_notices_manager

View File

@ -4,7 +4,7 @@
<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
<link rel="stylesheet" href="style.css">
<script src="js/jquery-2.1.3.min.js"></script>
<script src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js"></script>
<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
<script src="register_config.js"></script>
<script src="js/register.js"></script>
</head>

View File

@ -30,6 +30,7 @@ from synapse.api.errors import StoreError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import get_domain_from_id
from synapse.util import batch_iter
from synapse.util.caches.descriptors import Cache
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.stringutils import exception_to_unicode
@ -1327,7 +1328,13 @@ class SQLBaseStore(object):
"""
txn.call_after(self._invalidate_state_caches, room_id, members_changed)
keys = itertools.chain([room_id], members_changed)
# We need to be careful that the size of the `members_changed` list
# isn't so large that it causes problems sending over replication, so we
# send them in chunks.
# Max line length is 16K, and max user ID length is 255, so 50 should
# be safe.
for chunk in batch_iter(members_changed, 50):
keys = itertools.chain([room_id], chunk)
self._send_invalidation_to_replication(
txn, _CURRENT_STATE_CACHE_NAME, keys,
)
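50 keys at up to 255 bytes each is roughly 12.7KB, comfortably under the 16K line limit. batch_iter itself is not shown in this diff; a sketch of one way such a helper can be written:

from itertools import islice

def batch_iter(iterable, size):
    # yield fixed-size tuples from any iterable; the final tuple holds
    # whatever remains
    sourceiter = iter(iterable)
    # iter(callable, sentinel) keeps calling until the empty tuple appears
    return iter(lambda: tuple(islice(sourceiter, size)), ())

assert list(batch_iter(range(7), 3)) == [(0, 1, 2), (3, 4, 5), (6,)]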
@ -1342,15 +1349,43 @@ class SQLBaseStore(object):
changed
"""
for member in members_changed:
self.get_rooms_for_user_with_stream_ordering.invalidate((member,))
self._attempt_to_invalidate_cache(
"get_rooms_for_user_with_stream_ordering", (member,),
)
for host in set(get_domain_from_id(u) for u in members_changed):
self.is_host_joined.invalidate((room_id, host))
self.was_host_joined.invalidate((room_id, host))
self._attempt_to_invalidate_cache(
"is_host_joined", (room_id, host,),
)
self._attempt_to_invalidate_cache(
"was_host_joined", (room_id, host,),
)
self.get_users_in_room.invalidate((room_id,))
self.get_room_summary.invalidate((room_id,))
self.get_current_state_ids.invalidate((room_id,))
self._attempt_to_invalidate_cache(
"get_users_in_room", (room_id,),
)
self._attempt_to_invalidate_cache(
"get_room_summary", (room_id,),
)
self._attempt_to_invalidate_cache(
"get_current_state_ids", (room_id,),
)
def _attempt_to_invalidate_cache(self, cache_name, key):
"""Attempts to invalidate the cache of the given name, ignoring if the
cache doesn't exist. Mainly used for invalidating caches on workers,
where they may not have the cache.
Args:
cache_name (str)
key (tuple)
"""
try:
getattr(self, cache_name).invalidate(key)
except AttributeError:
# We probably haven't pulled in the cache in this worker,
# which is fine.
pass
def _send_invalidation_to_replication(self, txn, cache_name, keys):
"""Notifies replication that given cache has been invalidated.
@ -1568,6 +1603,14 @@ class SQLBaseStore(object):
return cls.cursor_to_dict(txn)
@property
def database_engine_name(self):
return self.database_engine.module.__name__
def get_server_version(self):
"""Returns a string describing the server version number"""
return self.database_engine.server_version
class _RollbackButIsFineException(Exception):
""" This exception is used to rollback a transaction without implying

View File

@ -23,6 +23,7 @@ class PostgresEngine(object):
self.module = database_module
self.module.extensions.register_type(self.module.extensions.UNICODE)
self.synchronous_commit = database_config.get("synchronous_commit", True)
self._version = None # unknown as yet
def check_database(self, txn):
txn.execute("SHOW SERVER_ENCODING")
@ -87,3 +88,27 @@ class PostgresEngine(object):
"""
txn.execute("SELECT nextval('state_group_id_seq')")
return txn.fetchone()[0]
@property
def server_version(self):
"""Returns a string giving the server version. For example: '8.1.5'
Returns:
string
"""
# note that this is a bit of a hack because it relies on on_new_connection
# having been called at least once. Still, that should be a safe bet here.
numver = self._version
assert numver is not None
# https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQSERVERVERSION
if numver >= 100000:
return "%i.%i" % (
numver / 10000, numver % 10000,
)
else:
return "%i.%i.%i" % (
numver / 10000,
(numver % 10000) / 100,
numver % 100,
)
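Worked examples of that decoding (illustrative values): since "%i" truncates floats, the plain / division above behaves the same on Python 2 and 3.

# PostgreSQL >= 10 packs the version as major*10000 + minor
assert "%i.%i" % (100002 / 10000, 100002 % 10000) == "10.2"
# older releases pack major*10000 + minor*100 + patch
assert "%i.%i.%i" % (
    90504 / 10000, (90504 % 10000) / 100, 90504 % 100,
) == "9.5.4"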

View File

@ -70,6 +70,15 @@ class Sqlite3Engine(object):
self._current_state_group_id += 1
return self._current_state_group_id
@property
def server_version(self):
"""Gets a string giving the server version. For example: '3.22.0'
Returns:
string
"""
return "%i.%i.%i" % self.module.sqlite_version_info
# Following functions taken from: https://github.com/coleifer/peewee

View File

@ -295,6 +295,39 @@ class RegistrationWorkerStore(SQLBaseStore):
return ret['user_id']
return None
@defer.inlineCallbacks
def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
yield self._simple_upsert("user_threepids", {
"medium": medium,
"address": address,
}, {
"user_id": user_id,
"validated_at": validated_at,
"added_at": added_at,
})
@defer.inlineCallbacks
def user_get_threepids(self, user_id):
ret = yield self._simple_select_list(
"user_threepids", {
"user_id": user_id
},
['medium', 'address', 'validated_at', 'added_at'],
'user_get_threepids'
)
defer.returnValue(ret)
def user_delete_threepid(self, user_id, medium, address):
return self._simple_delete(
"user_threepids",
keyvalues={
"user_id": user_id,
"medium": medium,
"address": address,
},
desc="user_delete_threepids",
)
class RegistrationStore(RegistrationWorkerStore,
background_updates.BackgroundUpdateStore):
@ -632,39 +665,6 @@ class RegistrationStore(RegistrationWorkerStore,
defer.returnValue(res if res else False)
@defer.inlineCallbacks
def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
yield self._simple_upsert("user_threepids", {
"medium": medium,
"address": address,
}, {
"user_id": user_id,
"validated_at": validated_at,
"added_at": added_at,
})
@defer.inlineCallbacks
def user_get_threepids(self, user_id):
ret = yield self._simple_select_list(
"user_threepids", {
"user_id": user_id
},
['medium', 'address', 'validated_at', 'added_at'],
'user_get_threepids'
)
defer.returnValue(ret)
def user_delete_threepid(self, user_id, medium, address):
return self._simple_delete(
"user_threepids",
keyvalues={
"user_id": user_id,
"medium": medium,
"address": address,
},
desc="user_delete_threepids",
)
@defer.inlineCallbacks
def save_or_get_3pid_guest_access_token(
self, medium, address, access_token, inviter_user_id

View File

@ -24,13 +24,17 @@ from synapse.api.errors import AuthError
from synapse.types import UserID
from tests import unittest
from tests.utils import register_federation_servlets
from ..utils import (
DeferredMockCallable,
MockClock,
MockHttpResource,
setup_test_homeserver,
)
# Some local users to test with
U_APPLE = UserID.from_string("@apple:test")
U_BANANA = UserID.from_string("@banana:test")
# Remote user
U_ONION = UserID.from_string("@onion:farm")
# Test room id
ROOM_ID = "a-room"
def _expect_edu_transaction(edu_type, content, origin="test"):
@ -46,30 +50,21 @@ def _make_edu_transaction_json(edu_type, content):
return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8')
class TypingNotificationsTestCase(unittest.TestCase):
"""Tests typing notifications to rooms."""
class TypingNotificationsTestCase(unittest.HomeserverTestCase):
servlets = [register_federation_servlets]
@defer.inlineCallbacks
def setUp(self):
self.clock = MockClock()
def make_homeserver(self, reactor, clock):
# we mock out the keyring so as to skip the authentication check on the
# federation API call.
mock_keyring = Mock(spec=["verify_json_for_server"])
mock_keyring.verify_json_for_server.return_value = defer.succeed(True)
self.mock_http_client = Mock(spec=[])
self.mock_http_client.put_json = DeferredMockCallable()
# we mock out the federation client too
mock_federation_client = Mock(spec=["put_json"])
mock_federation_client.put_json.return_value = defer.succeed((200, "OK"))
self.mock_federation_resource = MockHttpResource()
mock_notifier = Mock()
self.on_new_event = mock_notifier.on_new_event
self.auth = Mock(spec=[])
self.state_handler = Mock()
hs = yield setup_test_homeserver(
self.addCleanup,
"test",
auth=self.auth,
clock=self.clock,
datastore=Mock(
hs = self.setup_test_homeserver(
datastore=(Mock(
spec=[
# Bits that Federation needs
"prep_send_transaction",
@ -82,16 +77,21 @@ class TypingNotificationsTestCase(unittest.TestCase):
"get_user_directory_stream_pos",
"get_current_state_deltas",
]
),
state_handler=self.state_handler,
handlers=Mock(),
notifier=mock_notifier,
resource_for_client=Mock(),
resource_for_federation=self.mock_federation_resource,
http_client=self.mock_http_client,
keyring=Mock(),
)),
notifier=Mock(),
http_client=mock_federation_client,
keyring=mock_keyring,
)
return hs
def prepare(self, reactor, clock, hs):
# the tests assume that we are starting at unix time 1000
reactor.pump((1000, ))
mock_notifier = hs.get_notifier()
self.on_new_event = mock_notifier.on_new_event
self.handler = hs.get_typing_handler()
self.event_source = hs.get_event_sources().sources["typing"]
@ -109,13 +109,12 @@ class TypingNotificationsTestCase(unittest.TestCase):
self.datastore.get_received_txn_response = get_received_txn_response
self.room_id = "a-room"
self.room_members = []
def check_joined_room(room_id, user_id):
if user_id not in [u.to_string() for u in self.room_members]:
raise AuthError(401, "User is not in the room")
hs.get_auth().check_joined_room = check_joined_room
def get_joined_hosts_for_room(room_id):
return set(member.domain for member in self.room_members)
@ -124,8 +123,7 @@ class TypingNotificationsTestCase(unittest.TestCase):
def get_current_user_in_room(room_id):
return set(str(u) for u in self.room_members)
self.state_handler.get_current_user_in_room = get_current_user_in_room
hs.get_state_handler().get_current_user_in_room = get_current_user_in_room
self.datastore.get_user_directory_stream_pos.return_value = (
# we deliberately return a non-None stream pos to avoid doing an initial_spam
@ -134,230 +132,208 @@ class TypingNotificationsTestCase(unittest.TestCase):
self.datastore.get_current_state_deltas.return_value = None
self.auth.check_joined_room = check_joined_room
self.datastore.get_to_device_stream_token = lambda: 0
self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
# Some local users to test with
self.u_apple = UserID.from_string("@apple:test")
self.u_banana = UserID.from_string("@banana:test")
# Remote user
self.u_onion = UserID.from_string("@onion:farm")
@defer.inlineCallbacks
def test_started_typing_local(self):
self.room_members = [self.u_apple, self.u_banana]
self.room_members = [U_APPLE, U_BANANA]
self.assertEquals(self.event_source.get_current_key(), 0)
yield self.handler.started_typing(
target_user=self.u_apple,
auth_user=self.u_apple,
room_id=self.room_id,
self.successResultOf(self.handler.started_typing(
target_user=U_APPLE,
auth_user=U_APPLE,
room_id=ROOM_ID,
timeout=20000,
)
))
self.on_new_event.assert_has_calls(
[call('typing_key', 1, rooms=[self.room_id])]
[call('typing_key', 1, rooms=[ROOM_ID])]
)
self.assertEquals(self.event_source.get_current_key(), 1)
events = yield self.event_source.get_new_events(
room_ids=[self.room_id], from_key=0
events = self.event_source.get_new_events(
room_ids=[ROOM_ID], from_key=0
)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": self.room_id,
"content": {"user_ids": [self.u_apple.to_string()]},
"room_id": ROOM_ID,
"content": {"user_ids": [U_APPLE.to_string()]},
}
],
)
@defer.inlineCallbacks
def test_started_typing_remote_send(self):
self.room_members = [self.u_apple, self.u_onion]
self.room_members = [U_APPLE, U_ONION]
put_json = self.mock_http_client.put_json
put_json.expect_call_and_return(
call(
self.successResultOf(self.handler.started_typing(
target_user=U_APPLE,
auth_user=U_APPLE,
room_id=ROOM_ID,
timeout=20000,
))
put_json = self.hs.get_http_client().put_json
put_json.assert_called_once_with(
"farm",
path="/_matrix/federation/v1/send/1000000/",
data=_expect_edu_transaction(
"m.typing",
content={
"room_id": self.room_id,
"user_id": self.u_apple.to_string(),
"room_id": ROOM_ID,
"user_id": U_APPLE.to_string(),
"typing": True,
},
),
json_data_callback=ANY,
long_retries=True,
backoff_on_404=True,
),
defer.succeed((200, "OK")),
)
yield self.handler.started_typing(
target_user=self.u_apple,
auth_user=self.u_apple,
room_id=self.room_id,
timeout=20000,
)
yield put_json.await_calls()
@defer.inlineCallbacks
def test_started_typing_remote_recv(self):
self.room_members = [self.u_apple, self.u_onion]
self.room_members = [U_APPLE, U_ONION]
self.assertEquals(self.event_source.get_current_key(), 0)
(code, response) = yield self.mock_federation_resource.trigger(
(request, channel) = self.make_request(
"PUT",
"/_matrix/federation/v1/send/1000000/",
_make_edu_transaction_json(
"m.typing",
content={
"room_id": self.room_id,
"user_id": self.u_onion.to_string(),
"room_id": ROOM_ID,
"user_id": U_ONION.to_string(),
"typing": True,
},
),
federation_auth_origin=b'farm',
)
self.render(request)
self.assertEqual(channel.code, 200)
self.on_new_event.assert_has_calls(
[call('typing_key', 1, rooms=[self.room_id])]
[call('typing_key', 1, rooms=[ROOM_ID])]
)
self.assertEquals(self.event_source.get_current_key(), 1)
events = yield self.event_source.get_new_events(
room_ids=[self.room_id], from_key=0
events = self.event_source.get_new_events(
room_ids=[ROOM_ID], from_key=0
)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": self.room_id,
"content": {"user_ids": [self.u_onion.to_string()]},
"room_id": ROOM_ID,
"content": {"user_ids": [U_ONION.to_string()]},
}
],
)
@defer.inlineCallbacks
def test_stopped_typing(self):
self.room_members = [self.u_apple, self.u_banana, self.u_onion]
self.room_members = [U_APPLE, U_BANANA, U_ONION]
put_json = self.mock_http_client.put_json
put_json.expect_call_and_return(
call(
# Gut-wrenching
from synapse.handlers.typing import RoomMember
member = RoomMember(ROOM_ID, U_APPLE.to_string())
self.handler._member_typing_until[member] = 1002000
self.handler._room_typing[ROOM_ID] = set([U_APPLE.to_string()])
self.assertEquals(self.event_source.get_current_key(), 0)
self.successResultOf(self.handler.stopped_typing(
target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID
))
self.on_new_event.assert_has_calls(
[call('typing_key', 1, rooms=[ROOM_ID])]
)
put_json = self.hs.get_http_client().put_json
put_json.assert_called_once_with(
"farm",
path="/_matrix/federation/v1/send/1000000/",
data=_expect_edu_transaction(
"m.typing",
content={
"room_id": self.room_id,
"user_id": self.u_apple.to_string(),
"room_id": ROOM_ID,
"user_id": U_APPLE.to_string(),
"typing": False,
},
),
json_data_callback=ANY,
long_retries=True,
backoff_on_404=True,
),
defer.succeed((200, "OK")),
)
# Gut-wrenching
from synapse.handlers.typing import RoomMember
member = RoomMember(self.room_id, self.u_apple.to_string())
self.handler._member_typing_until[member] = 1002000
self.handler._room_typing[self.room_id] = set([self.u_apple.to_string()])
self.assertEquals(self.event_source.get_current_key(), 0)
yield self.handler.stopped_typing(
target_user=self.u_apple, auth_user=self.u_apple, room_id=self.room_id
)
self.on_new_event.assert_has_calls(
[call('typing_key', 1, rooms=[self.room_id])]
)
yield put_json.await_calls()
self.assertEquals(self.event_source.get_current_key(), 1)
events = yield self.event_source.get_new_events(
room_ids=[self.room_id], from_key=0
events = self.event_source.get_new_events(
room_ids=[ROOM_ID], from_key=0
)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": self.room_id,
"room_id": ROOM_ID,
"content": {"user_ids": []},
}
],
)
@defer.inlineCallbacks
def test_typing_timeout(self):
self.room_members = [self.u_apple, self.u_banana]
self.room_members = [U_APPLE, U_BANANA]
self.assertEquals(self.event_source.get_current_key(), 0)
yield self.handler.started_typing(
target_user=self.u_apple,
auth_user=self.u_apple,
room_id=self.room_id,
self.successResultOf(self.handler.started_typing(
target_user=U_APPLE,
auth_user=U_APPLE,
room_id=ROOM_ID,
timeout=10000,
)
))
self.on_new_event.assert_has_calls(
[call('typing_key', 1, rooms=[self.room_id])]
[call('typing_key', 1, rooms=[ROOM_ID])]
)
self.on_new_event.reset_mock()
self.assertEquals(self.event_source.get_current_key(), 1)
events = yield self.event_source.get_new_events(
room_ids=[self.room_id], from_key=0
events = self.event_source.get_new_events(
room_ids=[ROOM_ID], from_key=0
)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": self.room_id,
"content": {"user_ids": [self.u_apple.to_string()]},
"room_id": ROOM_ID,
"content": {"user_ids": [U_APPLE.to_string()]},
}
],
)
self.clock.advance_time(16)
self.reactor.pump([16, ])
self.on_new_event.assert_has_calls(
[call('typing_key', 2, rooms=[self.room_id])]
[call('typing_key', 2, rooms=[ROOM_ID])]
)
self.assertEquals(self.event_source.get_current_key(), 2)
events = yield self.event_source.get_new_events(
room_ids=[self.room_id], from_key=1
events = self.event_source.get_new_events(
room_ids=[ROOM_ID], from_key=1
)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": self.room_id,
"room_id": ROOM_ID,
"content": {"user_ids": []},
}
],
@ -365,29 +341,29 @@ class TypingNotificationsTestCase(unittest.TestCase):
# SYN-230 - see if we can still set after timeout
yield self.handler.started_typing(
target_user=self.u_apple,
auth_user=self.u_apple,
room_id=self.room_id,
self.successResultOf(self.handler.started_typing(
target_user=U_APPLE,
auth_user=U_APPLE,
room_id=ROOM_ID,
timeout=10000,
)
))
self.on_new_event.assert_has_calls(
[call('typing_key', 3, rooms=[self.room_id])]
[call('typing_key', 3, rooms=[ROOM_ID])]
)
self.on_new_event.reset_mock()
self.assertEquals(self.event_source.get_current_key(), 3)
events = yield self.event_source.get_new_events(
room_ids=[self.room_id], from_key=0
events = self.event_source.get_new_events(
room_ids=[ROOM_ID], from_key=0
)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": self.room_id,
"content": {"user_ids": [self.u_apple.to_string()]},
"room_id": ROOM_ID,
"content": {"user_ids": [U_APPLE.to_string()]},
}
],
)

View File

@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest.media.v1._base import get_filename_from_headers
from tests import unittest
class GetFileNameFromHeadersTests(unittest.TestCase):
# input -> expected result
TEST_CASES = {
b"inline; filename=abc.txt": u"abc.txt",
b'inline; filename="azerty"': u"azerty",
b'inline; filename="aze%20rty"': u"aze%20rty",
b'inline; filename="aze\"rty"': u'aze"rty',
b'inline; filename="azer;ty"': u"azer;ty",
b"inline; filename*=utf-8''foo%C2%A3bar": u"foo£bar",
}
def tests(self):
for hdr, expected in self.TEST_CASES.items():
res = get_filename_from_headers(
{
b'Content-Disposition': [hdr],
},
)
self.assertEqual(
res, expected,
"expected output for %s to be %s but was %s" % (
hdr, expected, res,
)
)

View File

@ -137,6 +137,7 @@ def make_request(
access_token=None,
request=SynapseRequest,
shorthand=True,
federation_auth_origin=None,
):
"""
Make a web request using the given method and path, feed it the
@ -150,9 +151,11 @@ def make_request(
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
federation_auth_origin (bytes|None): if not None, we will add a fake
Authorization header pretending to be the given server name.
Returns:
A synapse.http.site.SynapseRequest.
Tuple[synapse.http.site.SynapseRequest, channel]
"""
if not isinstance(method, bytes):
method = method.encode('ascii')
@ -184,6 +187,11 @@ def make_request(
b"Authorization", b"Bearer " + access_token.encode('ascii')
)
if federation_auth_origin is not None:
req.requestHeaders.addRawHeader(
b"Authorization", b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,)
)
if content:
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
@ -288,9 +296,6 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
class ThreadPool:
"""
Threadless thread pool.
@ -316,6 +321,10 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
return d
clock.threadpool = ThreadPool()
if pool:
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
pool.threadpool = ThreadPool()
pool.running = True
return d

View File

@ -262,6 +262,7 @@ class HomeserverTestCase(TestCase):
access_token=None,
request=SynapseRequest,
shorthand=True,
federation_auth_origin=None,
):
"""
Create a SynapseRequest at the path using the method and containing the
@ -275,15 +276,18 @@ class HomeserverTestCase(TestCase):
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
federation_auth_origin (bytes|None): if not None, we will add a fake
Authorization header pretending to be the given server name.
Returns:
A synapse.http.site.SynapseRequest.
Tuple[synapse.http.site.SynapseRequest, channel]
"""
if isinstance(content, dict):
content = json.dumps(content).encode('utf8')
return make_request(
self.reactor, method, path, content, access_token, request, shorthand
self.reactor, method, path, content, access_token, request, shorthand,
federation_auth_origin,
)
def render(self, request):
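A hypothetical test method (mirroring the typing tests above) showing the new keyword argument in use: the request pretends to be signed by the "farm" server, skipping real signature checks.

def test_incoming_federation(self):
    request, channel = self.make_request(
        "PUT",
        "/_matrix/federation/v1/send/1000000/",
        b"{}",
        federation_auth_origin=b"farm",
    )
    self.render(request)
    self.assertEqual(channel.code, 200)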

Some files were not shown because too many files have changed in this diff