Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-05-03 15:34:48 -04:00)

Commit d1e7b9c44c: Merge branch 'develop' into travis/login-terms

91 changed files with 2562 additions and 1263 deletions.

.travis.yml (44 lines changed)

@@ -1,8 +1,20 @@
 sudo: false
 language: python

-# tell travis to cache ~/.cache/pip
-cache: pip
+cache:
+  directories:
+    # we only bother to cache the wheels; parts of the http cache get
+    # invalidated every build (because they get served with a max-age of 600
+    # seconds), which means that we end up re-uploading the whole cache for
+    # every build, which is time-consuming. In any case, it's not obvious that
+    # downloading the cache from S3 would be much faster than downloading the
+    # originals from pypi.
+    #
+    - $HOME/.cache/pip/wheels
+
+# don't clone the whole repo history, one commit will do
+git:
+  depth: 1

 # only build branches we care about (PRs are built separately)
 branches:

@@ -11,10 +23,9 @@ branches:
     - develop
     - /^release-v/

-before_script:
-  - git remote set-branches --add origin develop
-  - git fetch origin develop
-
+# When running the tox environments that call Twisted Trial, we can pass the -j
+# flag to run the tests concurrently. We set this to 2 for CPU bound tests
+# (SQLite) and 4 for I/O bound tests (PostgreSQL).
 matrix:
   fast_finish: true
   include:

@@ -22,13 +33,13 @@ matrix:
     env: TOX_ENV=packaging

   - python: 3.6
-    env: TOX_ENV=pep8
+    env: TOX_ENV="pep8,check_isort"

   - python: 2.7
-    env: TOX_ENV=py27
+    env: TOX_ENV=py27 TRIAL_FLAGS="-j 2"

   - python: 2.7
-    env: TOX_ENV=py27-old
+    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"

   - python: 2.7
     env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"

@@ -36,21 +47,24 @@ matrix:
       - postgresql

   - python: 3.5
-    env: TOX_ENV=py35
+    env: TOX_ENV=py35 TRIAL_FLAGS="-j 2"

   - python: 3.6
-    env: TOX_ENV=py36
+    env: TOX_ENV=py36 TRIAL_FLAGS="-j 2"

   - python: 3.6
     env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
     services:
       - postgresql

-  - python: 3.6
-    env: TOX_ENV=check_isort
-
-  - python: 3.6
+  # we only need to check for the newsfragment if it's a PR build
+  - if: type = pull_request
+    python: 3.6
     env: TOX_ENV=check-newsfragment
+    script:
+      - git remote set-branches --add origin develop
+      - git fetch origin develop
+      - tox -e $TOX_ENV

 install:
   - pip install tox

README.rst (13 lines changed)

@@ -657,7 +657,8 @@ Using a reverse proxy with Synapse

 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
-`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
+`Caddy <https://caddyserver.com/docs/proxy>`_ or
 `HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.

@@ -688,7 +689,15 @@ so an example nginx configuration might look like::
         }
     }

-and an example apache configuration may look like::
+an example Caddy configuration might look like::
+
+    matrix.example.com {
+      proxy /_matrix http://localhost:8008 {
+        transparent
+      }
+    }
+
+and an example Apache configuration might look like::

     <VirtualHost *:443>
         SSLEngine on

New changelog entries (one new file each under changelog.d/):

changelog.d/3975.feature: Servers with auto-join rooms will now automatically create those rooms when the first user registers.
changelog.d/4006.misc: Delete unreferenced state groups during history purge.
changelog.d/4011.misc: Reduce database load when fetching state groups.
changelog.d/4051.feature: Add config option to control alias creation.
changelog.d/4072.misc: The README now contains an example for the Caddy web server. Contributed by steamp0rt.
changelog.d/4081.bugfix: Fix race condition where config-defined reserved users were not being added to the monthly active user list prior to the homeserver reactor firing up.
changelog.d/4085.feature: The register_new_matrix_user script is now ported to Python 3.
changelog.d/4089.feature: Configure Docker image to listen on both IPv4 and IPv6.
changelog.d/4091.feature: Support for replacing rooms with new ones.
changelog.d/4095.bugfix: Fix exceptions when using the email mailer on Python 3.
changelog.d/4099.feature: Support for replacing rooms with new ones.
changelog.d/4100.feature: Support for replacing rooms with new ones.
changelog.d/4101.feature: Support for replacing rooms with new ones.
changelog.d/4106.removal: The disused and un-specced identicon generator has been removed.
changelog.d/4108.misc: The "Received rdata" log messages on workers are now logged at DEBUG, not INFO.
changelog.d/4109.misc: Reduce replication traffic for device lists.
changelog.d/4110.misc: Fix `synapse_replication_tcp_protocol_*_commands` metric label to be the full command name, rather than just the first character.
changelog.d/4118.removal: The obsolete and non-functional /pull federation endpoint has been removed.
changelog.d/4119.removal: The deprecated v1 key exchange endpoints have been removed.
changelog.d/4120.removal: Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2.
changelog.d/4121.misc: Log some bits about room creation.
changelog.d/4122.bugfix: Searches that request profile info now no longer fail with a 500.
changelog.d/4124.misc: Fix `tox` failure on old systems.

contrib/docker/docker-compose.yml

@@ -47,4 +47,4 @@ services:
         # You may store the database tables in a local folder..
         - ./schemas:/var/lib/postgresql/data
         # .. or store them on some high performance storage for better results
-        # - /path/to/ssd/storage:/var/lib/postfesql/data
+        # - /path/to/ssd/storage:/var/lib/postgresql/data

docker/conf/homeserver.yaml

@@ -21,7 +21,7 @@ listeners:
 {% if not SYNAPSE_NO_TLS %}
   -
     port: 8448
-    bind_addresses: ['0.0.0.0']
+    bind_addresses: ['::']
     type: http
     tls: true
     x_forwarded: false

@@ -34,7 +34,7 @@ listeners:

   - port: 8008
     tls: false
-    bind_addresses: ['0.0.0.0']
+    bind_addresses: ['::']
     type: http
     x_forwarded: false
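
Note on the bind_addresses change: switching '0.0.0.0' to '::' is what makes the
Docker image listen on both IPv4 and IPv6 (changelog.d/4089.feature). As a rough
standalone sketch (not Synapse code; assumes a dual-stack host), an IPv6
wildcard socket with IPV6_V6ONLY cleared accepts IPv4 clients as IPv4-mapped
addresses::

    import socket

    s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    # IPv4 peers then show up as ::ffff:a.b.c.d (many Linux distros
    # already default to bindv6only=0, making this a no-op).
    s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
    s.bind(("::", 8008))
    s.listen(5)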

(tox/virtualenv preparation script)

@@ -14,22 +14,3 @@ fi

 # set up the virtualenv
 tox -e py27 --notest -v
-
-TOX_BIN=$TOX_DIR/py27/bin
-
-# cryptography 2.2 requires setuptools >= 18.5.
-#
-# older versions of virtualenv (?) give us a virtualenv with the same version
-# of setuptools as is installed on the system python (and tox runs virtualenv
-# under python3, so we get the version of setuptools that is installed on that).
-#
-# anyway, make sure that we have a recent enough setuptools.
-$TOX_BIN/pip install 'setuptools>=18.5'
-
-# we also need a semi-recent version of pip, because old ones fail to install
-# the "enum34" dependency of cryptography.
-$TOX_BIN/pip install 'pip>=10'
-
-{ python synapse/python_dependencies.py
-  echo lxml
-} | xargs $TOX_BIN/pip install

(make-identicons.pl, deleted; see changelog.d/4106.removal)

@@ -1,39 +0,0 @@
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-
-use DBI;
-use DBD::SQLite;
-use JSON;
-use Getopt::Long;
-
-my $db; # = "homeserver.db";
-my $server = "http://localhost:8008";
-my $size = 320;
-
-GetOptions("db|d=s", \$db,
-           "server|s=s", \$server,
-           "width|w=i", \$size) or usage();
-
-usage() unless $db;
-
-my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;
-
-my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;
-
-foreach (@$res) {
-    my ($token, $mxid) = ($_->[0], $_->[1]);
-    my ($user_id) = ($mxid =~ m/@(.*):/);
-    my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
-    if (!$url || $url =~ /#auto$/) {
-        `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
-        my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
-        my $content_uri = from_json($json)->{content_uri};
-        `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
-    }
-}
-
-sub usage {
-    die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
-}

scripts/register_new_matrix_user (reduced to a shim for the new synapse/_scripts module)

@@ -16,207 +16,7 @@

 from __future__ import print_function

-import argparse
-import getpass
-import hashlib
-import hmac
-import json
-import sys
-import urllib2
-
-from six import input
-
-import yaml
-
-
-def request_registration(user, password, server_location, shared_secret, admin=False):
-    req = urllib2.Request(
-        "%s/_matrix/client/r0/admin/register" % (server_location,),
-        headers={'Content-Type': 'application/json'},
-    )
-
-    try:
-        if sys.version_info[:3] >= (2, 7, 9):
-            # As of version 2.7.9, urllib2 now checks SSL certs
-            import ssl
-            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
-        else:
-            f = urllib2.urlopen(req)
-        body = f.read()
-        f.close()
-        nonce = json.loads(body)["nonce"]
-    except urllib2.HTTPError as e:
-        print("ERROR! Received %d %s" % (e.code, e.reason))
-        if 400 <= e.code < 500:
-            if e.info().type == "application/json":
-                resp = json.load(e)
-                if "error" in resp:
-                    print(resp["error"])
-        sys.exit(1)
-
-    mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
-
-    mac.update(nonce)
-    mac.update("\x00")
-    mac.update(user)
-    mac.update("\x00")
-    mac.update(password)
-    mac.update("\x00")
-    mac.update("admin" if admin else "notadmin")
-
-    mac = mac.hexdigest()
-
-    data = {
-        "nonce": nonce,
-        "username": user,
-        "password": password,
-        "mac": mac,
-        "admin": admin,
-    }
-
-    server_location = server_location.rstrip("/")
-
-    print("Sending registration request...")
-
-    req = urllib2.Request(
-        "%s/_matrix/client/r0/admin/register" % (server_location,),
-        data=json.dumps(data),
-        headers={'Content-Type': 'application/json'},
-    )
-    try:
-        if sys.version_info[:3] >= (2, 7, 9):
-            # As of version 2.7.9, urllib2 now checks SSL certs
-            import ssl
-            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
-        else:
-            f = urllib2.urlopen(req)
-        f.read()
-        f.close()
-        print("Success.")
-    except urllib2.HTTPError as e:
-        print("ERROR! Received %d %s" % (e.code, e.reason))
-        if 400 <= e.code < 500:
-            if e.info().type == "application/json":
-                resp = json.load(e)
-                if "error" in resp:
-                    print(resp["error"])
-        sys.exit(1)
-
-
-[removed here as well: the register_new_user() prompting helper and the
-argparse-based __main__ body; both reappear essentially verbatim in the new
-synapse/_scripts/register_new_matrix_user.py shown below]
+from synapse._scripts.register_new_matrix_user import main

 if __name__ == "__main__":
-    register_new_user(args.user, args.password, args.server_url, secret, admin)
+    main()

synapse/_scripts/__init__.py (new, empty file)

synapse/_scripts/register_new_matrix_user.py (new file, 215 lines):

# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import getpass
import hashlib
import hmac
import logging
import sys

from six.moves import input

import requests as _requests
import yaml


def request_registration(
    user,
    password,
    server_location,
    shared_secret,
    admin=False,
    requests=_requests,
    _print=print,
    exit=sys.exit,
):

    url = "%s/_matrix/client/r0/admin/register" % (server_location,)

    # Get the nonce
    r = requests.get(url, verify=False)

    if r.status_code != 200:
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
                _print(r.json()["error"])
            except Exception:
                pass
        return exit(1)

    nonce = r.json()["nonce"]

    mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)

    mac.update(nonce.encode('utf8'))
    mac.update(b"\x00")
    mac.update(user.encode('utf8'))
    mac.update(b"\x00")
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")

    mac = mac.hexdigest()

    data = {
        "nonce": nonce,
        "username": user,
        "password": password,
        "mac": mac,
        "admin": admin,
    }

    _print("Sending registration request...")
    r = requests.post(url, json=data, verify=False)

    if r.status_code != 200:
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
                _print(r.json()["error"])
            except Exception:
                pass
        return exit(1)

    _print("Success!")


def register_new_user(user, password, server_location, shared_secret, admin):
    if not user:
        try:
            default_user = getpass.getuser()
        except Exception:
            default_user = None

        if default_user:
            user = input("New user localpart [%s]: " % (default_user,))
            if not user:
                user = default_user
        else:
            user = input("New user localpart: ")

    if not user:
        print("Invalid user name")
        sys.exit(1)

    if not password:
        password = getpass.getpass("Password: ")

        if not password:
            print("Password cannot be blank.")
            sys.exit(1)

        confirm_password = getpass.getpass("Confirm password: ")

        if password != confirm_password:
            print("Passwords do not match")
            sys.exit(1)

    if admin is None:
        admin = input("Make admin [no]: ")
        if admin in ("y", "yes", "true"):
            admin = True
        else:
            admin = False

    request_registration(user, password, server_location, shared_secret, bool(admin))


def main():

    logging.captureWarnings(True)

    parser = argparse.ArgumentParser(
        description="Used to register new users with a given home server when"
        " registration has been disabled. The home server must be"
        " configured with the 'registration_shared_secret' option"
        " set."
    )
    parser.add_argument(
        "-u",
        "--user",
        default=None,
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-p",
        "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument(
        "-a",
        "--admin",
        action="store_true",
        help=(
            "Register new user as an admin. "
            "Will prompt if --no-admin is not set either."
        ),
    )
    admin_group.add_argument(
        "--no-admin",
        action="store_true",
        help=(
            "Register new user as a regular user. "
            "Will prompt if --admin is not set either."
        ),
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-c",
        "--config",
        type=argparse.FileType('r'),
        help="Path to server config file. Used to read in shared secret.",
    )

    group.add_argument(
        "-k", "--shared-secret", help="Shared secret as defined in server config file."
    )

    parser.add_argument(
        "server_url",
        default="https://localhost:8448",
        nargs='?',
        help="URL to use to talk to the home server. Defaults to "
        " 'https://localhost:8448'.",
    )

    args = parser.parse_args()

    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        secret = config.get("registration_shared_secret", None)
        if not secret:
            print("No 'registration_shared_secret' defined in config.")
            sys.exit(1)
    else:
        secret = args.shared_secret

    admin = None
    if args.admin or args.no_admin:
        admin = args.admin

    register_new_user(args.user, args.password, args.server_url, secret, admin)


if __name__ == "__main__":
    main()
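
For reference, the "mac" field sent by the script above is an HMAC-SHA1, keyed
with the homeserver's registration_shared_secret, over the NUL-separated nonce,
localpart, password and admin flag. A minimal sketch with hypothetical values::

    import hashlib
    import hmac

    shared_secret = "example-shared-secret"  # hypothetical; from homeserver.yaml
    nonce, user, password, admin = "abcd", "alice", "s3cret", False

    mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    # nonce, localpart and password, each followed by a NUL separator
    for part in (nonce, user, password):
        mac.update(part.encode("utf8"))
        mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    print(mac.hexdigest())  # the value to place in the POST body's "mac" field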

synapse/api/constants.py

@@ -62,6 +62,7 @@ class LoginType(object):
 class EventTypes(object):
     Member = "m.room.member"
     Create = "m.room.create"
+    Tombstone = "m.room.tombstone"
     JoinRules = "m.room.join_rules"
     PowerLevels = "m.room.power_levels"
     Aliases = "m.room.aliases"

synapse/api/urls.py

@@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
-SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"

synapse/app/homeserver.py

@@ -37,7 +37,6 @@ from synapse.api.urls import (
     FEDERATION_PREFIX,
     LEGACY_MEDIA_PREFIX,
     MEDIA_PREFIX,
-    SERVER_KEY_PREFIX,
     SERVER_KEY_V2_PREFIX,
     STATIC_PREFIX,
     WEB_CLIENT_PREFIX,

@@ -59,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer

@@ -236,10 +234,7 @@ class SynapseHomeServer(HomeServer):
             )

         if name in ["keys", "federation"]:
-            resources.update({
-                SERVER_KEY_PREFIX: LocalKey(self),
-                SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
-            })
+            resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

         if name == "webclient":
             resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

@@ -553,14 +548,6 @@ def run(hs):
             generate_monthly_active_users,
         )
-
-        # XXX is this really supposed to be a background process? it looks
-        # like it needs to complete before some of the other stuff runs.
-        run_as_background_process(
-            "initialise_reserved_users",
-            hs.get_datastore().initialise_reserved_users,
-            hs.config.mau_limits_reserved_threepids,
-        )
-
         start_generate_monthly_active_users()
         if hs.config.limit_usage_by_mau:
             clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)

synapse/config/homeserver.py

@@ -31,6 +31,7 @@ from .push import PushConfig
 from .ratelimiting import RatelimitConfig
 from .registration import RegistrationConfig
 from .repository import ContentRepositoryConfig
+from .room_directory import RoomDirectoryConfig
 from .saml2 import SAML2Config
 from .server import ServerConfig
 from .server_notices_config import ServerNoticesConfig

@@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        WorkerConfig, PasswordAuthProviderConfig, PushConfig,
                        SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
                        ConsentConfig,
-                       ServerNoticesConfig,
+                       ServerNoticesConfig, RoomDirectoryConfig,
                        ):
     pass

synapse/config/registration.py

@@ -15,10 +15,10 @@

 from distutils.util import strtobool

+from synapse.config._base import Config, ConfigError
+from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols

-from ._base import Config
-

 class RegistrationConfig(Config):

@@ -44,6 +44,10 @@ class RegistrationConfig(Config):
         )

         self.auto_join_rooms = config.get("auto_join_rooms", [])
+        for room_alias in self.auto_join_rooms:
+            if not RoomAlias.is_valid(room_alias):
+                raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
+        self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)

     def default_config(self, **kwargs):
         registration_shared_secret = random_string_with_symbols(50)

@@ -98,6 +102,13 @@ class RegistrationConfig(Config):
         # to these rooms
         #auto_join_rooms:
         #    - "#example:example.com"
+
+        # Where auto_join_rooms are specified, setting this flag ensures that
+        # the rooms exist by creating them when the first user on the
+        # homeserver registers.
+        # Setting to false means that if the rooms are not manually created,
+        # users cannot be auto-joined since they do not exist.
+        autocreate_auto_join_rooms: true
         """ % locals()

     def add_arguments(self, parser):

synapse/config/room_directory.py (new file, 102 lines):

# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util import glob_to_regex

from ._base import Config, ConfigError


class RoomDirectoryConfig(Config):
    def read_config(self, config):
        alias_creation_rules = config["alias_creation_rules"]

        self._alias_creation_rules = [
            _AliasRule(rule)
            for rule in alias_creation_rules
        ]

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # The `alias_creation` option controls who's allowed to create aliases
        # on this server.
        #
        # The format of this option is a list of rules that contain globs that
        # match against user_id and the new alias (fully qualified with server
        # name). The action in the first rule that matches is taken, which can
        # currently either be "allow" or "deny".
        #
        # If no rules match the request is denied.
        alias_creation_rules:
            - user_id: "*"
              alias: "*"
              action: allow
        """

    def is_alias_creation_allowed(self, user_id, alias):
        """Checks if the given user is allowed to create the given alias

        Args:
            user_id (str)
            alias (str)

        Returns:
            boolean: True if user is allowed to create the alias
        """
        for rule in self._alias_creation_rules:
            if rule.matches(user_id, alias):
                return rule.action == "allow"

        return False


class _AliasRule(object):
    def __init__(self, rule):
        action = rule["action"]
        user_id = rule["user_id"]
        alias = rule["alias"]

        if action in ("allow", "deny"):
            self.action = action
        else:
            raise ConfigError(
                "alias_creation_rules rules can only have action of 'allow'"
                " or 'deny'"
            )

        try:
            self._user_id_regex = glob_to_regex(user_id)
            self._alias_regex = glob_to_regex(alias)
        except Exception as e:
            raise ConfigError("Failed to parse glob into regex: %s", e)

    def matches(self, user_id, alias):
        """Tests if this rule matches the given user_id and alias.

        Args:
            user_id (str)
            alias (str)

        Returns:
            boolean
        """

        # Note: The regexes are anchored at both ends
        if not self._user_id_regex.match(user_id):
            return False

        if not self._alias_regex.match(alias):
            return False

        return True
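
To illustrate the first-match semantics, a hedged usage sketch (the rule values
are illustrative, and it assumes RoomDirectoryConfig can be instantiated
directly)::

    config = RoomDirectoryConfig()
    config.read_config({
        "alias_creation_rules": [
            # first matching rule wins: local users may create any alias...
            {"user_id": "@*:example.com", "alias": "*", "action": "allow"},
            # ...everyone else is denied
            {"user_id": "*", "alias": "*", "action": "deny"},
        ]
    })

    config.is_alias_creation_allowed("@bob:example.com", "#room:example.com")   # True
    config.is_alias_creation_allowed("@eve:elsewhere.org", "#room:example.com") # False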

synapse/crypto/keyclient.py

@@ -15,6 +15,8 @@

 import logging

+from six.moves import urllib
+
 from canonicaljson import json

 from twisted.internet import defer, reactor

@@ -28,15 +30,15 @@ from synapse.util import logcontext

 logger = logging.getLogger(__name__)

-KEY_API_V1 = b"/_matrix/key/v1/"
+KEY_API_V2 = "/_matrix/key/v2/server/%s"


 @defer.inlineCallbacks
-def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
+def fetch_server_key(server_name, tls_client_options_factory, key_id):
     """Fetch the keys for a remote server."""

     factory = SynapseKeyClientFactory()
-    factory.path = path
+    factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
     factory.host = server_name
     endpoint = matrix_federation_endpoint(
         reactor, server_name, tls_client_options_factory, timeout=30
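
The key id is now URL-quoted into the v2 path. A quick illustration with a
hypothetical key id::

    from six.moves import urllib

    KEY_API_V2 = "/_matrix/key/v2/server/%s"
    print(KEY_API_V2 % (urllib.parse.quote("ed25519:auto"),))
    # -> /_matrix/key/v2/server/ed25519%3Aauto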

synapse/crypto/keyring.py

@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd.
+# Copyright 2017, 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -18,8 +18,6 @@ import hashlib
 import logging
 from collections import namedtuple

-from six.moves import urllib
-
 from signedjson.key import (
     decode_verify_key_bytes,
     encode_verify_key_base64,

@@ -395,32 +393,13 @@ class Keyring(object):

     @defer.inlineCallbacks
     def get_keys_from_server(self, server_name_and_key_ids):
-        @defer.inlineCallbacks
-        def get_key(server_name, key_ids):
-            keys = None
-            try:
-                keys = yield self.get_server_verify_key_v2_direct(
-                    server_name, key_ids
-                )
-            except Exception as e:
-                logger.info(
-                    "Unable to get key %r for %r directly: %s %s",
-                    key_ids, server_name,
-                    type(e).__name__, str(e),
-                )
-
-            if not keys:
-                keys = yield self.get_server_verify_key_v1_direct(
-                    server_name, key_ids
-                )
-
-                keys = {server_name: keys}
-
-            defer.returnValue(keys)
-
         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                run_in_background(get_key, server_name, key_ids)
+                run_in_background(
+                    self.get_server_verify_key_v2_direct,
+                    server_name,
+                    key_ids,
+                )
                 for server_name, key_ids in server_name_and_key_ids
             ],
             consumeErrors=True,

@@ -525,10 +504,7 @@ class Keyring(object):
                 continue

             (response, tls_certificate) = yield fetch_server_key(
-                server_name, self.hs.tls_client_options_factory,
-                path=("/_matrix/key/v2/server/%s" % (
-                    urllib.parse.quote(requested_key_id),
-                )).encode("ascii"),
+                server_name, self.hs.tls_client_options_factory, requested_key_id
             )

             if (u"signatures" not in response

@@ -657,78 +633,6 @@ class Keyring(object):

         defer.returnValue(results)

-    @defer.inlineCallbacks
-    def get_server_verify_key_v1_direct(self, server_name, key_ids):
-        """Finds a verification key for the server with one of the key ids.
-        Args:
-            server_name (str): The name of the server to fetch a key for.
-            keys_ids (list of str): The key_ids to check for.
-        """
-
-        # Try to fetch the key from the remote server.
-
-        (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_client_options_factory
-        )
-
-        # Check the response.
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1, tls_certificate
-        )
-
-        if ("signatures" not in response
-                or server_name not in response["signatures"]):
-            raise KeyLookupError("Key response not signed by remote server")
-
-        if "tls_certificate" not in response:
-            raise KeyLookupError("Key response missing TLS certificate")
-
-        tls_certificate_b64 = response["tls_certificate"]
-
-        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
-            raise KeyLookupError("TLS certificate doesn't match")
-
-        # Cache the result in the datastore.
-
-        time_now_ms = self.clock.time_msec()
-
-        verify_keys = {}
-        for key_id, key_base64 in response["verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
-                verify_keys[key_id] = verify_key
-
-        for key_id in response["signatures"][server_name]:
-            if key_id not in response["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(
-                    response,
-                    server_name,
-                    verify_keys[key_id]
-                )
-
-        yield self.store.store_server_certificate(
-            server_name,
-            server_name,
-            time_now_ms,
-            tls_certificate,
-        )
-
-        yield self.store_keys(
-            server_name=server_name,
-            from_server=server_name,
-            verify_keys=verify_keys,
-        )
-
-        defer.returnValue(verify_keys)
-
     def store_keys(self, server_name, from_server, verify_keys):
         """Store a collection of verify keys for a given server
         Args:

synapse/federation/federation_server.py

@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import re

 import six
 from six import iteritems

@@ -44,6 +43,7 @@ from synapse.replication.http.federation import (
     ReplicationGetQueryRestServlet,
 )
 from synapse.types import get_domain_from_id
+from synapse.util import glob_to_regex
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.util.logcontext import nested_logging_context

@@ -323,11 +323,6 @@ class FederationServer(FederationBase):
         else:
             defer.returnValue((404, ""))

-    @defer.inlineCallbacks
-    @log_function
-    def on_pull_request(self, origin, versions):
-        raise NotImplementedError("Pull transactions not implemented")
-
     @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()

@@ -729,22 +724,10 @@ def _acl_entry_matches(server_name, acl_entry):
     if not isinstance(acl_entry, six.string_types):
         logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
         return False
-    regex = _glob_to_regex(acl_entry)
+    regex = glob_to_regex(acl_entry)
     return regex.match(server_name)


-def _glob_to_regex(glob):
-    res = ''
-    for c in glob:
-        if c == '*':
-            res = res + '.*'
-        elif c == '?':
-            res = res + '.'
-        else:
-            res = res + re.escape(c)
-    return re.compile(res + "\\Z", re.IGNORECASE)
-
-
 class FederationHandlerRegistry(object):
     """Allows classes to register themselves as handlers for a given EDU or
     query type for incoming federation traffic.
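
The inlined _glob_to_regex helper removed above now lives in synapse.util as
glob_to_regex. Its behaviour, restated as a standalone sketch (same algorithm
as the deleted code)::

    import re

    def glob_to_regex(glob):
        # '*' -> '.*', '?' -> '.', anything else escaped; anchored at the end
        # by \Z, at the start by re.match(), and case-insensitive
        res = ''
        for c in glob:
            if c == '*':
                res = res + '.*'
            elif c == '?':
                res = res + '.'
            else:
                res = res + re.escape(c)
        return re.compile(res + "\\Z", re.IGNORECASE)

    assert glob_to_regex("*.example.com").match("host.example.com")
    assert not glob_to_regex("*.example.com").match("example.org")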

synapse/federation/transport/server.py

@@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet):
         defer.returnValue((code, response))


-class FederationPullServlet(BaseFederationServlet):
-    PATH = "/pull/"
-
-    # This is for when someone asks us for everything since version X
-    def on_GET(self, origin, content, query):
-        return self.handler.on_pull_request(query["origin"][0], query["v"])
-
-
 class FederationEventServlet(BaseFederationServlet):
     PATH = "/event/(?P<event_id>[^/]*)/"

@@ -1261,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):

 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
-    FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
     FederationStateIdsServlet,

synapse/handlers/directory.py

@@ -43,6 +43,7 @@ class DirectoryHandler(BaseHandler):
         self.state = hs.get_state_handler()
         self.appservice_handler = hs.get_application_service_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.config = hs.config

         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(

@@ -111,6 +112,14 @@ class DirectoryHandler(BaseHandler):
                 403, "This user is not permitted to create this alias",
             )

+        if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+            # Lets just return a generic message, as there may be all sorts of
+            # reasons why we said no. TODO: Allow configurable error messages
+            # per alias creation rule?
+            raise SynapseError(
+                403, "Not allowed to create alias",
+            )
+
         can_create = yield self.can_modify_alias(
             room_alias,
             user_id=user_id

@@ -129,9 +138,30 @@ class DirectoryHandler(BaseHandler):
         )

     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias):
-        # association deletion for human users
+    def delete_association(self, requester, room_alias, send_event=True):
+        """Remove an alias from the directory
+
+        (this is only meant for human users; AS users should call
+        delete_appservice_association)
+
+        Args:
+            requester (Requester):
+            room_alias (RoomAlias):
+            send_event (bool): Whether to send an updated m.room.aliases event.
+                Note that, if we delete the canonical alias, we will always attempt
+                to send an m.room.canonical_alias event
+
+        Returns:
+            Deferred[unicode]: room id that the alias used to point to
+
+        Raises:
+            NotFoundError: if the alias doesn't exist
+
+            AuthError: if the user doesn't have perms to delete the alias (ie, the
+                user is neither the creator of the alias, nor a server admin).
+
+            SynapseError: if the alias belongs to an AS
+        """
         user_id = requester.user.to_string()

         try:

@@ -159,10 +189,11 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)

         try:
-            yield self.send_room_alias_update_event(
-                requester,
-                room_id
-            )
+            if send_event:
+                yield self.send_room_alias_update_event(
+                    requester,
+                    room_id
+                )

             yield self._update_canonical_alias(
                 requester,

synapse/handlers/initial_sync.py

@@ -156,7 +156,7 @@ class InitialSyncHandler(BaseHandler):
                 room_end_token = "s%d" % (event.stream_ordering,)
             deferred_room_state = run_in_background(
                 self.store.get_state_for_events,
-                [event.event_id], None,
+                [event.event_id],
             )
             deferred_room_state.addCallback(
                 lambda states: states[event.event_id]

@@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler):
     def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
                                   membership, member_event_id, is_peeking):
         room_state = yield self.store.get_state_for_events(
-            [member_event_id], None
+            [member_event_id],
         )

         room_state = room_state[member_event_id]
@ -35,6 +35,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures
|
||||||
from synapse.events.utils import serialize_event
|
from synapse.events.utils import serialize_event
|
||||||
from synapse.events.validator import EventValidator
|
from synapse.events.validator import EventValidator
|
||||||
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
||||||
|
from synapse.storage.state import StateFilter
|
||||||
from synapse.types import RoomAlias, UserID
|
from synapse.types import RoomAlias, UserID
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.frozenutils import frozendict_json_encoder
|
from synapse.util.frozenutils import frozendict_json_encoder
|
||||||
|
@ -80,7 +81,7 @@ class MessageHandler(object):
|
||||||
elif membership == Membership.LEAVE:
|
elif membership == Membership.LEAVE:
|
||||||
key = (event_type, state_key)
|
key = (event_type, state_key)
|
||||||
room_state = yield self.store.get_state_for_events(
|
room_state = yield self.store.get_state_for_events(
|
||||||
[membership_event_id], [key]
|
[membership_event_id], StateFilter.from_types([key])
|
||||||
)
|
)
|
||||||
data = room_state[membership_event_id].get(key)
|
data = room_state[membership_event_id].get(key)
|
||||||
|
|
||||||
|
@@ -88,7 +89,7 @@ class MessageHandler(object):

     @defer.inlineCallbacks
     def get_state_events(
-        self, user_id, room_id, types=None, filtered_types=None,
+        self, user_id, room_id, state_filter=StateFilter.all(),
         at_token=None, is_guest=False,
     ):
         """Retrieve all state events for a given room. If the user is

@@ -100,13 +101,8 @@ class MessageHandler(object):
         Args:
             user_id(str): The user requesting state events.
             room_id(str): The room ID to get all state events from.
-            types(list[(str, str|None)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. If `state_key` is None,
-                all events are returned of the given type.
-                May be None, which matches any key.
-            filtered_types(list[str]|None): Only apply filtering via `types` to this
-                list of event types. Other types of events are returned unfiltered.
-                If None, `types` filtering is applied to all events.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
             at_token(StreamToken|None): the stream token of the point at which we are
                 requesting the state. If the user is not allowed to view the state as of
                 that stream token, we raise a 403 SynapseError. If None, returns the current
@@ -139,7 +135,7 @@ class MessageHandler(object):
             event = last_events[0]
             if visible_events:
                 room_state = yield self.store.get_state_for_events(
-                    [event.event_id], types, filtered_types=filtered_types,
+                    [event.event_id], state_filter=state_filter,
                 )
                 room_state = room_state[event.event_id]
             else:
@@ -158,12 +154,12 @@ class MessageHandler(object):

         if membership == Membership.JOIN:
             state_ids = yield self.store.get_filtered_current_state_ids(
-                room_id, types, filtered_types=filtered_types,
+                room_id, state_filter=state_filter,
             )
             room_state = yield self.store.get_events(state_ids.values())
         elif membership == Membership.LEAVE:
             room_state = yield self.store.get_state_for_events(
-                [membership_event_id], types, filtered_types=filtered_types,
+                [membership_event_id], state_filter=state_filter,
             )
             room_state = room_state[membership_event_id]
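The hunks above swap the old `types`/`filtered_types` argument pair for a single StateFilter object. A minimal sketch of the new call shapes, assuming only the two constructors that actually appear in this diff (`from_types` and `all`); the event type and user ID are illustrative:

    from synapse.storage.state import StateFilter

    # fetch just one user's m.room.member event
    member_filter = StateFilter.from_types([("m.room.member", "@alice:example.com")])

    # fetch all current state - the default for the rewritten signatures
    everything = StateFilter.all()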
@@ -431,6 +427,9 @@ class EventCreationHandler(object):

         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
+            logger.info(
+                "Not bothering to persist duplicate state event %s", event.event_id,
+            )
             if prev_state is not None:
                 defer.returnValue(prev_state)
@@ -21,6 +21,7 @@ from twisted.python.failure import Failure
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
 from synapse.util.logcontext import run_in_background
|
||||||
if event_filter and event_filter.lazy_load_members():
|
if event_filter and event_filter.lazy_load_members():
|
||||||
# TODO: remove redundant members
|
# TODO: remove redundant members
|
||||||
|
|
||||||
types = [
|
# FIXME: we also care about invite targets etc.
|
||||||
(EventTypes.Member, state_key)
|
state_filter = StateFilter.from_types(
|
||||||
for state_key in set(
|
(EventTypes.Member, event.sender)
|
||||||
event.sender # FIXME: we also care about invite targets etc.
|
for event in events
|
||||||
for event in events
|
)
|
||||||
)
|
|
||||||
]
|
|
||||||
|
|
||||||
state_ids = yield self.store.get_state_ids_for_event(
|
state_ids = yield self.store.get_state_ids_for_event(
|
||||||
events[0].event_id, types=types,
|
events[0].event_id, state_filter=state_filter,
|
||||||
)
|
)
|
||||||
|
|
||||||
if state_ids:
|
if state_ids:
|
||||||
|
|
|
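Worth noting in the hunk above: `from_types` is fed a generator expression rather than a materialised list. A self-contained sketch of the same shape (the event type and senders are illustrative):

    from collections import namedtuple

    from synapse.storage.state import StateFilter

    Event = namedtuple("Event", ["sender"])  # stand-in for a timeline event
    events = [Event("@alice:example.com"), Event("@bob:example.com")]

    state_filter = StateFilter.from_types(
        ("m.room.member", event.sender) for event in events
    )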
@@ -220,15 +220,42 @@ class RegistrationHandler(BaseHandler):

         # auto-join the user to any rooms we're supposed to dump them into
         fake_requester = create_requester(user_id)

+        # try to create the room if we're the first user on the server
+        should_auto_create_rooms = False
+        if self.hs.config.autocreate_auto_join_rooms:
+            count = yield self.store.count_all_users()
+            should_auto_create_rooms = count == 1
+
         for r in self.hs.config.auto_join_rooms:
             try:
-                yield self._join_user_to_room(fake_requester, r)
+                if should_auto_create_rooms:
+                    room_alias = RoomAlias.from_string(r)
+                    if self.hs.hostname != room_alias.domain:
+                        logger.warning(
+                            'Cannot create room alias %s, '
+                            'it does not match server domain',
+                            r,
+                        )
+                    else:
+                        # create room expects the localpart of the room alias
+                        room_alias_localpart = room_alias.localpart
+
+                        # getting the RoomCreationHandler during init gives a dependency
+                        # loop
+                        yield self.hs.get_room_creation_handler().create_room(
+                            fake_requester,
+                            config={
+                                "preset": "public_chat",
+                                "room_alias_name": room_alias_localpart
+                            },
+                            ratelimit=False,
+                        )
+                else:
+                    yield self._join_user_to_room(fake_requester, r)
             except Exception as e:
                 logger.error("Failed to join new user to %r: %r", r, e)

-        # We used to generate default identicons here, but nowadays
-        # we want clients to generate their own as part of their branding
-        # rather than there being consistent matrix-wide ones, so we don't.
         defer.returnValue((user_id, token))

     @defer.inlineCallbacks
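The auto-create logic added above boils down to two checks. An illustrative restatement outside of Twisted (not synapse code; the function and parameter names are made up for clarity):

    def should_auto_create(autocreate_enabled, total_users, server_name, alias_domain):
        # only the very first user on a fresh server triggers room creation
        if not (autocreate_enabled and total_users == 1):
            return False
        # never try to create an alias that lives on another server's domain
        return alias_domain == server_name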
@@ -21,7 +21,7 @@ import math
 import string
 from collections import OrderedDict

-from six import string_types
+from six import iteritems, string_types

 from twisted.internet import defer

@@ -32,9 +32,11 @@ from synapse.api.constants import (
     JoinRules,
     RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
+from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
+from synapse.util.async_helpers import Linearizer
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler
@@ -72,6 +74,334 @@ class RoomCreationHandler(BaseHandler):

         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.room_member_handler = hs.get_room_member_handler()
+
+        # linearizer to stop two upgrades happening at once
+        self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
+
+    @defer.inlineCallbacks
+    def upgrade_room(self, requester, old_room_id, new_version):
+        """Replace a room with a new room with a different version
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_version (unicode): the new room version to use
+
+        Returns:
+            Deferred[unicode]: the new room id
+        """
+        yield self.ratelimit(requester)
+
+        user_id = requester.user.to_string()
+
+        with (yield self._upgrade_linearizer.queue(old_room_id)):
+            # start by allocating a new room id
+            r = yield self.store.get_room(old_room_id)
+            if r is None:
+                raise NotFoundError("Unknown room id %s" % (old_room_id,))
+            new_room_id = yield self._generate_room_id(
+                creator_id=user_id, is_public=r["is_public"],
+            )
+
+            logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
+            # we create and auth the tombstone event before properly creating the new
+            # room, to check our user has perms in the old room.
+            tombstone_event, tombstone_context = (
+                yield self.event_creation_handler.create_event(
+                    requester, {
+                        "type": EventTypes.Tombstone,
+                        "state_key": "",
+                        "room_id": old_room_id,
+                        "sender": user_id,
+                        "content": {
+                            "body": "This room has been replaced",
+                            "replacement_room": new_room_id,
+                        }
+                    },
+                    token_id=requester.access_token_id,
+                )
+            )
+            yield self.auth.check_from_context(tombstone_event, tombstone_context)
+
+            yield self.clone_exiting_room(
+                requester,
+                old_room_id=old_room_id,
+                new_room_id=new_room_id,
+                new_room_version=new_version,
+                tombstone_event_id=tombstone_event.event_id,
+            )
+
+            # now send the tombstone
+            yield self.event_creation_handler.send_nonmember_event(
+                requester, tombstone_event, tombstone_context,
+            )
+
+            old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+
+            # update any aliases
+            yield self._move_aliases_to_new_room(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            # and finally, shut down the PLs in the old room, and update them in the new
+            # room.
+            yield self._update_upgraded_room_pls(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            defer.returnValue(new_room_id)
+
+    @defer.inlineCallbacks
+    def _update_upgraded_room_pls(
+        self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        """Send updated power levels in both rooms after an upgrade
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id of the replacement room
+            old_room_state (dict[tuple[str, str], str]): the state map for the old room
+
+        Returns:
+            Deferred
+        """
+        old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
+
+        if old_room_pl_event_id is None:
+            logger.warning(
+                "Not supported: upgrading a room with no PL event. Not setting PLs "
+                "in old room.",
+            )
+            return
+
+        old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
+
+        # we try to stop regular users from speaking by setting the PL required
+        # to send regular events and invites to 'Moderator' level. That's normally
+        # 50, but if the default PL in a room is 50 or more, then we set the
+        # required PL above that.
+
+        pl_content = dict(old_room_pl_state.content)
+        users_default = int(pl_content.get("users_default", 0))
+        restricted_level = max(users_default + 1, 50)
+
+        updated = False
+        for v in ("invite", "events_default"):
+            current = int(pl_content.get(v, 0))
+            if current < restricted_level:
+                logger.info(
+                    "Setting level for %s in %s to %i (was %i)",
+                    v, old_room_id, restricted_level, current,
+                )
+                pl_content[v] = restricted_level
+                updated = True
+            else:
+                logger.info(
+                    "Not setting level for %s (already %i)",
+                    v, current,
+                )
+
+        if updated:
+            try:
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester, {
+                        "type": EventTypes.PowerLevels,
+                        "state_key": '',
+                        "room_id": old_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": pl_content,
+                    }, ratelimit=False,
+                )
+            except AuthError as e:
+                logger.warning("Unable to update PLs in old room: %s", e)
+
+        logger.info("Setting correct PLs in new room")
+        yield self.event_creation_handler.create_and_send_nonmember_event(
+            requester, {
+                "type": EventTypes.PowerLevels,
+                "state_key": '',
+                "room_id": new_room_id,
+                "sender": requester.user.to_string(),
+                "content": old_room_pl_state.content,
+            }, ratelimit=False,
+        )
+
+    @defer.inlineCallbacks
+    def clone_exiting_room(
+        self, requester, old_room_id, new_room_id, new_room_version,
+        tombstone_event_id,
+    ):
+        """Populate a new room based on an old room
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id to give the new room (should already have been
+                created with _generate_room_id())
+            new_room_version (unicode): the new room version to use
+            tombstone_event_id (unicode|str): the ID of the tombstone event in the old
+                room.
+        Returns:
+            Deferred[None]
+        """
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_create_room(user_id):
+            raise SynapseError(403, "You are not permitted to create rooms")
+
+        creation_content = {
+            "room_version": new_room_version,
+            "predecessor": {
+                "room_id": old_room_id,
+                "event_id": tombstone_event_id,
+            }
+        }
+
+        initial_state = dict()
+
+        types_to_copy = (
+            (EventTypes.JoinRules, ""),
+            (EventTypes.Name, ""),
+            (EventTypes.Topic, ""),
+            (EventTypes.RoomHistoryVisibility, ""),
+            (EventTypes.GuestAccess, ""),
+            (EventTypes.RoomAvatar, ""),
+        )
+
+        old_room_state_ids = yield self.store.get_filtered_current_state_ids(
+            old_room_id, StateFilter.from_types(types_to_copy),
+        )
+        # map from event_id to BaseEvent
+        old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
+
+        for k, old_event_id in iteritems(old_room_state_ids):
+            old_event = old_room_state_events.get(old_event_id)
+            if old_event:
+                initial_state[k] = old_event.content
+
+        yield self._send_events_for_new_room(
+            requester,
+            new_room_id,
+
+            # we expect to override all the presets with initial_state, so this is
+            # somewhat arbitrary.
+            preset_config=RoomCreationPreset.PRIVATE_CHAT,
+
+            invite_list=[],
+            initial_state=initial_state,
+            creation_content=creation_content,
+        )
+
+        # XXX invites/joins
+        # XXX 3pid invites
+
+    @defer.inlineCallbacks
+    def _move_aliases_to_new_room(
+        self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        directory_handler = self.hs.get_handlers().directory_handler
+
+        aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+        # check to see if we have a canonical alias.
+        canonical_alias = None
+        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+        if canonical_alias_event_id:
+            canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+            if canonical_alias_event:
+                canonical_alias = canonical_alias_event.content.get("alias", "")
+
+        # first we try to remove the aliases from the old room (we suppress sending
+        # the room_aliases event until the end).
+        #
+        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+        # and (b) were created by this user, unless the user is a server admin.
+        #
+        # This is probably correct - given we don't allow such aliases to be deleted
+        # normally, it would be odd to allow it in the case of doing a room upgrade -
+        # but it makes the upgrade less effective, and you have to wonder why a room
+        # admin can't remove aliases that point to that room anyway.
+        # (cf https://github.com/matrix-org/synapse/issues/2360)
+        #
+        removed_aliases = []
+        for alias_str in aliases:
+            alias = RoomAlias.from_string(alias_str)
+            try:
+                yield directory_handler.delete_association(
+                    requester, alias, send_event=False,
+                )
+                removed_aliases.append(alias_str)
+            except SynapseError as e:
+                logger.warning(
+                    "Unable to remove alias %s from old room: %s",
+                    alias, e,
+                )

+        # if we didn't find any aliases, or couldn't remove anyway, we can skip the rest
+        # of this.
+        if not removed_aliases:
+            return
+
+        try:
+            # this can fail if, for some reason, our user doesn't have perms to send
+            # m.room.aliases events in the old room (note that we've already checked that
+            # they have perms to send a tombstone event, so that's not terribly likely).
+            #
+            # If that happens, it's regrettable, but we should carry on: it's the same
+            # as when you remove an alias from the directory normally - it just means that
+            # the aliases event gets out of sync with the directory
+            # (cf https://github.com/vector-im/riot-web/issues/2369)
+            yield directory_handler.send_room_alias_update_event(
+                requester, old_room_id,
+            )
+        except AuthError as e:
+            logger.warning(
+                "Failed to send updated alias event on old room: %s", e,
+            )
+
+        # we can now add any aliases we successfully removed to the new room.
+        for alias in removed_aliases:
+            try:
+                yield directory_handler.create_association(
+                    requester, RoomAlias.from_string(alias),
+                    new_room_id, servers=(self.hs.hostname, ),
+                    send_event=False,
+                )
+                logger.info("Moved alias %s to new room", alias)
+            except SynapseError as e:
+                # I'm not really expecting this to happen, but it could if the spam
+                # checking module decides it shouldn't, or similar.
+                logger.error(
+                    "Error adding alias %s to new room: %s",
+                    alias, e,
+                )
+
+        try:
+            if canonical_alias and (canonical_alias in removed_aliases):
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.CanonicalAlias,
+                        "state_key": "",
+                        "room_id": new_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": {"alias": canonical_alias, },
+                    },
+                    ratelimit=False
+                )
+
+            yield directory_handler.send_room_alias_update_event(
+                requester, new_room_id,
+            )
+        except SynapseError as e:
+            # again I'm not really expecting this to fail, but if it does, I'd rather
+            # we returned the new room to the client at this point.
+            logger.error(
+                "Unable to send updated alias events in new room: %s", e,
+            )

     @defer.inlineCallbacks
     def create_room(self, requester, config, ratelimit=True,
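The moderation lockdown in `_update_upgraded_room_pls` above hinges on a single line, `restricted_level = max(users_default + 1, 50)`. A worked example with illustrative numbers:

    users_default = 0                              # typical room default power level
    restricted_level = max(users_default + 1, 50)  # == 50, i.e. Moderator

    users_default = 75                             # room where ordinary users hold PL 75
    restricted_level = max(users_default + 1, 50)  # == 76, always strictly above users_default

Either way, raising "invite" and "events_default" to this level stops ordinary users from speaking in the replaced room.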
@@ -164,28 +494,7 @@ class RoomCreationHandler(BaseHandler):
         visibility = config.get("visibility", None)
         is_public = visibility == "public"

-        # autogen room IDs and try to create it. We may clash, so just
-        # try a few times till one goes through, giving up eventually.
-        attempts = 0
-        room_id = None
-        while attempts < 5:
-            try:
-                random_string = stringutils.random_string(18)
-                gen_room_id = RoomID(
-                    random_string,
-                    self.hs.hostname,
-                )
-                yield self.store.store_room(
-                    room_id=gen_room_id.to_string(),
-                    room_creator_user_id=user_id,
-                    is_public=is_public
-                )
-                room_id = gen_room_id.to_string()
-                break
-            except StoreError:
-                attempts += 1
-        if not room_id:
-            raise StoreError(500, "Couldn't generate a room ID.")
+        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)

         if room_alias:
             directory_handler = self.hs.get_handlers().directory_handler
@@ -215,18 +524,15 @@ class RoomCreationHandler(BaseHandler):
         # override any attempt to set room versions via the creation_content
         creation_content["room_version"] = room_version

-        room_member_handler = self.hs.get_room_member_handler()
-
         yield self._send_events_for_new_room(
             requester,
             room_id,
-            room_member_handler,
             preset_config=preset_config,
             invite_list=invite_list,
             initial_state=initial_state,
             creation_content=creation_content,
             room_alias=room_alias,
-            power_level_content_override=config.get("power_level_content_override", {}),
+            power_level_content_override=config.get("power_level_content_override"),
             creator_join_profile=creator_join_profile,
         )
@@ -262,7 +568,7 @@ class RoomCreationHandler(BaseHandler):
             if is_direct:
                 content["is_direct"] = is_direct

-            yield room_member_handler.update_membership(
+            yield self.room_member_handler.update_membership(
                 requester,
                 UserID.from_string(invitee),
                 room_id,
@@ -300,14 +606,13 @@ class RoomCreationHandler(BaseHandler):
         self,
         creator,  # A Requester object.
         room_id,
-        room_member_handler,
         preset_config,
         invite_list,
         initial_state,
         creation_content,
-        room_alias,
-        power_level_content_override,
-        creator_join_profile,
+        room_alias=None,
+        power_level_content_override=None,
+        creator_join_profile=None,
     ):
         def create(etype, content, **kwargs):
             e = {
@@ -323,6 +628,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator,
                 event,
@@ -345,7 +651,8 @@ class RoomCreationHandler(BaseHandler):
             content=creation_content,
         )

-        yield room_member_handler.update_membership(
+        logger.info("Sending %s in new room", EventTypes.Member)
+        yield self.room_member_handler.update_membership(
             creator,
             creator.user,
             room_id,
@@ -387,7 +694,8 @@ class RoomCreationHandler(BaseHandler):
             for invitee in invite_list:
                 power_level_content["users"][invitee] = 100

-            power_level_content.update(power_level_content_override)
+            if power_level_content_override:
+                power_level_content.update(power_level_content_override)

             yield send(
                 etype=EventTypes.PowerLevels,
@@ -426,6 +734,30 @@ class RoomCreationHandler(BaseHandler):
             content=content,
         )

+    @defer.inlineCallbacks
+    def _generate_room_id(self, creator_id, is_public):
+        # autogen room IDs and try to create it. We may clash, so just
+        # try a few times till one goes through, giving up eventually.
+        attempts = 0
+        while attempts < 5:
+            try:
+                random_string = stringutils.random_string(18)
+                gen_room_id = RoomID(
+                    random_string,
+                    self.hs.hostname,
+                ).to_string()
+                if isinstance(gen_room_id, bytes):
+                    gen_room_id = gen_room_id.decode('utf-8')
+                yield self.store.store_room(
+                    room_id=gen_room_id,
+                    room_creator_user_id=creator_id,
+                    is_public=is_public,
+                )
+                defer.returnValue(gen_room_id)
+            except StoreError:
+                attempts += 1
+        raise StoreError(500, "Couldn't generate a room ID.")
+

 class RoomContextHandler(object):
     def __init__(self, hs):
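The `isinstance(gen_room_id, bytes)` guard in the new helper looks like a Python 2/3 compatibility detail (my reading; the diff itself does not explain it): the rendered ID may come back as a byte string, and the storage layer wants text. The pattern in isolation:

    room_id = b"!abcdefghijklmnopqr:example.com"  # illustrative value
    if isinstance(room_id, bytes):
        room_id = room_id.decode('utf-8')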
@@ -489,23 +821,24 @@ class RoomContextHandler(object):
         else:
             last_event_id = event_id

-        types = None
-        filtered_types = None
         if event_filter and event_filter.lazy_load_members():
-            members = set(ev.sender for ev in itertools.chain(
-                results["events_before"],
-                (results["event"],),
-                results["events_after"],
-            ))
-            filtered_types = [EventTypes.Member]
-            types = [(EventTypes.Member, member) for member in members]
+            state_filter = StateFilter.from_lazy_load_member_list(
+                ev.sender
+                for ev in itertools.chain(
+                    results["events_before"],
+                    (results["event"],),
+                    results["events_after"],
+                )
+            )
+        else:
+            state_filter = StateFilter.all()

         # XXX: why do we return the state as of the last event rather than the
         # first? Shouldn't we be consistent with /sync?
         # https://github.com/matrix-org/matrix-doc/issues/687

         state = yield self.store.get_state_for_events(
-            [last_event_id], types, filtered_types=filtered_types,
+            [last_event_id], state_filter=state_filter,
         )
         results["state"] = list(state[last_event_id].values())
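`from_lazy_load_member_list` is the third StateFilter constructor this diff relies on. Judging by the call sites here and in the sync handler, it restricts m.room.member events to the given senders while leaving other state types unfiltered. A sketch with placeholder user IDs:

    from synapse.storage.state import StateFilter

    senders = {"@alice:example.com", "@bob:example.com"}
    state_filter = StateFilter.from_lazy_load_member_list(senders)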
@@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler

@@ -324,9 +325,12 @@ class SearchHandler(BaseHandler):
                 else:
                     last_event_id = event.event_id

+                state_filter = StateFilter.from_types(
+                    [(EventTypes.Member, sender) for sender in senders]
+                )
+
                 state = yield self.store.get_state_for_event(
-                    last_event_id,
-                    types=[(EventTypes.Member, sender) for sender in senders]
+                    last_event_id, state_filter
                 )

                 res["profile_info"] = {
@@ -27,6 +27,7 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.roommember import MemberSummary
+from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -469,25 +470,20 @@ class SyncHandler(object):
         ))

     @defer.inlineCallbacks
-    def get_state_after_event(self, event, types=None, filtered_types=None):
+    def get_state_after_event(self, event, state_filter=StateFilter.all()):
         """
         Get the room state after the given event

         Args:
             event(synapse.events.EventBase): event of interest
-            types(list[(str, str|None)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. If `state_key` is None,
-                all events are returned of the given type.
-                May be None, which matches any key.
-            filtered_types(list[str]|None): Only apply filtering via `types` to this
-                list of event types. Other types of events are returned unfiltered.
-                If None, `types` filtering is applied to all events.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.

         Returns:
             A Deferred map from ((type, state_key)->Event)
         """
         state_ids = yield self.store.get_state_ids_for_event(
-            event.event_id, types, filtered_types=filtered_types,
+            event.event_id, state_filter=state_filter,
         )
         if event.is_state():
             state_ids = state_ids.copy()
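One Python detail worth flagging about the rewritten signature (an observation, not part of the diff): `state_filter=StateFilter.all()` is evaluated once at definition time, so every call without an explicit filter shares the same instance. That is only safe because callers treat the filter as read-only. A toy demonstration of the sharing:

    class Filter(object):
        """Stand-in for StateFilter, to show default-argument sharing."""

    def get_state(state_filter=Filter()):
        return state_filter

    assert get_state() is get_state()  # the same instance on every call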
@@ -495,18 +491,14 @@ class SyncHandler(object):
         defer.returnValue(state_ids)

     @defer.inlineCallbacks
-    def get_state_at(self, room_id, stream_position, types=None, filtered_types=None):
+    def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()):
         """ Get the room state at a particular stream position

         Args:
             room_id(str): room for which to get state
             stream_position(StreamToken): point at which to get state
-            types(list[(str, str|None)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. If `state_key` is None,
-                all events are returned of the given type.
-            filtered_types(list[str]|None): Only apply filtering via `types` to this
-                list of event types. Other types of events are returned unfiltered.
-                If None, `types` filtering is applied to all events.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.

         Returns:
             A Deferred map from ((type, state_key)->Event)
@@ -522,7 +514,7 @@ class SyncHandler(object):
         if last_events:
             last_event = last_events[-1]
             state = yield self.get_state_after_event(
-                last_event, types, filtered_types=filtered_types,
+                last_event, state_filter=state_filter,
             )

         else:
@@ -563,10 +555,11 @@ class SyncHandler(object):

         last_event = last_events[-1]
         state_ids = yield self.store.get_state_ids_for_event(
-            last_event.event_id, [
+            last_event.event_id,
+            state_filter=StateFilter.from_types([
                 (EventTypes.Name, ''),
                 (EventTypes.CanonicalAlias, ''),
-            ]
+            ]),
         )

         # this is heavily cached, thus: fast.
@@ -717,8 +710,7 @@ class SyncHandler(object):

         with Measure(self.clock, "compute_state_delta"):

-            types = None
-            filtered_types = None
+            members_to_fetch = None

             lazy_load_members = sync_config.filter_collection.lazy_load_members()
             include_redundant_members = (
|
||||||
# We only request state for the members needed to display the
|
# We only request state for the members needed to display the
|
||||||
# timeline:
|
# timeline:
|
||||||
|
|
||||||
types = [
|
members_to_fetch = set(
|
||||||
(EventTypes.Member, state_key)
|
event.sender # FIXME: we also care about invite targets etc.
|
||||||
for state_key in set(
|
for event in batch.events
|
||||||
event.sender # FIXME: we also care about invite targets etc.
|
)
|
||||||
for event in batch.events
|
|
||||||
)
|
|
||||||
]
|
|
||||||
|
|
||||||
# only apply the filtering to room members
|
if full_state:
|
||||||
filtered_types = [EventTypes.Member]
|
# always make sure we LL ourselves so we know we're in the room
|
||||||
|
# (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
|
||||||
|
# We only need apply this on full state syncs given we disabled
|
||||||
|
# LL for incr syncs in #3840.
|
||||||
|
members_to_fetch.add(sync_config.user.to_string())
|
||||||
|
|
||||||
|
state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
|
||||||
|
else:
|
||||||
|
state_filter = StateFilter.all()
|
||||||
|
|
||||||
timeline_state = {
|
timeline_state = {
|
||||||
(event.type, event.state_key): event.event_id
|
(event.type, event.state_key): event.event_id
|
||||||
|
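Restating the new full-state branch with toy values (the user IDs are illustrative): the syncing user's own membership is always added to the lazy-load set, per the riot-web#7209 fix referenced above:

    members_to_fetch = set(["@alice:example.com"])  # senders seen in the timeline
    full_state = True
    if full_state:
        members_to_fetch.add("@me:example.com")  # also lazy-load our own membership
    # members_to_fetch then drives StateFilter.from_lazy_load_member_list(...)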
@@ -746,28 +743,19 @@ class SyncHandler(object):
             }

             if full_state:
-                if lazy_load_members:
-                    # always make sure we LL ourselves so we know we're in the room
-                    # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
-                    # We only need apply this on full state syncs given we disabled
-                    # LL for incr syncs in #3840.
-                    types.append((EventTypes.Member, sync_config.user.to_string()))
-
                 if batch:
                     current_state_ids = yield self.store.get_state_ids_for_event(
-                        batch.events[-1].event_id, types=types,
-                        filtered_types=filtered_types,
+                        batch.events[-1].event_id, state_filter=state_filter,
                     )

                     state_ids = yield self.store.get_state_ids_for_event(
-                        batch.events[0].event_id, types=types,
-                        filtered_types=filtered_types,
+                        batch.events[0].event_id, state_filter=state_filter,
                     )

                 else:
                     current_state_ids = yield self.get_state_at(
-                        room_id, stream_position=now_token, types=types,
-                        filtered_types=filtered_types,
+                        room_id, stream_position=now_token,
+                        state_filter=state_filter,
                     )

                     state_ids = current_state_ids
@@ -781,8 +769,7 @@ class SyncHandler(object):
                 )
             elif batch.limited:
                 state_at_timeline_start = yield self.store.get_state_ids_for_event(
-                    batch.events[0].event_id, types=types,
-                    filtered_types=filtered_types,
+                    batch.events[0].event_id, state_filter=state_filter,
                 )

                 # for now, we disable LL for gappy syncs - see
@@ -797,17 +784,15 @@ class SyncHandler(object):
                 # members to just be ones which were timeline senders, which then ensures
                 # all of the rest get included in the state block (if we need to know
                 # about them).
-                types = None
-                filtered_types = None
+                state_filter = StateFilter.all()

                 state_at_previous_sync = yield self.get_state_at(
-                    room_id, stream_position=since_token, types=types,
-                    filtered_types=filtered_types,
+                    room_id, stream_position=since_token,
+                    state_filter=state_filter,
                 )

                 current_state_ids = yield self.store.get_state_ids_for_event(
-                    batch.events[-1].event_id, types=types,
-                    filtered_types=filtered_types,
+                    batch.events[-1].event_id, state_filter=state_filter,
                 )

                 state_ids = _calculate_state(
@@ -821,7 +806,7 @@ class SyncHandler(object):
             else:
                 state_ids = {}
                 if lazy_load_members:
-                    if types and batch.events:
+                    if members_to_fetch and batch.events:
                         # We're returning an incremental sync, with no
                         # "gap" since the previous sync, so normally there would be
                         # no state to return.
@@ -831,8 +816,12 @@ class SyncHandler(object):
                         # timeline here, and then dedupe any redundant ones below.

                         state_ids = yield self.store.get_state_ids_for_event(
-                            batch.events[0].event_id, types=types,
-                            filtered_types=None,  # we only want members!
+                            batch.events[0].event_id,
+                            # we only want members!
+                            state_filter=StateFilter.from_types(
+                                (EventTypes.Member, member)
+                                for member in members_to_fetch
+                            ),
                         )

             if lazy_load_members and not include_redundant_members:
@@ -85,7 +85,10 @@ class EmailPusher(object):
         self.timed_call = None

     def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
-        self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+        if self.max_stream_ordering:
+            self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+        else:
+            self.max_stream_ordering = max_stream_ordering
         self._start_processing()

     def on_new_receipts(self, min_stream_id, max_stream_id):
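The guard added above matters because `self.max_stream_ordering` starts out as None, and on Python 3 `max()` cannot compare None with an integer. A minimal reproduction of the failure mode and the fix:

    current = None          # self.max_stream_ordering before any notification
    incoming = 10
    # max(incoming, current) would raise TypeError on Python 3
    current = max(incoming, current) if current else incoming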
@@ -26,7 +26,6 @@ import bleach
 import jinja2

 from twisted.internet import defer
-from twisted.mail.smtp import sendmail

 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
@@ -85,6 +84,7 @@ class Mailer(object):
         self.notif_template_html = notif_template_html
         self.notif_template_text = notif_template_text

+        self.sendmail = self.hs.get_sendmail()
         self.store = self.hs.get_datastore()
         self.macaroon_gen = self.hs.get_macaroon_generator()
         self.state_handler = self.hs.get_state_handler()
@@ -191,11 +191,11 @@ class Mailer(object):
         multipart_msg.attach(html_part)

         logger.info("Sending email push notification to %s" % email_address)
-        # logger.debug(html_text)

-        yield sendmail(
+        yield self.sendmail(
             self.hs.config.email_smtp_host,
-            raw_from, raw_to, multipart_msg.as_string(),
+            raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
+            reactor=self.hs.get_reactor(),
             port=self.hs.config.email_smtp_port,
             requireAuthentication=self.hs.config.email_smtp_user is not None,
             username=self.hs.config.email_smtp_user,
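Routing the send through `hs.get_sendmail()` instead of the module-level `twisted.mail.smtp.sendmail` makes the SMTP transport injectable. A sketch of how a test might exploit that, assuming a Mock-style homeserver stub (illustrative, not a real synapse test):

    from mock import Mock  # the mock package, as used by py2-era test suites

    hs = Mock()
    hs.get_sendmail.return_value = sendmail_stub = Mock()
    # construct a Mailer around `hs` and trigger a notification; afterwards
    # sendmail_stub records the host, port and utf-8-encoded message for assertions.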
@@ -333,7 +333,7 @@ class Mailer(object):
                            notif_events, user_id, reason):
         if len(notifs_by_room) == 1:
             # Only one room has new stuff
-            room_id = notifs_by_room.keys()[0]
+            room_id = list(notifs_by_room.keys())[0]

             # If the room has some kind of name, use it, but we don't
             # want the generated-from-names one here otherwise we'll
@@ -51,7 +51,6 @@ REQUIREMENTS = {
     "daemonize>=2.3.1": ["daemonize"],
     "bcrypt>=3.1.0": ["bcrypt>=3.1.0"],
     "pillow>=3.1.2": ["PIL"],
-    "pydenticon>=0.2": ["pydenticon"],
     "sortedcontainers>=1.4.4": ["sortedcontainers"],
     "psutil>=2.0.0": ["psutil>=2.0.0"],
     "pysaml2>=3.0.0": ["saml2"],
@@ -106,7 +106,7 @@ class ReplicationClientHandler(object):

         Can be overridden in subclasses to handle more.
         """
-        logger.info("Received rdata %s -> %s", stream_name, token)
+        logger.debug("Received rdata %s -> %s", stream_name, token)
         return self.store.process_replication_rows(stream_name, token, rows)

     def on_position(self, stream_name, token):
@@ -656,7 +656,7 @@ tcp_inbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.inbound_commands_counter)
     },

@@ -667,7 +667,7 @@ tcp_outbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.outbound_commands_counter)
     },
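The one-character fix above is subtle: the counters are keyed by command-name strings, so indexing `k[0]` truncated the exported metric label to the first character. An illustration with made-up counts:

    from six import iteritems

    inbound_commands_counter = {"RDATA": 7, "PING": 3}
    broken = {(k[0],): c for k, c in iteritems(inbound_commands_counter)}  # ('R',), ('P',)
    fixed = {(k,): c for k, c in iteritems(inbound_commands_counter)}      # ('RDATA',), ('PING',)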
@@ -47,6 +47,7 @@ from synapse.rest.client.v2_alpha import (
     register,
     report_event,
     room_keys,
+    room_upgrade_rest_servlet,
     sendtodevice,
     sync,
     tags,

@@ -116,3 +117,4 @@ class ClientRestResource(JsonResource):
         sendtodevice.register_servlets(hs, client_resource)
         user_directory.register_servlets(hs, client_resource)
         groups.register_servlets(hs, client_resource)
+        room_upgrade_rest_servlet.register_servlets(hs, client_resource)
@@ -33,6 +33,7 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
     parse_string,
 )
+from synapse.storage.state import StateFilter
 from synapse.streams.config import PaginationConfig
 from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID

@@ -409,7 +410,7 @@ class RoomMemberListRestServlet(ClientV1RestServlet):
             room_id=room_id,
             user_id=requester.user.to_string(),
             at_token=at_token,
-            types=[(EventTypes.Member, None)],
+            state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
         )

         chunk = []
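In the filter above the None state key is a wildcard, matching the semantics of the old `types` argument documented earlier in this diff: it selects every m.room.member event rather than one user's membership.

    from synapse.storage.state import StateFilter

    members_only = StateFilter.from_types([("m.room.member", None)])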
synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py (new file, 89 lines)
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import KNOWN_ROOM_VERSIONS
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RoomUpgradeRestServlet(RestServlet):
+    """Handler for room upgrade requests.
+
+    Handles requests of the form:
+
+        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
+        Content-Type: application/json
+
+        {
+            "new_version": "2",
+        }
+
+    Creates a new room and shuts down the old one. Returns the ID of the new room.
+
+    Args:
+        hs (synapse.server.HomeServer):
+    """
+    PATTERNS = client_v2_patterns(
+        # /rooms/$roomid/upgrade
+        "/rooms/(?P<room_id>[^/]*)/upgrade$",
+        v2_alpha=False,
+    )
+
+    def __init__(self, hs):
+        super(RoomUpgradeRestServlet, self).__init__()
+        self._hs = hs
+        self._room_creation_handler = hs.get_room_creation_handler()
+        self._auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id):
+        requester = yield self._auth.get_user_by_req(request)
+
+        content = parse_json_object_from_request(request)
+        assert_params_in_dict(content, ("new_version", ))
+        new_version = content["new_version"]
+
+        if new_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Your homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
+        new_room_id = yield self._room_creation_handler.upgrade_room(
+            requester, room_id, new_version
+        )
+
+        ret = {
+            "replacement_room": new_room_id,
+        }
+
+        defer.returnValue((200, ret))
+
+
+def register_servlets(hs, http_server):
+    RoomUpgradeRestServlet(hs).register(http_server)
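A client-side sketch of exercising the new endpoint; the URL pattern, request body and response key come from the servlet above, while the homeserver, room ID and access token are placeholders:

    import json

    try:
        from urllib.request import Request, urlopen  # Python 3
    except ImportError:
        from urllib2 import Request, urlopen  # Python 2

    req = Request(
        "https://example.com/_matrix/client/r0/rooms/!old:example.com/upgrade"
        "?access_token=TOKEN",
        data=json.dumps({"new_version": "2"}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    resp = json.loads(urlopen(req).read().decode("utf-8"))
    # resp == {"replacement_room": "<id of the new room>"}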
deleted file (14 lines)
@@ -1,14 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
deleted file (92 lines)
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-
-from canonicaljson import encode_canonical_json
-from signedjson.sign import sign_json
-from unpaddedbase64 import encode_base64
-
-from OpenSSL import crypto
-from twisted.web.resource import Resource
-
-from synapse.http.server import respond_with_json_bytes
-
-logger = logging.getLogger(__name__)
-
-
-class LocalKey(Resource):
-    """HTTP resource containing encoding the TLS X.509 certificate and NACL
-    signature verification keys for this server::
-
-        GET /key HTTP/1.1
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json
-        {
-            "server_name": "this.server.example.com"
-            "verify_keys": {
-                "algorithm:version": # base64 encoded NACL verification key.
-            },
-            "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
-            "signatures": {
-                "this.server.example.com": {
-                    "algorithm:version": # NACL signature for this server.
-                }
-            }
-        }
-    """
-
-    def __init__(self, hs):
-        self.response_body = encode_canonical_json(
-            self.response_json_object(hs.config)
-        )
-        Resource.__init__(self)
-
-    @staticmethod
-    def response_json_object(server_config):
-        verify_keys = {}
-        for key in server_config.signing_key:
-            verify_key_bytes = key.verify_key.encode()
-            key_id = "%s:%s" % (key.alg, key.version)
-            verify_keys[key_id] = encode_base64(verify_key_bytes)
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1,
-            server_config.tls_certificate
-        )
-        json_object = {
-            u"server_name": server_config.server_name,
-            u"verify_keys": verify_keys,
-            u"tls_certificate": encode_base64(x509_certificate_bytes)
-        }
-        for key in server_config.signing_key:
-            json_object = sign_json(
-                json_object,
-                server_config.server_name,
-                key,
-            )
-
-        return json_object
-
-    def render_GET(self, request):
-        return respond_with_json_bytes(
-            request, 200, self.response_body,
-        )
-
-    def getChild(self, name, request):
-        if name == b'':
-            return self
@@ -1,68 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from pydenticon import Generator
-
-from twisted.web.resource import Resource
-
-from synapse.http.servlet import parse_integer
-
-FOREGROUND = [
-    "rgb(45,79,255)",
-    "rgb(254,180,44)",
-    "rgb(226,121,234)",
-    "rgb(30,179,253)",
-    "rgb(232,77,65)",
-    "rgb(49,203,115)",
-    "rgb(141,69,170)"
-]
-
-BACKGROUND = "rgb(224,224,224)"
-SIZE = 5
-
-
-class IdenticonResource(Resource):
-    isLeaf = True
-
-    def __init__(self):
-        Resource.__init__(self)
-        self.generator = Generator(
-            SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND,
-        )
-
-    def generate_identicon(self, name, width, height):
-        v_padding = width % SIZE
-        h_padding = height % SIZE
-        top_padding = v_padding // 2
-        left_padding = h_padding // 2
-        bottom_padding = v_padding - top_padding
-        right_padding = h_padding - left_padding
-        width -= v_padding
-        height -= h_padding
-        padding = (top_padding, bottom_padding, left_padding, right_padding)
-        identicon = self.generator.generate(
-            name, width, height, padding=padding
-        )
-        return identicon
-
-    def render_GET(self, request):
-        name = "/".join(request.postpath)
-        width = parse_integer(request, "width", default=96)
-        height = parse_integer(request, "height", default=96)
-        identicon_bytes = self.generate_identicon(name, width, height)
-        request.setHeader(b"Content-Type", b"image/png")
-        request.setHeader(
-            b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
-        )
-        return identicon_bytes
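For reference, the padding arithmetic in the deleted generate_identicon() trims the requested dimensions down to multiples of SIZE and splits the remainder between the two edges. A standalone sketch of the same arithmetic, written for this note, for the default 96x96 request:

SIZE = 5
width = height = 96

v_padding = width % SIZE                   # 96 % 5 = 1
h_padding = height % SIZE                  # 96 % 5 = 1
top_padding = v_padding // 2               # 0
left_padding = h_padding // 2              # 0
bottom_padding = v_padding - top_padding   # 1
right_padding = h_padding - left_padding   # 1
width -= v_padding                         # 95, now a multiple of SIZE
height -= h_padding                        # 95, now a multiple of SIZE
assert width % SIZE == 0 and height % SIZE == 0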
@@ -45,7 +45,6 @@ from ._base import FileInfo, respond_404, respond_with_responder
 from .config_resource import MediaConfigResource
 from .download_resource import DownloadResource
 from .filepath import MediaFilePaths
-from .identicon_resource import IdenticonResource
 from .media_storage import MediaStorage
 from .preview_url_resource import PreviewUrlResource
 from .storage_provider import StorageProviderWrapper
@@ -769,7 +768,6 @@ class MediaRepositoryResource(Resource):
         self.putChild(b"thumbnail", ThumbnailResource(
             hs, media_repo, media_repo.media_storage,
         ))
-        self.putChild(b"identicon", IdenticonResource())
         if hs.config.url_preview_enabled:
             self.putChild(b"preview_url", PreviewUrlResource(
                 hs, media_repo, media_repo.media_storage,
@@ -23,6 +23,7 @@ import abc
 import logging

 from twisted.enterprise import adbapi
+from twisted.mail.smtp import sendmail
 from twisted.web.client import BrowserLikePolicyForHTTPS

 from synapse.api.auth import Auth
@@ -174,6 +175,7 @@ class HomeServer(object):
         'message_handler',
         'pagination_handler',
         'room_context_handler',
+        'sendmail',
     ]

     # This is overridden in derived application classes
@@ -207,6 +209,7 @@ class HomeServer(object):
         logger.info("Setting up.")
         with self.get_db_conn() as conn:
             self.datastore = self.DATASTORE_CLASS(conn, self)
+            conn.commit()
         logger.info("Finished setting up.")

     def get_reactor(self):
@@ -268,6 +271,9 @@ class HomeServer(object):
     def build_room_creation_handler(self):
         return RoomCreationHandler(self)

+    def build_sendmail(self):
+        return sendmail
+
     def build_state_handler(self):
         return StateHandler(self)

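Exposing 'sendmail' as a buildable dependency lets tests inject a stub instead of the real twisted.mail.smtp.sendmail; the new email-pusher test later in this diff does exactly that. A minimal sketch of the pattern (names other than sendmail are illustrative):

from twisted.internet.defer import Deferred

email_attempts = []  # List[Tuple[Deferred, args, kwargs]]

def fake_sendmail(*args, **kwargs):
    # Capture the attempt; the test fires the Deferred to simulate delivery.
    d = Deferred()
    email_attempts.append((d, args, kwargs))
    return d

# hs = setup_test_homeserver(config=config, sendmail=fake_sendmail)
# hs.get_sendmail() then returns the stub instead of the real SMTP client.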
@@ -7,6 +7,9 @@ import synapse.handlers.auth
 import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
+import synapse.handlers.room
+import synapse.handlers.room_member
+import synapse.handlers.message
 import synapse.handlers.set_password
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
@@ -50,6 +53,9 @@ class HomeServer(object):
     def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
         pass

+    def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
+        pass
+
     def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler:
         pass

@@ -589,10 +589,14 @@ class DeviceStore(SQLBaseStore):
         combined list of changes to devices, and which destinations need to be
         poked. `destination` may be None if no destinations need to be poked.
         """
+        # We do a group by here as there can be a large number of duplicate
+        # entries, since we throw away device IDs.
         sql = """
-            SELECT stream_id, user_id, destination FROM device_lists_stream
+            SELECT MAX(stream_id) AS stream_id, user_id, destination
+            FROM device_lists_stream
             LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
             WHERE ? < stream_id AND stream_id <= ?
+            GROUP BY user_id, destination
         """
         return self._execute(
             "get_all_device_list_changes_for_remotes", None,
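The effect of the new GROUP BY is to collapse the many rows produced by discarding device IDs into one row per (user_id, destination) pair, keeping only the newest stream_id. A toy sqlite3 illustration of just that aggregation (simplified schema, no outbound-pokes join):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE device_lists_stream (stream_id INT, user_id TEXT, destination TEXT)"
)
conn.executemany(
    "INSERT INTO device_lists_stream VALUES (?, ?, ?)",
    [(1, "@a:x", "remote.example"), (2, "@a:x", "remote.example"), (3, "@b:x", None)],
)
rows = conn.execute(
    """
    SELECT MAX(stream_id) AS stream_id, user_id, destination
    FROM device_lists_stream
    WHERE ? < stream_id AND stream_id <= ?
    GROUP BY user_id, destination
    """,
    (0, 10),
).fetchall()
print(sorted(rows))  # [(2, '@a:x', 'remote.example'), (3, '@b:x', None)]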
@@ -90,7 +90,7 @@ class DirectoryWorkerStore(SQLBaseStore):
 class DirectoryStore(DirectoryWorkerStore):
     @defer.inlineCallbacks
     def create_room_alias_association(self, room_alias, room_id, servers, creator=None):
-        """ Creates an associatin between a room alias and room_id/servers
+        """ Creates an association between a room alias and room_id/servers

         Args:
             room_alias (RoomAlias)
@@ -38,6 +38,7 @@ from synapse.state import StateResolutionStore
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.event_federation import EventFederationStore
 from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.state import StateGroupWorkerStore
 from synapse.types import RoomStreamToken, get_domain_from_id
 from synapse.util import batch_iter
 from synapse.util.async_helpers import ObservableDeferred
@@ -205,7 +206,8 @@ def _retry_on_integrity_error(func):

 # inherits from EventFederationStore so that we can call _update_backward_extremities
 # and _handle_mult_prev_events (though arguably those could both be moved in here)
-class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
+class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore,
+                  BackgroundUpdateStore):
     EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
     EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"

@@ -2034,62 +2036,44 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore

         logger.info("[purge] finding redundant state groups")

-        # Get all state groups that are only referenced by events that are
-        # to be deleted.
-        # This works by first getting state groups that we may want to delete,
-        # joining against event_to_state_groups to get events that use that
-        # state group, then left joining against events_to_purge again. Any
-        # state group where the left join produce *no nulls* are referenced
-        # only by events that are going to be purged.
+        # Get all state groups that are referenced by events that are to be
+        # deleted. We then go and check if they are referenced by other events
+        # or state groups, and if not we delete them.
         txn.execute("""
-            SELECT state_group FROM
-            (
-                SELECT DISTINCT state_group FROM events_to_purge
-                INNER JOIN event_to_state_groups USING (event_id)
-            ) AS sp
-            INNER JOIN event_to_state_groups USING (state_group)
-            LEFT JOIN events_to_purge AS ep USING (event_id)
-            GROUP BY state_group
-            HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0
+            SELECT DISTINCT state_group FROM events_to_purge
+            INNER JOIN event_to_state_groups USING (event_id)
         """)

-        state_rows = txn.fetchall()
-        logger.info("[purge] found %i redundant state groups", len(state_rows))
-
-        # make a set of the redundant state groups, so that we can look them up
-        # efficiently
-        state_groups_to_delete = set([sg for sg, in state_rows])
-
-        # Now we get all the state groups that rely on these state groups
-        logger.info("[purge] finding state groups which depend on redundant"
-                    " state groups")
-        remaining_state_groups = []
-        for i in range(0, len(state_rows), 100):
-            chunk = [sg for sg, in state_rows[i:i + 100]]
-            # look for state groups whose prev_state_group is one we are about
-            # to delete
-            rows = self._simple_select_many_txn(
-                txn,
-                table="state_group_edges",
-                column="prev_state_group",
-                iterable=chunk,
-                retcols=["state_group"],
-                keyvalues={},
-            )
-            remaining_state_groups.extend(
-                row["state_group"] for row in rows
-
-                # exclude state groups we are about to delete: no point in
-                # updating them
-                if row["state_group"] not in state_groups_to_delete
-            )
+        referenced_state_groups = set(sg for sg, in txn)
+        logger.info(
+            "[purge] found %i referenced state groups",
+            len(referenced_state_groups),
+        )
+
+        logger.info("[purge] finding state groups that can be deleted")
+
+        state_groups_to_delete, remaining_state_groups = (
+            self._find_unreferenced_groups_during_purge(
+                txn, referenced_state_groups,
+            )
+        )
+
+        logger.info(
+            "[purge] found %i state groups to delete",
+            len(state_groups_to_delete),
+        )
+
+        logger.info(
+            "[purge] de-delta-ing %i remaining state groups",
+            len(remaining_state_groups),
+        )

         # Now we turn the state groups that reference to-be-deleted state
         # groups to non delta versions.
         for sg in remaining_state_groups:
             logger.info("[purge] de-delta-ing remaining state group %s", sg)
             curr_state = self._get_state_groups_from_groups_txn(
-                txn, [sg], types=None
+                txn, [sg],
             )
             curr_state = curr_state[sg]

@@ -2127,11 +2111,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         logger.info("[purge] removing redundant state groups")
         txn.executemany(
             "DELETE FROM state_groups_state WHERE state_group = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
         )
         txn.executemany(
             "DELETE FROM state_groups WHERE id = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
         )

         logger.info("[purge] removing events from event_to_state_groups")
@@ -2227,6 +2211,85 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore

         logger.info("[purge] done")

+    def _find_unreferenced_groups_during_purge(self, txn, state_groups):
+        """Used when purging history to figure out which state groups can be
+        deleted and which need to be de-delta'ed (due to one of its prev groups
+        being scheduled for deletion).
+
+        Args:
+            txn
+            state_groups (set[int]): Set of state groups referenced by events
+                that are going to be deleted.
+
+        Returns:
+            tuple[set[int], set[int]]: The set of state groups that can be
+            deleted and the set of state groups that need to be de-delta'ed
+        """
+        # Graph of state group -> previous group
+        graph = {}
+
+        # Set of events that we have found to be referenced by events
+        referenced_groups = set()
+
+        # Set of state groups we've already seen
+        state_groups_seen = set(state_groups)
+
+        # Set of state groups to handle next.
+        next_to_search = set(state_groups)
+        while next_to_search:
+            # We bound size of groups we're looking up at once, to stop the
+            # SQL query getting too big
+            if len(next_to_search) < 100:
+                current_search = next_to_search
+                next_to_search = set()
+            else:
+                current_search = set(itertools.islice(next_to_search, 100))
+                next_to_search -= current_search
+
+            # Check if state groups are referenced
+            sql = """
+                SELECT DISTINCT state_group FROM event_to_state_groups
+                LEFT JOIN events_to_purge AS ep USING (event_id)
+                WHERE state_group IN (%s) AND ep.event_id IS NULL
+            """ % (",".join("?" for _ in current_search),)
+            txn.execute(sql, list(current_search))
+
+            referenced = set(sg for sg, in txn)
+            referenced_groups |= referenced
+
+            # We don't continue iterating up the state group graphs for state
+            # groups that are referenced.
+            current_search -= referenced
+
+            rows = self._simple_select_many_txn(
+                txn,
+                table="state_group_edges",
+                column="prev_state_group",
+                iterable=current_search,
+                keyvalues={},
+                retcols=("prev_state_group", "state_group",),
+            )
+
+            prevs = set(row["state_group"] for row in rows)
+            # We don't bother re-handling groups we've already seen
+            prevs -= state_groups_seen
+            next_to_search |= prevs
+            state_groups_seen |= prevs
+
+            for row in rows:
+                # Note: Each state group can have at most one prev group
+                graph[row["state_group"]] = row["prev_state_group"]
+
+        to_delete = state_groups_seen - referenced_groups
+
+        to_dedelta = set()
+        for sg in referenced_groups:
+            prev_sg = graph.get(sg)
+            if prev_sg and prev_sg in to_delete:
+                to_dedelta.add(sg)
+
+        return to_delete, to_dedelta
+
     @defer.inlineCallbacks
     def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
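The shape of _find_unreferenced_groups_during_purge is a breadth-first walk of the state-group graph: start from the groups the purged events used, stop at any group still referenced by surviving events, and classify everything else. A standalone sketch of the same idea over plain dicts (illustrative only; these names are not Synapse APIs):

def find_unreferenced_groups(start_groups, edges, still_referenced):
    """edges maps state_group -> child groups that delta against it;
    still_referenced is the set of groups surviving events still use."""
    graph = {}                       # child group -> its prev group
    referenced = set()
    seen = set(start_groups)
    frontier = set(start_groups)
    while frontier:
        current, frontier = frontier, set()
        for sg in current:
            if sg in still_referenced:
                referenced.add(sg)
                continue             # don't walk past referenced groups
            for child in edges.get(sg, ()):
                graph[child] = sg
                if child not in seen:
                    seen.add(child)
                    frontier.add(child)
    to_delete = seen - referenced
    # Surviving groups whose prev group is going away must be expanded
    # ("de-delta'ed") before the prev group is deleted.
    to_dedelta = {sg for sg in referenced if graph.get(sg) in to_delete}
    return to_delete, to_dedelta

edges = {1: [2], 2: [3]}             # 2 deltas against 1, 3 against 2
print(find_unreferenced_groups({1}, edges, still_referenced={3}))
# ({1, 2}, {3}) -- 1 and 2 can go; 3 survives and must be de-delta'ed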
@@ -33,19 +33,29 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         self._clock = hs.get_clock()
         self.hs = hs
         self.reserved_users = ()
+        # Do not add more reserved users than the total allowable number
+        self._initialise_reserved_users(
+            dbconn.cursor(),
+            hs.config.mau_limits_reserved_threepids[:self.hs.config.max_mau_value],
+        )

-    @defer.inlineCallbacks
-    def initialise_reserved_users(self, threepids):
-        store = self.hs.get_datastore()
+    def _initialise_reserved_users(self, txn, threepids):
+        """Ensures that reserved threepids are accounted for in the MAU table, should
+        be called on start up.
+
+        Args:
+            txn (cursor):
+            threepids (list[dict]): List of threepid dicts to reserve
+        """
         reserved_user_list = []

-        # Do not add more reserved users than the total allowable number
-        for tp in threepids[:self.hs.config.max_mau_value]:
-            user_id = yield store.get_user_id_by_threepid(
+        for tp in threepids:
+            user_id = self.get_user_id_by_threepid_txn(
+                txn,
                 tp["medium"], tp["address"]
             )
             if user_id:
-                yield self.upsert_monthly_active_user(user_id)
+                self.upsert_monthly_active_user_txn(txn, user_id)
                 reserved_user_list.append(user_id)
             else:
                 logger.warning(
@@ -55,8 +65,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     @defer.inlineCallbacks
     def reap_monthly_active_users(self):
-        """
-        Cleans out monthly active user table to ensure that no stale
+        """Cleans out monthly active user table to ensure that no stale
         entries exist.

         Returns:
@@ -165,19 +174,44 @@ class MonthlyActiveUsersStore(SQLBaseStore):

     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
+        """Updates or inserts the user into the monthly active user table, which
+        is used to track the current MAU usage of the server
+
+        Args:
+            user_id (str): user to add/update
         """
-        Updates or inserts monthly active user member
-        Arguments:
-            user_id (str): user to add/update
-        Deferred[bool]: True if a new entry was created, False if an
-            existing one was updated.
+        is_insert = yield self.runInteraction(
+            "upsert_monthly_active_user", self.upsert_monthly_active_user_txn,
+            user_id
+        )
+
+        if is_insert:
+            self.user_last_seen_monthly_active.invalidate((user_id,))
+            self.get_monthly_active_count.invalidate(())
+
+    def upsert_monthly_active_user_txn(self, txn, user_id):
+        """Updates or inserts monthly active user member
+
+        Note that, after calling this method, it will generally be necessary
+        to invalidate the caches on user_last_seen_monthly_active and
+        get_monthly_active_count. We can't do that here, because we are running
+        in a database thread rather than the main thread, and we can't call
+        txn.call_after because txn may not be a LoggingTransaction.
+
+        Args:
+            txn (cursor):
+            user_id (str): user to add/update
+
+        Returns:
+            bool: True if a new entry was created, False if an
+            existing one was updated.
         """
         # Am consciously deciding to lock the table on the basis that is ought
         # never be a big table and alternative approaches (batching multiple
         # upserts into a single txn) introduced a lot of extra complexity.
         # See https://github.com/matrix-org/synapse/issues/3854 for more
-        is_insert = yield self._simple_upsert(
-            desc="upsert_monthly_active_user",
+        is_insert = self._simple_upsert_txn(
+            txn,
             table="monthly_active_users",
             keyvalues={
                 "user_id": user_id,
@@ -186,9 +220,8 @@ class MonthlyActiveUsersStore(SQLBaseStore):
                 "timestamp": int(self._clock.time_msec()),
             },
         )
-        if is_insert:
-            self.user_last_seen_monthly_active.invalidate((user_id,))
-            self.get_monthly_active_count.invalidate(())
+
+        return is_insert

     @cached(num_args=1)
     def user_last_seen_monthly_active(self, user_id):
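The MAU changes follow a pattern repeated throughout this commit: the Deferred-returning method becomes a thin runInteraction wrapper, the real work moves into a *_txn function that other transactions can call directly (here, _initialise_reserved_users at startup), and cache invalidation stays on the wrapper side because the txn side runs on a database thread. Schematically, with hypothetical names:

from twisted.internet import defer

class SomeStore(object):
    @defer.inlineCallbacks
    def do_thing(self, user_id):
        # Deferred-returning public API: just schedules the txn function.
        result = yield self.runInteraction("do_thing", self.do_thing_txn, user_id)
        if result:
            # Cache invalidation happens here, back on the main thread.
            self.some_cached_method.invalidate((user_id,))

    def do_thing_txn(self, txn, user_id):
        # Runs on a database thread; safe to call from other txn functions,
        # but must not touch caches directly.
        txn.execute("UPDATE some_table SET seen = 1 WHERE user_id = ?", (user_id,))
        return txn.rowcount > 0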
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)

 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 51
+SCHEMA_VERSION = 52

 dir_path = os.path.abspath(os.path.dirname(__file__))

@@ -474,17 +474,44 @@ class RegistrationStore(RegistrationWorkerStore,

     @defer.inlineCallbacks
     def get_user_id_by_threepid(self, medium, address):
-        ret = yield self._simple_select_one(
+        """Returns user id from threepid
+
+        Args:
+            medium (str): threepid medium e.g. email
+            address (str): threepid address e.g. me@example.com
+
+        Returns:
+            Deferred[str|None]: user id or None if no user id/threepid mapping exists
+        """
+        user_id = yield self.runInteraction(
+            "get_user_id_by_threepid", self.get_user_id_by_threepid_txn,
+            medium, address
+        )
+        defer.returnValue(user_id)
+
+    def get_user_id_by_threepid_txn(self, txn, medium, address):
+        """Returns user id from threepid
+
+        Args:
+            txn (cursor):
+            medium (str): threepid medium e.g. email
+            address (str): threepid address e.g. me@example.com
+
+        Returns:
+            str|None: user id or None if no user id/threepid mapping exists
+        """
+        ret = self._simple_select_one_txn(
+            txn,
             "user_threepids",
             {
                 "medium": medium,
                 "address": address
             },
-            ['user_id'], True, 'get_user_id_by_threepid'
+            ['user_id'], True
         )
         if ret:
-            defer.returnValue(ret['user_id'])
-        defer.returnValue(None)
+            return ret['user_id']
+        return None

     def user_delete_threepid(self, user_id, medium, address):
         return self._simple_delete(
@@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore):
         Args:
             room_id (str): The ID of the room to retrieve.
         Returns:
-            A namedtuple containing the room information, or an empty list.
+            A dict containing the room information, or None if the room is unknown.
         """
         return self._simple_select_one(
             table="rooms",
@@ -0,0 +1,19 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This is needed to efficiently check for unreferenced state groups during
+-- purge. Added events_to_state_group(state_group) index
+INSERT into background_updates (update_name, progress_json)
+    VALUES ('event_to_state_groups_sg_index', '{}');
File diff suppressed because it is too large
@@ -14,6 +14,7 @@
 # limitations under the License.

 import logging
+import re
 from itertools import islice

 import attr
@@ -138,3 +139,27 @@ def log_failure(failure, msg, consumeErrors=True):

     if not consumeErrors:
         return failure
+
+
+def glob_to_regex(glob):
+    """Converts a glob to a compiled regex object.
+
+    The regex is anchored at the beginning and end of the string.
+
+    Args:
+        glob (str)
+
+    Returns:
+        re.RegexObject
+    """
+    res = ''
+    for c in glob:
+        if c == '*':
+            res = res + '.*'
+        elif c == '?':
+            res = res + '.'
+        else:
+            res = res + re.escape(c)
+
+    # \A anchors at start of string, \Z at end of string
+    return re.compile(r"\A" + res + r"\Z", re.IGNORECASE)
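A quick usage sketch of the new glob_to_regex helper (standalone, written for this note rather than taken from the diff):

from synapse.util import glob_to_regex

pattern = glob_to_regex("#unofficial_*")
assert pattern.match("#unofficial_st:example.com")
assert pattern.match("#UNOFFICIAL_chat")        # IGNORECASE: case-insensitive
assert not pattern.match("room #unofficial_x")  # \A...\Z: anchored at both ends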
@@ -23,6 +23,7 @@ from twisted.internet import defer

 from synapse.api.constants import EventTypes, Membership
 from synapse.events.utils import prune_event
+from synapse.storage.state import StateFilter
 from synapse.types import get_domain_from_id

 logger = logging.getLogger(__name__)
@@ -72,7 +73,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False,
     )
     event_id_to_state = yield store.get_state_for_events(
         frozenset(e.event_id for e in events),
-        types=types,
+        state_filter=StateFilter.from_types(types),
     )

     ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
@@ -273,8 +274,8 @@ def filter_events_for_server(store, server_name, events):
     # need to check membership (as we know the server is in the room).
     event_to_state_ids = yield store.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
-        types=(
-            (EventTypes.RoomHistoryVisibility, ""),
-        )
+        state_filter=StateFilter.from_types(
+            types=((EventTypes.RoomHistoryVisibility, ""),),
+        )
     )

@@ -314,9 +315,11 @@ def filter_events_for_server(store, server_name, events):
     # of the history vis and membership state at those events.
     event_to_state_ids = yield store.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
-        types=(
-            (EventTypes.RoomHistoryVisibility, ""),
-            (EventTypes.Member, None),
-        )
+        state_filter=StateFilter.from_types(
+            types=(
+                (EventTypes.RoomHistoryVisibility, ""),
+                (EventTypes.Member, None),
+            ),
+        )
     )

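The types= tuples passed to StateFilter.from_types pair an event type with a state_key, with None acting as a wildcard state_key. Pulled out of the last hunk above as a standalone sketch:

from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter

# Fetch only the history-visibility event (state_key "") and all
# m.room.member events (state_key None = any), rather than full room state.
state_filter = StateFilter.from_types(
    types=(
        (EventTypes.RoomHistoryVisibility, ""),
        (EventTypes.Member, None),
    ),
)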
67   tests/config/test_room_directory.py  (new file)
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from synapse.config.room_directory import RoomDirectoryConfig
+
+from tests import unittest
+
+
+class RoomDirectoryConfigTestCase(unittest.TestCase):
+    def test_alias_creation_acl(self):
+        config = yaml.load("""
+        alias_creation_rules:
+            - user_id: "*bob*"
+              alias: "*"
+              action: "deny"
+            - user_id: "*"
+              alias: "#unofficial_*"
+              action: "allow"
+            - user_id: "@foo*:example.com"
+              alias: "*"
+              action: "allow"
+            - user_id: "@gah:example.com"
+              alias: "#goo:example.com"
+              action: "allow"
+        """)
+
+        rd_config = RoomDirectoryConfig()
+        rd_config.read_config(config)
+
+        self.assertFalse(rd_config.is_alias_creation_allowed(
+            user_id="@bob:example.com",
+            alias="#test:example.com",
+        ))
+
+        self.assertTrue(rd_config.is_alias_creation_allowed(
+            user_id="@test:example.com",
+            alias="#unofficial_st:example.com",
+        ))
+
+        self.assertTrue(rd_config.is_alias_creation_allowed(
+            user_id="@foobar:example.com",
+            alias="#test:example.com",
+        ))
+
+        self.assertTrue(rd_config.is_alias_creation_allowed(
+            user_id="@gah:example.com",
+            alias="#goo:example.com",
+        ))
+
+        self.assertFalse(rd_config.is_alias_creation_allowed(
+            user_id="@test:example.com",
+            alias="#test:example.com",
+        ))
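The assertions above pin down the rule semantics: each rule's globs are matched against both the creator's user_id and the alias, and anything matching no "allow" rule is denied. An illustrative re-implementation, assuming rules are checked in order with the first match winning (the config module itself is not part of this excerpt, so that ordering is our reading):

from synapse.util import glob_to_regex

def is_alias_creation_allowed(rules, user_id, alias):
    for rule in rules:
        if (glob_to_regex(rule["user_id"]).match(user_id)
                and glob_to_regex(rule["alias"]).match(alias)):
            return rule["action"] == "allow"
    return False  # no rule matched: deny

rules = [
    {"user_id": "*bob*", "alias": "*", "action": "deny"},
    {"user_id": "*", "alias": "#unofficial_*", "action": "allow"},
]
assert not is_alias_creation_allowed(rules, "@bob:example.com", "#test:example.com")
assert is_alias_creation_allowed(rules, "@test:example.com", "#unofficial_st:example.com")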
@@ -18,7 +18,9 @@ from mock import Mock

 from twisted.internet import defer

+from synapse.config.room_directory import RoomDirectoryConfig
 from synapse.handlers.directory import DirectoryHandler
+from synapse.rest.client.v1 import directory, room
 from synapse.types import RoomAlias

 from tests import unittest
@@ -102,3 +104,49 @@ class DirectoryTestCase(unittest.TestCase):
         )

         self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
+
+
+class TestCreateAliasACL(unittest.HomeserverTestCase):
+    user_id = "@test:test"
+
+    servlets = [directory.register_servlets, room.register_servlets]
+
+    def prepare(self, hs, reactor, clock):
+        # We cheekily override the config to add custom alias creation rules
+        config = {}
+        config["alias_creation_rules"] = [
+            {
+                "user_id": "*",
+                "alias": "#unofficial_*",
+                "action": "allow",
+            }
+        ]
+
+        rd_config = RoomDirectoryConfig()
+        rd_config.read_config(config)
+
+        self.hs.config.is_alias_creation_allowed = rd_config.is_alias_creation_allowed
+
+        return hs
+
+    def test_denied(self):
+        room_id = self.helper.create_room_as(self.user_id)
+
+        request, channel = self.make_request(
+            "PUT",
+            b"directory/room/%23test%3Atest",
+            ('{"room_id":"%s"}' % (room_id,)).encode('ascii'),
+        )
+        self.render(request)
+        self.assertEquals(403, channel.code, channel.result)
+
+    def test_allowed(self):
+        room_id = self.helper.create_room_as(self.user_id)
+
+        request, channel = self.make_request(
+            "PUT",
+            b"directory/room/%23unofficial_test%3Atest",
+            ('{"room_id":"%s"}' % (room_id,)).encode('ascii'),
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
@@ -19,7 +19,7 @@ from twisted.internet import defer

 from synapse.api.errors import ResourceLimitError
 from synapse.handlers.register import RegistrationHandler
-from synapse.types import UserID, create_requester
+from synapse.types import RoomAlias, UserID, create_requester

 from tests.utils import setup_test_homeserver

@@ -41,30 +41,27 @@ class RegistrationTestCase(unittest.TestCase):
         self.mock_captcha_client = Mock()
         self.hs = yield setup_test_homeserver(
             self.addCleanup,
-            handlers=None,
-            http_client=None,
             expire_access_token=True,
-            profile_handler=Mock(),
         )
         self.macaroon_generator = Mock(
             generate_access_token=Mock(return_value='secret')
         )
         self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator)
-        self.hs.handlers = RegistrationHandlers(self.hs)
         self.handler = self.hs.get_handlers().registration_handler
         self.store = self.hs.get_datastore()
         self.hs.config.max_mau_value = 50
         self.lots_of_users = 100
         self.small_number_of_users = 1

+        self.requester = create_requester("@requester:test")
+
     @defer.inlineCallbacks
     def test_user_is_created_and_logged_in_if_doesnt_exist(self):
-        local_part = "someone"
-        display_name = "someone"
-        user_id = "@someone:test"
-        requester = create_requester("@as:test")
+        frank = UserID.from_string("@frank:test")
+        user_id = frank.to_string()
+        requester = create_requester(user_id)
         result_user_id, result_token = yield self.handler.get_or_create_user(
-            requester, local_part, display_name
+            requester, frank.localpart, "Frankie"
         )
         self.assertEquals(result_user_id, user_id)
         self.assertEquals(result_token, 'secret')
@@ -78,12 +75,11 @@ class RegistrationTestCase(unittest.TestCase):
             token="jkv;g498752-43gj['eamb!-5",
             password_hash=None,
         )
-        local_part = "frank"
-        display_name = "Frank"
-        user_id = "@frank:test"
-        requester = create_requester("@as:test")
+        local_part = frank.localpart
+        user_id = frank.to_string()
+        requester = create_requester(user_id)
         result_user_id, result_token = yield self.handler.get_or_create_user(
-            requester, local_part, display_name
+            requester, local_part, None
         )
         self.assertEquals(result_user_id, user_id)
         self.assertEquals(result_token, 'secret')
@@ -92,7 +88,7 @@ class RegistrationTestCase(unittest.TestCase):
     def test_mau_limits_when_disabled(self):
         self.hs.config.limit_usage_by_mau = False
         # Ensure does not throw exception
-        yield self.handler.get_or_create_user("requester", 'a', "display_name")
+        yield self.handler.get_or_create_user(self.requester, 'a', "display_name")

     @defer.inlineCallbacks
     def test_get_or_create_user_mau_not_blocked(self):
@@ -101,7 +97,7 @@ class RegistrationTestCase(unittest.TestCase):
             return_value=defer.succeed(self.hs.config.max_mau_value - 1)
         )
         # Ensure does not throw exception
-        yield self.handler.get_or_create_user("@user:server", 'c', "User")
+        yield self.handler.get_or_create_user(self.requester, 'c', "User")

     @defer.inlineCallbacks
     def test_get_or_create_user_mau_blocked(self):
@@ -110,13 +106,13 @@ class RegistrationTestCase(unittest.TestCase):
             return_value=defer.succeed(self.lots_of_users)
         )
         with self.assertRaises(ResourceLimitError):
-            yield self.handler.get_or_create_user("requester", 'b', "display_name")
+            yield self.handler.get_or_create_user(self.requester, 'b', "display_name")

         self.store.get_monthly_active_count = Mock(
             return_value=defer.succeed(self.hs.config.max_mau_value)
         )
         with self.assertRaises(ResourceLimitError):
-            yield self.handler.get_or_create_user("requester", 'b', "display_name")
+            yield self.handler.get_or_create_user(self.requester, 'b', "display_name")

     @defer.inlineCallbacks
     def test_register_mau_blocked(self):
@@ -147,3 +143,44 @@ class RegistrationTestCase(unittest.TestCase):
         )
         with self.assertRaises(ResourceLimitError):
             yield self.handler.register_saml2(localpart="local_part")
+
+    @defer.inlineCallbacks
+    def test_auto_create_auto_join_rooms(self):
+        room_alias_str = "#room:test"
+        self.hs.config.auto_join_rooms = [room_alias_str]
+        res = yield self.handler.register(localpart='jeff')
+        rooms = yield self.store.get_rooms_for_user(res[0])
+
+        directory_handler = self.hs.get_handlers().directory_handler
+        room_alias = RoomAlias.from_string(room_alias_str)
+        room_id = yield directory_handler.get_association(room_alias)
+
+        self.assertTrue(room_id['room_id'] in rooms)
+        self.assertEqual(len(rooms), 1)
+
+    @defer.inlineCallbacks
+    def test_auto_create_auto_join_rooms_with_no_rooms(self):
+        self.hs.config.auto_join_rooms = []
+        frank = UserID.from_string("@frank:test")
+        res = yield self.handler.register(frank.localpart)
+        self.assertEqual(res[0], frank.to_string())
+        rooms = yield self.store.get_rooms_for_user(res[0])
+        self.assertEqual(len(rooms), 0)
+
+    @defer.inlineCallbacks
+    def test_auto_create_auto_join_where_room_is_another_domain(self):
+        self.hs.config.auto_join_rooms = ["#room:another"]
+        frank = UserID.from_string("@frank:test")
+        res = yield self.handler.register(frank.localpart)
+        self.assertEqual(res[0], frank.to_string())
+        rooms = yield self.store.get_rooms_for_user(res[0])
+        self.assertEqual(len(rooms), 0)
+
+    @defer.inlineCallbacks
+    def test_auto_create_auto_join_where_auto_create_is_false(self):
+        self.hs.config.autocreate_auto_join_rooms = False
+        room_alias_str = "#room:test"
+        self.hs.config.auto_join_rooms = [room_alias_str]
+        res = yield self.handler.register(localpart='jeff')
+        rooms = yield self.store.get_rooms_for_user(res[0])
+        self.assertEqual(len(rooms), 0)
0    tests/push/__init__.py  (new file)
148  tests/push/test_email.py  (new file)
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pkg_resources
+
+from twisted.internet.defer import Deferred
+
+from synapse.rest.client.v1 import admin, login, room
+
+from tests.unittest import HomeserverTestCase
+
+try:
+    from synapse.push.mailer import load_jinja2_templates
+except Exception:
+    load_jinja2_templates = None
+
+
+class EmailPusherTests(HomeserverTestCase):
+
+    skip = "No Jinja installed" if not load_jinja2_templates else None
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+    user_id = True
+    hijack_auth = False
+
+    def make_homeserver(self, reactor, clock):
+
+        # List[Tuple[Deferred, args, kwargs]]
+        self.email_attempts = []
+
+        def sendmail(*args, **kwargs):
+            d = Deferred()
+            self.email_attempts.append((d, args, kwargs))
+            return d
+
+        config = self.default_config()
+        config.email_enable_notifs = True
+        config.start_pushers = True
+
+        config.email_template_dir = os.path.abspath(
+            pkg_resources.resource_filename('synapse', 'res/templates')
+        )
+        config.email_notif_template_html = "notif_mail.html"
+        config.email_notif_template_text = "notif_mail.txt"
+        config.email_smtp_host = "127.0.0.1"
+        config.email_smtp_port = 20
+        config.require_transport_security = False
+        config.email_smtp_user = None
+        config.email_app_name = "Matrix"
+        config.email_notif_from = "test@example.com"
+
+        hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
+
+        return hs
+
+    def test_sends_email(self):
+
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("otheruser", "pass")
+        other_access_token = self.login("otheruser", "pass")
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastore().get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple["token_id"]
+
+        self.get_success(
+            self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="email",
+                app_id="m.email",
+                app_display_name="Email Notifications",
+                device_display_name="a@example.com",
+                pushkey="a@example.com",
+                lang=None,
+                data={},
+            )
+        )
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # Invite the other person
+        self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends some messages
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.helper.send(room, body="There!", tok=other_access_token)
+
+        # Get the stream ordering before it gets sent
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+        # Advance time a bit, so the pusher will register something has happened
+        self.pump(100)
+
+        # It hasn't succeeded yet, so the stream ordering shouldn't have moved
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
+
+        # One email was attempted to be sent
+        self.assertEqual(len(self.email_attempts), 1)
+
+        # Make the email succeed
+        self.email_attempts[0][0].callback(True)
+        self.pump()
+
+        # One email was attempted to be sent
+        self.assertEqual(len(self.email_attempts), 1)
+
+        # The stream ordering has increased
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
@ -23,7 +23,7 @@ from six.moves.urllib import parse as urlparse
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.constants import Membership
|
from synapse.api.constants import Membership
|
||||||
from synapse.rest.client.v1 import room
|
from synapse.rest.client.v1 import admin, login, room
|
||||||
|
|
||||||
from tests import unittest
|
from tests import unittest
|
||||||
|
|
||||||
|
@ -799,3 +799,107 @@ class RoomMessageListTestCase(RoomBase):
|
||||||
self.assertEquals(token, channel.json_body['start'])
|
self.assertEquals(token, channel.json_body['start'])
|
||||||
self.assertTrue("chunk" in channel.json_body)
|
self.assertTrue("chunk" in channel.json_body)
|
||||||
self.assertTrue("end" in channel.json_body)
|
self.assertTrue("end" in channel.json_body)
|
||||||
|
|
||||||
|
|
||||||
|
class RoomSearchTestCase(unittest.HomeserverTestCase):
|
||||||
|
servlets = [
|
||||||
|
admin.register_servlets,
|
||||||
|
room.register_servlets,
|
||||||
|
login.register_servlets,
|
||||||
|
]
|
||||||
|
user_id = True
|
||||||
|
hijack_auth = False
|
||||||
|
|
||||||
|
def prepare(self, reactor, clock, hs):
|
||||||
|
|
||||||
|
# Register the user who does the searching
|
||||||
|
self.user_id = self.register_user("user", "pass")
|
        self.access_token = self.login("user", "pass")

        # Register the user who sends the message
        self.other_user_id = self.register_user("otheruser", "pass")
        self.other_access_token = self.login("otheruser", "pass")

        # Create a room
        self.room = self.helper.create_room_as(self.user_id, tok=self.access_token)

        # Invite the other person
        self.helper.invite(
            room=self.room,
            src=self.user_id,
            tok=self.access_token,
            targ=self.other_user_id,
        )

        # The other user joins
        self.helper.join(
            room=self.room, user=self.other_user_id, tok=self.other_access_token
        )

    def test_finds_message(self):
        """
        The search functionality will search for content in messages if asked
        to do so.
        """
        # The other user sends some messages
        self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
        self.helper.send(self.room, body="There!", tok=self.other_access_token)

        request, channel = self.make_request(
            "POST",
            "/search?access_token=%s" % (self.access_token,),
            {
                "search_categories": {
                    "room_events": {"keys": ["content.body"], "search_term": "Hi"}
                }
            },
        )
        self.render(request)

        # Check we get the results we expect -- one search result, of the sent
        # messages
        self.assertEqual(channel.code, 200)
        results = channel.json_body["search_categories"]["room_events"]
        self.assertEqual(results["count"], 1)
        self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")

        # No context was requested, so we should get none.
        self.assertEqual(results["results"][0]["context"], {})

    def test_include_context(self):
        """
        When event_context includes include_profile, profile information will be
        included in the search response.
        """
        # The other user sends some messages
        self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
        self.helper.send(self.room, body="There!", tok=self.other_access_token)

        request, channel = self.make_request(
            "POST",
            "/search?access_token=%s" % (self.access_token,),
            {
                "search_categories": {
                    "room_events": {
                        "keys": ["content.body"],
                        "search_term": "Hi",
                        "event_context": {"include_profile": True},
                    }
                }
            },
        )
        self.render(request)

        # Check we get the results we expect -- one search result, of the sent
        # messages
        self.assertEqual(channel.code, 200)
        results = channel.json_body["search_categories"]["room_events"]
        self.assertEqual(results["count"], 1)
        self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")

        # We should get context info, like the two users, and the display names.
        context = results["results"][0]["context"]
        self.assertEqual(len(context["profile_info"].keys()), 2)
        self.assertEqual(
            context["profile_info"][self.other_user_id]["displayname"], "otheruser"
        )
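For orientation, a sketch of the response shape these assertions rely on, written as a Python literal. The field names come straight from the assertions above; the user IDs and the first user's displayname are hypothetical:

    expected_shape = {
        "search_categories": {
            "room_events": {
                "count": 1,
                "results": [
                    {
                        "result": {"content": {"body": "Hi!"}},
                        "context": {
                            "profile_info": {
                                # hypothetical user IDs
                                "@user:test": {"displayname": "user"},
                                "@otheruser:test": {"displayname": "otheruser"},
                            }
                        },
                    }
                ],
            }
        }
    }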
0
tests/scripts/__init__.py
Normal file
160
tests/scripts/test_new_matrix_user.py
Normal file
@@ -0,0 +1,160 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mock import Mock

from synapse._scripts.register_new_matrix_user import request_registration

from tests.unittest import TestCase


class RegisterTestCase(TestCase):
    def test_success(self):
        """
        The script will fetch a nonce, and then generate a MAC with it, and then
        post that MAC.
        """

        def get(url, verify=None):
            r = Mock()
            r.status_code = 200
            r.json = lambda: {"nonce": "a"}
            return r

        def post(url, json=None, verify=None):
            # Make sure we are sent the correct info
            self.assertEqual(json["username"], "user")
            self.assertEqual(json["password"], "pass")
            self.assertEqual(json["nonce"], "a")
            # We want a 40-char hex MAC
            self.assertEqual(len(json["mac"]), 40)

            r = Mock()
            r.status_code = 200
            return r

        requests = Mock()
        requests.get = get
        requests.post = post

        # The fake stdout will be written here
        out = []
        err_code = []

        request_registration(
            "user",
            "pass",
            "matrix.org",
            "shared",
            admin=False,
            requests=requests,
            _print=out.append,
            exit=err_code.append,
        )

        # We should get the success message making sure everything is OK.
        self.assertIn("Success!", out)

        # sys.exit shouldn't have been called.
        self.assertEqual(err_code, [])

    def test_failure_nonce(self):
        """
        If the script fails to fetch a nonce, it throws an error and quits.
        """

        def get(url, verify=None):
            r = Mock()
            r.status_code = 404
            r.reason = "Not Found"
            r.json = lambda: {"not": "error"}
            return r

        requests = Mock()
        requests.get = get

        # The fake stdout will be written here
        out = []
        err_code = []

        request_registration(
            "user",
            "pass",
            "matrix.org",
            "shared",
            admin=False,
            requests=requests,
            _print=out.append,
            exit=err_code.append,
        )

        # Exit was called
        self.assertEqual(err_code, [1])

        # We got an error message
        self.assertIn("ERROR! Received 404 Not Found", out)
        self.assertNotIn("Success!", out)

    def test_failure_post(self):
        """
        The script will fetch a nonce, and then if the final POST fails, will
        report an error and quit.
        """

        def get(url, verify=None):
            r = Mock()
            r.status_code = 200
            r.json = lambda: {"nonce": "a"}
            return r

        def post(url, json=None, verify=None):
            # Make sure we are sent the correct info
            self.assertEqual(json["username"], "user")
            self.assertEqual(json["password"], "pass")
            self.assertEqual(json["nonce"], "a")
            # We want a 40-char hex MAC
            self.assertEqual(len(json["mac"]), 40)

            r = Mock()
            # Then 500 because we're jerks
            r.status_code = 500
            r.reason = "Broken"
            return r

        requests = Mock()
        requests.get = get
        requests.post = post

        # The fake stdout will be written here
        out = []
        err_code = []

        request_registration(
            "user",
            "pass",
            "matrix.org",
            "shared",
            admin=False,
            requests=requests,
            _print=out.append,
            exit=err_code.append,
        )

        # Exit was called
        self.assertEqual(err_code, [1])

        # We got an error message
        self.assertIn("ERROR! Received 500 Broken", out)
        self.assertNotIn("Success!", out)
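For context, a minimal sketch of how a 40-character MAC like the one asserted on above can be produced from the shared registration secret. The field order (nonce, username, password, admin flag) mirrors the fields the tests check, but is an assumption about the script's exact scheme rather than something this diff states:

    import hmac
    from hashlib import sha1

    def make_registration_mac(shared_secret, nonce, user, password, admin=False):
        # HMAC-SHA1 hexdigests are 40 hex characters, matching the
        # len(json["mac"]) == 40 assertions in the tests above.
        mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=sha1)
        mac.update(nonce.encode('utf8'))
        mac.update(b"\x00")
        mac.update(user.encode('utf8'))
        mac.update(b"\x00")
        mac.update(password.encode('utf8'))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()

    assert len(make_registration_mac("shared", "a", "user", "pass")) == 40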
@@ -125,7 +125,9 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe
     req.content = BytesIO(content)

     if access_token:
-        req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token)
+        req.requestHeaders.addRawHeader(
+            b"Authorization", b"Bearer " + access_token.encode('ascii')
+        )

     if content:
         req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
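In caller terms, this change means make_request() now owns the str-to-bytes conversion, so call sites can pass the access token as a plain string (the TestMauLimit hunk further down drops its own .encode('ascii') accordingly). A minimal sketch, with a placeholder token value:

    request, channel = make_request("GET", "/sync", access_token="some_token")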
@@ -4,7 +4,6 @@ from twisted.internet import defer

 from synapse.api.constants import EventTypes, ServerNoticeMsgType
 from synapse.api.errors import ResourceLimitError
-from synapse.handlers.auth import AuthHandler
 from synapse.server_notices.resource_limits_server_notices import (
     ResourceLimitsServerNotices,
 )
@@ -13,17 +12,10 @@ from tests import unittest
 from tests.utils import setup_test_homeserver


-class AuthHandlers(object):
-    def __init__(self, hs):
-        self.auth_handler = AuthHandler(hs)
-
-
 class TestResourceLimitsServerNotices(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
-        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
-        self.hs.handlers = AuthHandlers(self.hs)
-        self.auth_handler = self.hs.handlers.auth_handler
+        self.hs = yield setup_test_homeserver(self.addCleanup)
         self.server_notices_sender = self.hs.get_server_notices_sender()

         # relying on [1] is far from ideal, but the only case where
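The AuthHandlers stub becomes unnecessary here, presumably because setup_test_homeserver now builds a real handler set when no handlers argument is passed; the test only ever needed the homeserver object itself and the server notices sender.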
@@ -52,7 +52,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         now = int(self.hs.get_clock().time_msec())
         self.store.user_add_threepid(user1, "email", user1_email, now, now)
         self.store.user_add_threepid(user2, "email", user2_email, now, now)
-        self.store.initialise_reserved_users(threepids)
+
+        self.store.runInteraction(
+            "initialise", self.store._initialise_reserved_users, threepids
+        )
         self.pump()

         active_count = self.store.get_monthly_active_count()
@@ -199,7 +202,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
             {'medium': 'email', 'address': user2_email},
         ]
         self.hs.config.mau_limits_reserved_threepids = threepids
-        self.store.initialise_reserved_users(threepids)
+        self.store.runInteraction(
+            "initialise", self.store._initialise_reserved_users, threepids
+        )
+
         self.pump()
         count = self.store.get_registered_reserved_users_count()
         self.assertEquals(self.get_success(count), 0)
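For context, a minimal sketch of the runInteraction pattern the updated tests use: the store runs the given function inside a database transaction, passing the cursor ("txn") as its first argument, followed by any extra positional arguments. The table, column, and user ID below are hypothetical:

    def _mark_reserved_txn(txn, user_id):
        # "txn" is the database cursor supplied by runInteraction.
        txn.execute(
            "UPDATE monthly_active_users SET reserved = 1 WHERE user_id = ?",
            (user_id,),
        )

    store.runInteraction("mark_reserved", _mark_reserved_txn, "@alice:test")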
@@ -18,6 +18,7 @@ import logging
 from twisted.internet import defer

 from synapse.api.constants import EventTypes, Membership
+from synapse.storage.state import StateFilter
 from synapse.types import RoomID, UserID

 import tests.unittest
@@ -148,7 +149,7 @@ class StateStoreTestCase(tests.unittest.TestCase):

         # check we get the full state as of the final event
         state = yield self.store.get_state_for_event(
-            e5.event_id, None, filtered_types=None
+            e5.event_id,
         )

         self.assertIsNotNone(e4)
@@ -166,33 +167,35 @@ class StateStoreTestCase(tests.unittest.TestCase):

         # check we can filter to the m.room.name event (with a '' state key)
         state = yield self.store.get_state_for_event(
-            e5.event_id, [(EventTypes.Name, '')], filtered_types=None
+            e5.event_id, StateFilter.from_types([(EventTypes.Name, '')])
         )

         self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)

         # check we can filter to the m.room.name event (with a wildcard None state key)
         state = yield self.store.get_state_for_event(
-            e5.event_id, [(EventTypes.Name, None)], filtered_types=None
+            e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
         )

         self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)

         # check we can grab the m.room.member events (with a wildcard None state key)
         state = yield self.store.get_state_for_event(
-            e5.event_id, [(EventTypes.Member, None)], filtered_types=None
+            e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
         )

         self.assertStateMapEqual(
             {(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state
         )

-        # check we can use filtered_types to grab a specific room member
-        # without filtering out the other event types
+        # check we can grab a specific room member without filtering out the
+        # other event types
         state = yield self.store.get_state_for_event(
             e5.event_id,
-            [(EventTypes.Member, self.u_alice.to_string())],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: {self.u_alice.to_string()}},
+                include_others=True,
+            )
         )

         self.assertStateMapEqual(
@@ -204,10 +207,12 @@ class StateStoreTestCase(tests.unittest.TestCase):
             state,
         )

-        # check that types=[], filtered_types=[EventTypes.Member]
-        # doesn't return all members
+        # check that we can grab everything except members
         state = yield self.store.get_state_for_event(
-            e5.event_id, [], filtered_types=[EventTypes.Member]
+            e5.event_id, state_filter=StateFilter(
+                types={EventTypes.Member: set()},
+                include_others=True,
+            ),
         )

         self.assertStateMapEqual(
@@ -215,16 +220,21 @@ class StateStoreTestCase(tests.unittest.TestCase):
         )

         #######################################################
-        # _get_some_state_from_cache tests against a full cache
+        # _get_state_for_group_using_cache tests against a full cache
         #######################################################

         room_id = self.room.to_string()
         group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id])
         group = list(group_ids.keys())[0]

-        # test _get_some_state_from_cache correctly filters out members with types=[]
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
-            self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member]
+        # test _get_state_for_group_using_cache correctly filters out members
+        # with types=[]
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
+            self.store._state_group_cache, group,
+            state_filter=StateFilter(
+                types={EventTypes.Member: set()},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
@@ -236,22 +246,27 @@ class StateStoreTestCase(tests.unittest.TestCase):
             state_dict,
         )

-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: set()},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
         self.assertDictEqual({}, state_dict)

-        # test _get_some_state_from_cache correctly filters in members with wildcard types
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        # test _get_state_for_group_using_cache correctly filters in members
+        # with wildcard types
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_cache,
             group,
-            [(EventTypes.Member, None)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: None},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
@@ -263,11 +278,13 @@ class StateStoreTestCase(tests.unittest.TestCase):
             state_dict,
         )

-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [(EventTypes.Member, None)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: None},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
@@ -280,12 +297,15 @@ class StateStoreTestCase(tests.unittest.TestCase):
             state_dict,
         )

-        # test _get_some_state_from_cache correctly filters in members with specific types
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        # test _get_state_for_group_using_cache correctly filters in members
+        # with specific types
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
@@ -297,23 +317,27 @@ class StateStoreTestCase(tests.unittest.TestCase):
             state_dict,
         )

-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
         self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)

-        # test _get_some_state_from_cache correctly filters in members with specific types
-        # and no filtered_types
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        # test _get_state_for_group_using_cache correctly filters in members
+        # with specific types
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=None,
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=False,
+            ),
         )

         self.assertEqual(is_all, True)
@@ -357,42 +381,54 @@ class StateStoreTestCase(tests.unittest.TestCase):
         ############################################
         # test that things work with a partial cache

-        # test _get_some_state_from_cache correctly filters out members with types=[]
+        # test _get_state_for_group_using_cache correctly filters out members
+        # with types=[]
         room_id = self.room.to_string()
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
-            self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member]
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
+            self.store._state_group_cache, group,
+            state_filter=StateFilter(
+                types={EventTypes.Member: set()},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, False)
         self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)

         room_id = self.room.to_string()
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: set()},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
         self.assertDictEqual({}, state_dict)

-        # test _get_some_state_from_cache correctly filters in members wildcard types
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        # test _get_state_for_group_using_cache correctly filters in members
+        # wildcard types
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_cache,
             group,
-            [(EventTypes.Member, None)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: None},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, False)
         self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)

-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [(EventTypes.Member, None)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: None},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
@@ -404,44 +440,53 @@ class StateStoreTestCase(tests.unittest.TestCase):
             state_dict,
         )

-        # test _get_some_state_from_cache correctly filters in members with specific types
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        # test _get_state_for_group_using_cache correctly filters in members
+        # with specific types
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, False)
         self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)

-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=[EventTypes.Member],
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=True,
+            ),
         )

         self.assertEqual(is_all, True)
         self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)

-        # test _get_some_state_from_cache correctly filters in members with specific types
-        # and no filtered_types
-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        # test _get_state_for_group_using_cache correctly filters in members
+        # with specific types
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=None,
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=False,
+            ),
         )

         self.assertEqual(is_all, False)
         self.assertDictEqual({}, state_dict)

-        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+        (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
             self.store._state_group_members_cache,
             group,
-            [(EventTypes.Member, e5.state_key)],
-            filtered_types=None,
+            state_filter=StateFilter(
+                types={EventTypes.Member: {e5.state_key}},
+                include_others=False,
+            ),
         )

         self.assertEqual(is_all, True)
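To summarise the migration above: the old (types, filtered_types) argument pair is replaced by a single StateFilter object. A sketch of the three shapes the updated tests exercise, using a hypothetical user ID:

    from synapse.api.constants import EventTypes
    from synapse.storage.state import StateFilter

    # simple (event_type, state_key) lists have a shorthand constructor
    name_only = StateFilter.from_types([(EventTypes.Name, '')])

    # all m.room.member events (a None state key is a wildcard),
    # plus every other event type
    members_and_others = StateFilter(
        types={EventTypes.Member: None},
        include_others=True,
    )

    # one specific member event and nothing else
    one_member = StateFilter(
        types={EventTypes.Member: {"@alice:test"}},
        include_others=False,
    )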
@@ -207,7 +207,7 @@ class TestMauLimit(unittest.TestCase):

     def do_sync_for_user(self, token):
         request, channel = make_request(
-            "GET", "/sync", access_token=token.encode('ascii')
+            "GET", "/sync", access_token=token
         )
         render(request, self.resource, self.reactor)
@@ -146,6 +146,13 @@ def DEBUG(target):
     return target


+def INFO(target):
+    """A decorator to set the .loglevel attribute to logging.INFO.
+    Can apply to either a TestCase or an individual test method."""
+    target.loglevel = logging.INFO
+    return target
+
+
 class HomeserverTestCase(TestCase):
     """
     A base TestCase that reduces boilerplate for HomeServer-using test cases.
@@ -373,5 +380,5 @@ class HomeserverTestCase(TestCase):
         self.render(request)
         self.assertEqual(channel.code, 200)

-        access_token = channel.json_body["access_token"].encode('ascii')
+        access_token = channel.json_body["access_token"]
         return access_token
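A usage sketch for the new decorator, per its docstring; the test case and method are hypothetical:

    from tests.unittest import HomeserverTestCase, INFO

    @INFO  # sets .loglevel = logging.INFO for every test in the class
    class ExampleTestCase(HomeserverTestCase):
        @INFO  # or apply it to an individual test method instead
        def test_something(self):
            pass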
@@ -124,6 +124,7 @@ def default_config(name):
    config.user_consent_server_notice_content = None
    config.block_events_without_consent_error = None
    config.media_storage_providers = []
+    config.autocreate_auto_join_rooms = True
    config.auto_join_rooms = []
    config.limit_usage_by_mau = False
    config.hs_disabled = False
19
tox.ini
@@ -3,7 +3,6 @@ envlist = packaging, py27, py36, pep8, check_isort

 [base]
 deps =
-    coverage
     Twisted>=17.1
     mock
     python-subunit
@@ -12,6 +11,20 @@ deps =
     # needed by some of the tests
     lxml

+    # cryptography 2.2 requires setuptools >= 18.5
+    #
+    # older versions of virtualenv (?) give us a virtualenv with the same
+    # version of setuptools as is installed on the system python (and tox runs
+    # virtualenv under python3, so we get the version of setuptools that is
+    # installed on that).
+    #
+    # anyway, make sure that we have a recent enough setuptools.
+    setuptools>=18.5
+
+    # we also need a semi-recent version of pip, because old ones fail to
+    # install the "enum34" dependency of cryptography.
+    pip>=10

 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code

@@ -26,9 +39,7 @@ passenv = *

 commands =
     /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
-    coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \
-        "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
-    {env:DUMP_COVERAGE_COMMAND:coverage report -m}
+    "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}

 [testenv:py27]
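Note that the trial command line keeps its {env:TRIAL_FLAGS:} interpolation, so test concurrency can still be passed through from the environment, e.g. TRIAL_FLAGS="-j 2" tox -e py36 (environment name illustrative).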