Merge remote-tracking branch 'origin/develop' into dbkr/e2e_backups

David Baker 2018-10-09 10:05:02 +01:00
commit dc045ef202
210 changed files with 5643 additions and 3313 deletions

.circleci/config.yml

@@ -1,5 +1,27 @@
 version: 2
 jobs:
+  dockerhubuploadrelease:
+    machine: true
+    steps:
+      - checkout
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
+      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
+  dockerhubuploadlatest:
+    machine: true
+    steps:
+      - checkout
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} .
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest
+      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3
+      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}
+      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3
+      - run: docker push matrixdotorg/synapse:latest
+      - run: docker push matrixdotorg/synapse:latest-py3
   sytestpy2:
     machine: true
     steps:
@@ -9,6 +31,8 @@ jobs:
       - store_artifacts:
           path: ~/project/logs
           destination: logs
+      - store_test_results:
+          path: logs
   sytestpy2postgres:
     machine: true
     steps:
@@ -18,15 +42,45 @@ jobs:
       - store_artifacts:
           path: ~/project/logs
           destination: logs
+      - store_test_results:
+          path: logs
+  sytestpy2merged:
+    machine: true
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: docker pull matrixdotorg/sytest-synapsepy2
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+      - store_test_results:
+          path: logs
+  sytestpy2postgresmerged:
+    machine: true
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: docker pull matrixdotorg/sytest-synapsepy2
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+      - store_test_results:
+          path: logs
   sytestpy3:
     machine: true
     steps:
       - checkout
       - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
       - store_artifacts:
           path: ~/project/logs
           destination: logs
+      - store_test_results:
+          path: logs
   sytestpy3postgres:
     machine: true
     steps:
@@ -36,13 +90,76 @@ jobs:
       - store_artifacts:
           path: ~/project/logs
           destination: logs
+      - store_test_results:
+          path: logs
+  sytestpy3merged:
+    machine: true
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: docker pull matrixdotorg/sytest-synapsepy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+      - store_test_results:
+          path: logs
+  sytestpy3postgresmerged:
+    machine: true
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: docker pull matrixdotorg/sytest-synapsepy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+      - store_test_results:
+          path: logs
 workflows:
   version: 2
   build:
     jobs:
-      - sytestpy2
-      - sytestpy2postgres
-      # Currently broken while the Python 3 port is incomplete
-      # - sytestpy3
-      # - sytestpy3postgres
+      - sytestpy2:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy2postgres:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy3:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy3postgres:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy2merged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - sytestpy2postgresmerged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - sytestpy3merged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - sytestpy3postgresmerged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - dockerhubuploadrelease:
+          filters:
+            tags:
+              only: /v[0-9].[0-9]+.[0-9]+.*/
+            branches:
+              ignore: /.*/
+      - dockerhubuploadlatest:
+          filters:
+            branches:
+              only: master
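
For reference, the tag filter on dockerhubuploadrelease means release images are built only for version tags; a hypothetical release would be pushed like so::

    git tag v0.33.7
    git push origin v0.33.7  # matches /v[0-9].[0-9]+.[0-9]+.*/ and triggers dockerhubuploadrelease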

.circleci/merge_base_branch.sh Executable file

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -e
# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
echo "Can't figure out what the PR number is! Assuming merge target is develop."
# It probably hasn't had a PR opened yet. Since all PRs land on develop, we
# can probably assume it's based on it and will be merged into it.
GITBASE="develop"
else
# Get the reference, using the GitHub API
GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
fi
# Show what we are before
git show -s
# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"
# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE
# Show what we are after.
git show -s
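
The ``##*/`` parameter expansion above strips everything up to the last slash, which reduces a pull request URL to its number; a minimal sketch with a hypothetical URL::

    CIRCLE_PULL_REQUEST="https://github.com/matrix-org/synapse/pull/1234"
    echo "${CIRCLE_PULL_REQUEST##*/}"  # prints 1234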

.dockerignore

@@ -3,6 +3,5 @@ Dockerfile
 .gitignore
 demo/etc
 tox.ini
-synctl
 .git/*
 .tox/*

.gitignore vendored

@@ -1,9 +1,11 @@
 *.pyc
 .*.swp
 *~
+*.lock
 .DS_Store
 _trial_temp/
+_trial_temp*/
 logs/
 dbs/
 *.egg
@@ -44,6 +46,7 @@ media_store/
 build/
 venv/
 venv*/
+*venv/
 localhost-800*/
 static/client/register/register_config.js

.travis.yml

@@ -8,9 +8,6 @@ before_script:
   - git remote set-branches --add origin develop
   - git fetch origin develop

-services:
-  - postgresql
-
 matrix:
   fast_finish: true
   include:
@@ -23,22 +20,31 @@ matrix:
   - python: 2.7
     env: TOX_ENV=py27
+  - python: 2.7
+    env: TOX_ENV=py27-old
   - python: 2.7
     env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+    services:
+      - postgresql
+  - python: 3.5
+    env: TOX_ENV=py35
   - python: 3.6
     env: TOX_ENV=py36
+  - python: 3.6
+    env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
+    services:
+      - postgresql
   - python: 3.6
     env: TOX_ENV=check_isort
   - python: 3.6
     env: TOX_ENV=check-newsfragment
-  allow_failures:
-    - python: 2.7
-      env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
 install:
   - pip install tox

CHANGES.md

@@ -1,3 +1,231 @@
Synapse 0.33.6 (2018-10-04)
===========================
Internal Changes
----------------
- Pin to prometheus_client<0.4 to avoid renaming all of our metrics ([\#4002](https://github.com/matrix-org/synapse/issues/4002))
Synapse 0.33.6rc1 (2018-10-03)
==============================
Features
--------
- Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. ([\#3883](https://github.com/matrix-org/synapse/issues/3883))
- Report "python_version" in the phone home stats ([\#3894](https://github.com/matrix-org/synapse/issues/3894))
- Always LL ourselves if we're in a room ([\#3916](https://github.com/matrix-org/synapse/issues/3916))
- Include eventid in log lines when processing incoming federation transactions ([\#3959](https://github.com/matrix-org/synapse/issues/3959))
- Remove spurious check which made 'localhost' servers not work ([\#3964](https://github.com/matrix-org/synapse/issues/3964))
Bugfixes
--------
- Fix problem when playing media from Chrome using direct URL (thanks @remjey!) ([\#3578](https://github.com/matrix-org/synapse/issues/3578))
- support registering regular users non-interactively with register_new_matrix_user script ([\#3836](https://github.com/matrix-org/synapse/issues/3836))
- Fix broken invite email links for self hosted riots ([\#3868](https://github.com/matrix-org/synapse/issues/3868))
- Don't ratelimit autojoins ([\#3879](https://github.com/matrix-org/synapse/issues/3879))
- Fix 500 error when deleting unknown room alias ([\#3889](https://github.com/matrix-org/synapse/issues/3889))
- Fix some b'abcd' noise in logs and metrics ([\#3892](https://github.com/matrix-org/synapse/issues/3892), [\#3895](https://github.com/matrix-org/synapse/issues/3895))
- When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. ([\#3899](https://github.com/matrix-org/synapse/issues/3899))
- Fix incorrect server-name indication for outgoing federation requests ([\#3907](https://github.com/matrix-org/synapse/issues/3907))
- Fix adding client IPs to the database failing on Python 3. ([\#3908](https://github.com/matrix-org/synapse/issues/3908))
- Fix bug where things occasionally were not being timed out correctly. ([\#3910](https://github.com/matrix-org/synapse/issues/3910))
- Fix bug where outbound federation would stop talking to some servers when using workers ([\#3914](https://github.com/matrix-org/synapse/issues/3914))
- Fix some instances of ExpiringCache not expiring cache items ([\#3932](https://github.com/matrix-org/synapse/issues/3932), [\#3980](https://github.com/matrix-org/synapse/issues/3980))
- Fix out-of-bounds error when LLing yourself ([\#3936](https://github.com/matrix-org/synapse/issues/3936))
- Sending server notices regarding user consent now works on Python 3. ([\#3938](https://github.com/matrix-org/synapse/issues/3938))
- Fix exceptions from metrics handler ([\#3956](https://github.com/matrix-org/synapse/issues/3956))
- Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960))
- Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961))
- Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968))
- Replaced all occurrences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970))
- Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986))
- Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990))
Internal Changes
----------------
- Unit tests can now be run under PostgreSQL in Docker using ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699))
- Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794))
- Remove documentation regarding installation on Cygwin, the use of WSL is recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873))
- Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897))
- Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903))
- Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966))
- Improve logging of outbound federation requests ([\#3906](https://github.com/matrix-org/synapse/issues/3906), [\#3909](https://github.com/matrix-org/synapse/issues/3909))
- Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911))
- Add a regression test for logging failed HTTP requests on Python 3. ([\#3912](https://github.com/matrix-org/synapse/issues/3912))
- Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924))
- Fix spurious exceptions when remote http client closes connection ([\#3925](https://github.com/matrix-org/synapse/issues/3925))
- Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927))
- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991))
- Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946))
- Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947))
- Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948))
- Run the test suite on the oldest supported versions of our dependencies in CI. ([\#3952](https://github.com/matrix-org/synapse/issues/3952))
- CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. ([\#3957](https://github.com/matrix-org/synapse/issues/3957))
- Fix docstrings and add tests for state store methods ([\#3958](https://github.com/matrix-org/synapse/issues/3958))
- fix docstring for FederationClient.get_state_for_room ([\#3963](https://github.com/matrix-org/synapse/issues/3963))
- Run notify_app_services as a bg process ([\#3965](https://github.com/matrix-org/synapse/issues/3965))
- Clarifications in FederationHandler ([\#3967](https://github.com/matrix-org/synapse/issues/3967))
- Further reduce the docker image size ([\#3972](https://github.com/matrix-org/synapse/issues/3972))
- Build py3 docker images for docker hub too ([\#3976](https://github.com/matrix-org/synapse/issues/3976))
- Updated the installation instructions to point to the matrix-synapse package on PyPI. ([\#3985](https://github.com/matrix-org/synapse/issues/3985))
- Disable USE_FROZEN_DICTS for unittests by default. ([\#3987](https://github.com/matrix-org/synapse/issues/3987))
- Remove unused Jenkins and development related files from the repo. ([\#3988](https://github.com/matrix-org/synapse/issues/3988))
- Improve stacktraces in certain exceptions in the logs ([\#3989](https://github.com/matrix-org/synapse/issues/3989))
Synapse 0.33.5.1 (2018-09-25)
=============================
Internal Changes
----------------
- Fix incompatibility with older Twisted version in tests. Thanks @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940))
Synapse 0.33.5 (2018-09-24)
===========================
No significant changes.
Synapse 0.33.5rc1 (2018-09-17)
==============================
Features
--------
- Python 3.5 and 3.6 support is now in beta. ([\#3576](https://github.com/matrix-org/synapse/issues/3576))
- Implement `event_format` filter param in `/sync` ([\#3790](https://github.com/matrix-org/synapse/issues/3790))
- Add synapse_admin_mau:registered_reserved_users metric to expose number of real reserved users ([\#3846](https://github.com/matrix-org/synapse/issues/3846))
Bugfixes
--------
- Remove connection ID for replication prometheus metrics, as it creates a large number of new series. ([\#3788](https://github.com/matrix-org/synapse/issues/3788))
- guest users should not be part of mau total ([\#3800](https://github.com/matrix-org/synapse/issues/3800))
- Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804))
- Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810))
- Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824))
- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835))
- Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841))
- Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845))
- Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851))
- Fix handling of redacted events from federation ([\#3859](https://github.com/matrix-org/synapse/issues/3859))
- ([\#3874](https://github.com/matrix-org/synapse/issues/3874))
- Mitigate outbound federation randomly becoming wedged ([\#3875](https://github.com/matrix-org/synapse/issues/3875))
Internal Changes
----------------
- CircleCI tests now run on the potential merge of a PR. ([\#3704](https://github.com/matrix-org/synapse/issues/3704))
- http/ is now ported to Python 3. ([\#3771](https://github.com/matrix-org/synapse/issues/3771))
- Improve human readable error messages for threepid registration/account update ([\#3789](https://github.com/matrix-org/synapse/issues/3789))
- Make /sync slightly faster by avoiding needless copies ([\#3795](https://github.com/matrix-org/synapse/issues/3795))
- handlers/ is now ported to Python 3. ([\#3803](https://github.com/matrix-org/synapse/issues/3803))
- Limit the number of PDUs/EDUs per federation transaction ([\#3805](https://github.com/matrix-org/synapse/issues/3805))
- Only start postgres instance for postgres tests on Travis CI ([\#3806](https://github.com/matrix-org/synapse/issues/3806))
- tests/ is now ported to Python 3. ([\#3808](https://github.com/matrix-org/synapse/issues/3808))
- crypto/ is now ported to Python 3. ([\#3822](https://github.com/matrix-org/synapse/issues/3822))
- rest/ is now ported to Python 3. ([\#3823](https://github.com/matrix-org/synapse/issues/3823))
- add some logging for the keyring queue ([\#3826](https://github.com/matrix-org/synapse/issues/3826))
- speed up lazy loading by 2-3x ([\#3827](https://github.com/matrix-org/synapse/issues/3827))
- Improved Dockerfile to remove build requirements after building reducing the image size. ([\#3834](https://github.com/matrix-org/synapse/issues/3834))
- Disable lazy loading for incremental syncs for now ([\#3840](https://github.com/matrix-org/synapse/issues/3840))
- federation/ is now ported to Python 3. ([\#3847](https://github.com/matrix-org/synapse/issues/3847))
- Log when we retry outbound requests ([\#3853](https://github.com/matrix-org/synapse/issues/3853))
- Removed some excess logging messages. ([\#3855](https://github.com/matrix-org/synapse/issues/3855))
- Speed up purge history for rooms that have been previously purged ([\#3856](https://github.com/matrix-org/synapse/issues/3856))
- Refactor some HTTP timeout code. ([\#3857](https://github.com/matrix-org/synapse/issues/3857))
- Fix running merged builds on CircleCI ([\#3858](https://github.com/matrix-org/synapse/issues/3858))
- Fix typo in replication stream exception. ([\#3860](https://github.com/matrix-org/synapse/issues/3860))
- Add in flight real time metrics for Measure blocks ([\#3871](https://github.com/matrix-org/synapse/issues/3871))
- Disable buffering and automatic retrying in treq requests to prevent timeouts. ([\#3872](https://github.com/matrix-org/synapse/issues/3872))
- mention jemalloc in the README ([\#3877](https://github.com/matrix-org/synapse/issues/3877))
- Remove unmaintained "nuke-room-from-db.sh" script ([\#3888](https://github.com/matrix-org/synapse/issues/3888))
Synapse 0.33.4 (2018-09-07)
===========================
Internal Changes
----------------
- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))
Synapse 0.33.4rc2 (2018-09-06)
==============================
Pull in security fixes from v0.33.3.1
Synapse 0.33.3.1 (2018-09-06)
=============================
SECURITY FIXES
--------------
- Fix an issue where event signatures were not always correctly validated ([\#3796](https://github.com/matrix-org/synapse/issues/3796))
- Fix an issue where server_acls could be circumvented for incoming events ([\#3796](https://github.com/matrix-org/synapse/issues/3796))
Internal Changes
----------------
- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))
Synapse 0.33.4rc1 (2018-09-04)
==============================
Features
--------
- Support profile API endpoints on workers ([\#3659](https://github.com/matrix-org/synapse/issues/3659))
- Server notices for resource limit blocking ([\#3680](https://github.com/matrix-org/synapse/issues/3680))
- Allow guests to use /rooms/:roomId/event/:eventId ([\#3724](https://github.com/matrix-org/synapse/issues/3724))
- Add mau_trial_days config param, so that users only get counted as MAU after N days. ([\#3749](https://github.com/matrix-org/synapse/issues/3749))
- Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). ([\#3751](https://github.com/matrix-org/synapse/issues/3751))
Bugfixes
--------
- Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues ([\#3722](https://github.com/matrix-org/synapse/issues/3722))
- Fix bug where we resent "limit exceeded" server notices repeatedly ([\#3747](https://github.com/matrix-org/synapse/issues/3747))
- Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices ([\#3753](https://github.com/matrix-org/synapse/issues/3753))
- Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic ([\#3754](https://github.com/matrix-org/synapse/issues/3754))
- Fix tagging of server notice rooms ([\#3755](https://github.com/matrix-org/synapse/issues/3755), [\#3756](https://github.com/matrix-org/synapse/issues/3756))
- Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. ([\#3758](https://github.com/matrix-org/synapse/issues/3758))
- Don't return non-LL-member state in incremental sync state blocks ([\#3760](https://github.com/matrix-org/synapse/issues/3760))
- Fix bug in sending presence over federation ([\#3768](https://github.com/matrix-org/synapse/issues/3768))
- Fix bug where preserved threepid user comes to sign up and server is mau blocked ([\#3777](https://github.com/matrix-org/synapse/issues/3777))
Internal Changes
----------------
- Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. ([\#3378](https://github.com/matrix-org/synapse/issues/3378))
- Refactor state module to support multiple room versions ([\#3673](https://github.com/matrix-org/synapse/issues/3673))
- The synapse.storage module has been ported to Python 3. ([\#3725](https://github.com/matrix-org/synapse/issues/3725))
- Split the state_group_cache into member and non-member state events (and so speed up LL /sync) ([\#3726](https://github.com/matrix-org/synapse/issues/3726))
- Log failure to authenticate remote servers as warnings (without stack traces) ([\#3727](https://github.com/matrix-org/synapse/issues/3727))
- The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. ([\#3730](https://github.com/matrix-org/synapse/issues/3730))
- Reference the need for an HTTP replication port when using the federation_reader worker ([\#3734](https://github.com/matrix-org/synapse/issues/3734))
- Fix minor spelling error in federation client documentation. ([\#3735](https://github.com/matrix-org/synapse/issues/3735))
- Remove redundant state resolution function ([\#3737](https://github.com/matrix-org/synapse/issues/3737))
- The test suite now passes on PostgreSQL. ([\#3740](https://github.com/matrix-org/synapse/issues/3740))
- Fix MAU cache invalidation due to missing yield ([\#3746](https://github.com/matrix-org/synapse/issues/3746))
- Make sure that we close db connections opened during init ([\#3764](https://github.com/matrix-org/synapse/issues/3764))
Synapse 0.33.3 (2018-08-22)
===========================

CONTRIBUTING.rst

@@ -30,12 +30,28 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use `Jenkins <http://matrix.org/jenkins>`_ and
-`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
-integration. All pull requests to synapse get automatically tested by Travis;
-the Jenkins builds require an adminstrator to start them. If your change
-breaks the build, this will be shown in github, so please keep an eye on the
-pull request for feedback.
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
+<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
+pull requests to synapse get automatically tested by Travis and CircleCI.
+If your change breaks the build, this will be shown in GitHub, so please
+keep an eye on the pull request for feedback.
+
+To run unit tests in a local development environment, you can use:
+
+- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
+  SQLite-backed Synapse on Python 2.7.
+- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
+- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
+  (requires a running local PostgreSQL with access to create databases).
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
+  (requires Docker). Entirely self-contained, recommended if you don't want to
+  set up PostgreSQL yourself.
+
+Docker images are available for running the integration tests (SyTest) locally,
+see the `documentation in the SyTest repo
+<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
+information.

 Code style
 ~~~~~~~~~~
@@ -59,9 +75,10 @@ To create a changelog entry, make a new file in the ``changelog.d``
 file named in the format of ``PRnumber.type``. The type can be
 one of ``feature``, ``bugfix``, ``removal`` (also used for
 deprecations), or ``misc`` (for internal-only changes). The content of
-the file is your changelog entry, which can contain RestructuredText
-formatting. A note of contributors is welcomed in changelogs for
-non-misc changes (the content of misc changes is not displayed).
+the file is your changelog entry, which can contain Markdown
+formatting. Adding credits to the changelog is encouraged, we value
+your contributions and would like to have you shouted out in the
+release notes!

 For example, a fix in PR #1234 would have its changelog entry in
 ``changelog.d/1234.bugfix``, and contain content like "The security levels of
@@ -76,7 +93,8 @@ AUTHORS.rst file for the project in question. Please feel free to include a
 change to AUTHORS.rst in your pull request to list yourself and a short
 description of the area(s) you've worked on. Also, we sometimes have swag to
 give away to contributors - if you feel that Matrix-branded apparel is missing
-from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
+from your life, please mail us your shipping address to matrix at matrix.org and
+we'll try to fix it :)

 Sign off
 ~~~~~~~~
@@ -143,4 +161,9 @@ flag to ``git commit``, which uses the name and email set in your
 Conclusion
 ~~~~~~~~~~

-That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!

MANIFEST.in

@@ -23,12 +23,9 @@ recursive-include synapse/static *.gif
 recursive-include synapse/static *.html
 recursive-include synapse/static *.js

-exclude jenkins.sh
-exclude jenkins*.sh
-exclude jenkins*
 exclude Dockerfile
 exclude .dockerignore
-recursive-exclude jenkins *.sh
+exclude test_postgresql.sh

 include pyproject.toml
 recursive-include changelog.d *
@@ -37,3 +34,6 @@ prune .github
 prune demo/etc
 prune docker
 prune .circleci
+
+exclude jenkins*
+recursive-exclude jenkins *.sh

MAP.rst

@@ -1,35 +0,0 @@
Directory Structure
===================
Warning: this may be a bit stale...
::
.
├── cmdclient Basic CLI python Matrix client
├── demo Scripts for running standalone Matrix demos
├── docs All doc, including the draft Matrix API spec
│   ├── client-server The client-server Matrix API spec
│   ├── model Domain-specific elements of the Matrix API spec
│   ├── server-server The server-server model of the Matrix API spec
│   └── sphinx The internal API doc of the Synapse homeserver
├── experiments Early experiments of using Synapse's internal APIs
├── graph Visualisation of Matrix's distributed message store
├── synapse The reference Matrix homeserver implementation
│   ├── api Common building blocks for the APIs
│   │   ├── events Definition of state representation Events
│   │   └── streams Definition of streamable Event objects
│   ├── app The __main__ entry point for the homeserver
│   ├── crypto The PKI client/server used for secure federation
│   │   └── resource PKI helper objects (e.g. keys)
│   ├── federation Server-server state replication logic
│   ├── handlers The main business logic of the homeserver
│   ├── http Wrappers around Twisted's HTTP server & client
│   ├── rest Servlet-style RESTful API
│   ├── storage Persistence subsystem (currently only sqlite3)
│   │   └── schema sqlite persistence schema
│   └── util Synapse-specific utilities
├── tests Unit tests for the Synapse homeserver
└── webclient Basic AngularJS Matrix web client

README.rst

@@ -81,7 +81,7 @@ Thanks for using Matrix!
 Synapse Installation
 ====================

-Synapse is the reference python/twisted Matrix homeserver implementation.
+Synapse is the reference Python/Twisted Matrix homeserver implementation.

 System requirements:
@@ -91,12 +91,13 @@ System requirements:
 Installing from source
 ----------------------

 (Prebuilt packages are available for some platforms - see `Platform-Specific
 Instructions`_.)

-Synapse is written in python but some of the libraries it uses are written in
-C. So before we can install synapse itself we need a working C compiler and the
-header files for python C extensions.
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.

 Installing prerequisites on Ubuntu or Debian::
@@ -143,18 +144,24 @@ Installing prerequisites on OpenBSD::
     doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
         libxslt

-To install the synapse homeserver run::
+To install the Synapse homeserver run::

     virtualenv -p python2.7 ~/.synapse
     source ~/.synapse/bin/activate
     pip install --upgrade pip
     pip install --upgrade setuptools
-    pip install https://github.com/matrix-org/synapse/tarball/master
+    pip install matrix-synapse

-This installs synapse, along with the libraries it uses, into a virtual
+This installs Synapse, along with the libraries it uses, into a virtual
 environment under ``~/.synapse``. Feel free to pick a different directory
 if you prefer.

+This Synapse installation can then be later upgraded by using pip again with the
+update flag::
+
+    source ~/.synapse/bin/activate
+    pip install -U matrix-synapse
+
 In case of problems, please see the _`Troubleshooting` section below.
There is an official synapse image available at
@@ -167,12 +174,7 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
 Dockerfile to automate a synapse server in a single Docker image, at
 https://hub.docker.com/r/avhost/docker-matrix/tags/

-Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
-tested with VirtualBox/AWS/DigitalOcean - see
-https://github.com/EMnify/matrix-synapse-auto-deploy
-for details.
-
-Configuring synapse
+Configuring Synapse
 -------------------

 Before you can start Synapse, you will need to generate a configuration
@@ -254,26 +256,6 @@ Setting up a TURN server
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
 a TURN server. See `<docs/turn-howto.rst>`_ for details.

-IPv6
-----
-
-As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
-for providing PR #1696.
-
-However, for federation to work on hosts with IPv6 DNS servers you **must**
-be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
-for details. We can't make Synapse depend on Twisted 17.1 by default
-yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
-so if you are using operating system dependencies you'll have to install your
-own Twisted 17.1 package via pip or backports etc.
-
-If you're running in a virtualenv then pip should have installed the newest
-Twisted automatically, but if your virtualenv is old you will need to manually
-upgrade to a newer Twisted dependency via:
-
-    pip install Twisted>=17.1.0

 Running Synapse
 ===============
@@ -449,8 +431,7 @@ settings require a slightly more difficult installation process.
    using the ``.`` command, rather than ``bash``'s ``source``.
 5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
    webpages for their titles.
-6) Use ``pip`` to install this repository: ``pip install
-   https://github.com/matrix-org/synapse/tarball/master``
+6) Use ``pip`` to install this repository: ``pip install matrix-synapse``
 7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
    chance of a compromised Synapse server being used to take over your box.
@@ -464,37 +445,13 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-
 Windows Install
 ---------------

-Synapse can be installed on Cygwin. It requires the following Cygwin packages:
-
-- gcc
-- git
-- libffi-devel
-- openssl (and openssl-devel, python-openssl)
-- python
-- python-setuptools
-
-The content repository requires additional packages and will be unable to process
-uploads without them:
-
-- libjpeg8
-- libjpeg8-devel
-- zlib
-
-If you choose to install Synapse without these packages, you will need to reinstall
-``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` ``pip install
-pillow --user``
-
-Troubleshooting:
-
-- You may need to upgrade ``setuptools`` to get this to work correctly:
-  ``pip install setuptools --upgrade``.
-- You may encounter errors indicating that ``ffi.h`` is missing, even with
-  ``libffi-devel`` installed. If you do, copy the ``.h`` files:
-  ``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include``
-- You may need to install libsodium from source in order to install PyNacl. If
-  you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
-  it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
+If you wish to run or develop Synapse on Windows, the Windows Subsystem For
+Linux provides a Linux environment on Windows 10 which is capable of using the
+Debian, Fedora, or source installation methods. More information about WSL can
+be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
+Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
+for Windows Server.

 Troubleshooting
 ===============
@@ -502,7 +459,7 @@ Troubleshooting
 Troubleshooting Installation
 ----------------------------

-Synapse requires pip 1.7 or later, so if your OS provides too old a version you
+Synapse requires pip 8 or later, so if your OS provides too old a version you
 may need to manually upgrade it::

     sudo pip install --upgrade pip
@@ -537,28 +494,6 @@ failing, e.g.::
     pip install twisted

-On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
-will need to export CFLAGS=-Qunused-arguments.
-
-Troubleshooting Running
------------------------
-
-If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
-encryption and digital signatures.
-Unfortunately PyNACL currently has a few issues
-(https://github.com/pyca/pynacl/issues/53) and
-(https://github.com/pyca/pynacl/issues/79) that mean it may not install
-correctly, causing all tests to fail with errors about missing "sodium.h". To
-fix try re-installing from PyPI or directly from
-(https://github.com/pyca/pynacl)::
-
-    # Install from PyPI
-    pip install --user --upgrade --force pynacl
-
-    # Install from github
-    pip install --user https://github.com/pyca/pynacl/tarball/master

 Running out of File Handles
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -747,6 +682,18 @@ so an example nginx configuration might look like::
         }
     }

+and an example apache configuration may look like::
+
+    <VirtualHost *:443>
+        SSLEngine on
+        ServerName matrix.example.com;
+
+        <Location /_matrix>
+            ProxyPass http://127.0.0.1:8008/_matrix nocanon
+            ProxyPassReverse http://127.0.0.1:8008/_matrix
+        </Location>
+    </VirtualHost>
+
 You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
 for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
 recorded correctly.
@@ -901,7 +848,7 @@ to install using pip and a virtualenv::
     virtualenv -p python2.7 env
     source env/bin/activate
-    python synapse/python_dependencies.py | xargs pip install
+    python -m synapse.python_dependencies | xargs pip install
     pip install lxml mock

 This will run a process of downloading and installing all the needed
@@ -956,5 +903,13 @@ variable. The default is 0.5, which can be decreased to reduce RAM usage
 in memory constrained environments, or increased if performance starts to
 degrade.

+Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
+improvement in overall memory use, and especially in terms of giving back RAM
+to the OS. To use it, the library must simply be put in the LD_PRELOAD
+environment variable when launching Synapse. On Debian, this can be done
+by installing the ``libjemalloc1`` package and adding this line to
+``/etc/default/matrix-synapse``::
+
+    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
+
 .. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
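
To try the jemalloc setting without editing ``/etc/default/matrix-synapse``, the library can be preloaded for a single run (assuming the Debian path above)::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 synctl start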

UPGRADE.rst

@@ -18,7 +18,7 @@ instructions that may be required are listed later in this document.
 .. code:: bash

-    pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+    pip install --upgrade --process-dependency-links matrix-synapse

     # restart synapse
     synctl restart
@@ -48,7 +48,7 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

-Upgrading to $NEXT_VERSION
+Upgrading to v0.27.3
 ====================

 This release expands the anonymous usage stats sent if the opt-in

changelog.d/3659.feature

@@ -1 +0,0 @@
Support profile API endpoints on workers

changelog.d/3673.misc

@@ -1 +0,0 @@
Refactor state module to support multiple room versions

changelog.d/3680.feature

@@ -1 +0,0 @@
Server notices for resource limit blocking

changelog.d/3722.bugfix

@@ -1 +0,0 @@
Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues

changelog.d/3724.feature

@@ -1 +0,0 @@
Allow guests to use /rooms/:roomId/event/:eventId

changelog.d/3726.misc

@@ -1 +0,0 @@
Split the state_group_cache into member and non-member state events (and so speed up LL /sync)

changelog.d/3727.misc

@@ -1 +0,0 @@
Log failure to authenticate remote servers as warnings (without stack traces)

changelog.d/3734.misc

@@ -1 +0,0 @@
Reference the need for an HTTP replication port when using the federation_reader worker

changelog.d/3735.misc

@@ -1 +0,0 @@
Fix minor spelling error in federation client documentation.

changelog.d/3746.misc

@@ -1 +0,0 @@
Fix MAU cache invalidation due to missing yield

changelog.d/3747.bugfix

@@ -1 +0,0 @@
Fix bug where we resent "limit exceeded" server notices repeatedly

changelog.d/3749.feature

@@ -1 +0,0 @@
Add mau_trial_days config param, so that users only get counted as MAU after N days.

changelog.d/3751.feature

@@ -1 +0,0 @@
Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)).

changelog.d/3753.bugfix

@@ -1 +0,0 @@
Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices

changelog.d/3754.bugfix

@@ -1 +0,0 @@
Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic

changelog.d/3755.bugfix

@@ -1 +0,0 @@
Fix tagging of server notice rooms

changelog.d/3995.bugfix Normal file

@@ -0,0 +1 @@
Fix bug in event persistence logic which caused 'NoneType is not iterable'

changelog.d/3996.bugfix Normal file

@@ -0,0 +1 @@
Fix exception in background metrics collection

changelog.d/3997.bugfix Normal file

@@ -0,0 +1 @@
Fix exception handling in fetching remote profiles

changelog.d/3999.bugfix Normal file

@@ -0,0 +1 @@
Fix handling of rejected threepid invites

changelog.d/4017.misc Normal file

@@ -0,0 +1 @@
Optimisation for serving federation requests

File diff suppressed because it is too large

docker/Dockerfile

@@ -1,6 +1,13 @@
-FROM docker.io/python:2-alpine3.8
+ARG PYTHON_VERSION=2

-RUN apk add --no-cache --virtual .nacl_deps \
+###
+### Stage 0: builder
+###
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
+
+# install the OS build deps
+RUN apk add \
         build-base \
         libffi-dev \
         libjpeg-turbo-dev \
@@ -8,25 +15,46 @@ RUN apk add --no-cache --virtual .nacl_deps \
         libxslt-dev \
         linux-headers \
         postgresql-dev \
-        su-exec \
         zlib-dev

-COPY . /synapse
+# build things which have slow build steps, before we copy synapse, so that
+# the layer can be cached.
+#
+# (we really just care about caching a wheel here, as the "pip install" below
+# will install them again.)

-# A wheel cache may be provided in ./cache for faster build
-RUN cd /synapse \
- && pip install --upgrade \
-        pip \
-        setuptools \
- && mkdir -p /synapse/cache \
- && pip install -f /synapse/cache --upgrade --process-dependency-links . \
- && mv /synapse/docker/start.py /synapse/docker/conf / \
- && rm -rf \
-        setup.cfg \
-        setup.py \
-        synapse
+RUN pip install --prefix="/install" --no-warn-script-location \
+        cryptography \
+        msgpack-python \
+        pillow \
+        pynacl
+
+# now install synapse and all of the python deps to /install.
+COPY . /synapse
+RUN pip install --prefix="/install" --no-warn-script-location \
+        lxml \
+        psycopg2 \
+        /synapse
+
+###
+### Stage 1: runtime
+###
+
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
+
+RUN apk add --no-cache --virtual .runtime_deps \
+        libffi \
+        libjpeg-turbo \
+        libressl \
+        libxslt \
+        libpq \
+        zlib \
+        su-exec
+
+COPY --from=builder /install /usr/local
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf

 VOLUME ["/data"]
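
The dockerhubupload jobs in .circleci/config.yml above drive exactly this build argument; building both variants locally looks like::

    docker build -f docker/Dockerfile -t matrixdotorg/synapse:latest .
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .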

docker/Dockerfile-pgtests Normal file

@@ -0,0 +1,12 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:latest
# The Sytest image doesn't come with python, so install that
RUN apt-get -qq install -y python python-dev python-pip
# We need tox to run the tests in run_pg_tests.sh
RUN pip install tox
ADD run_pg_tests.sh /pg_tests.sh
ENTRYPOINT /pg_tests.sh
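
Per the comments in docker/run_pg_tests.sh, the image expects the source tree mounted at /src; a hypothetical manual invocation (the ``test_postgresql.sh`` wrapper mentioned in CONTRIBUTING.rst does this for you) might be::

    docker build -f docker/Dockerfile-pgtests -t synapse-pg-tests docker/
    docker run --rm -it -v "$(pwd):/src" synapse-pg-tests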

docker/README.md

@@ -88,6 +88,7 @@ variables are available for configuration:
 * ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
   uris to enable TURN for this homeserver.
 * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
+* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].

 Shared secrets, that will be initialized to random values if not set:
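
For example, to raise the upload limit to 50M when starting the container (other required options omitted)::

    docker run -e SYNAPSE_MAX_UPLOAD_SIZE=50M matrixdotorg/synapse:latest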

docker/conf/homeserver.yaml

@@ -85,7 +85,7 @@ federation_rc_concurrent: 3
 media_store_path: "/data/media"
 uploads_path: "/data/uploads"
-max_upload_size: "10M"
+max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
 max_image_pixels: "32M"
 dynamic_thumbnails: false
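
The ``or "10M"`` expression is ordinary Jinja2: when the variable is unset or empty, the fallback is rendered instead. A minimal sketch of the behaviour::

    from jinja2 import Template

    t = Template('max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"')
    print(t.render())                               # max_upload_size: "10M"
    print(t.render(SYNAPSE_MAX_UPLOAD_SIZE="50M"))  # max_upload_size: "50M"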

docker/run_pg_tests.sh Executable file

@@ -0,0 +1,20 @@
#!/bin/bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Initialise & start the database
su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=/tmp -e py27-postgres

docker/start.py

@@ -5,6 +5,7 @@ import os
 import sys
 import subprocess
 import glob
+import codecs

 # Utility functions
 convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))
@@ -23,7 +24,7 @@ def generate_secrets(environ, secrets):
             with open(filename) as handle: value = handle.read()
         else:
             print("Generating a random secret for {}".format(name))
-            value = os.urandom(32).encode("hex")
+            value = codecs.encode(os.urandom(32), "hex").decode()
             with open(filename, "w") as handle: handle.write(value)
     environ[secret] = value
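
For context on this fix: on Python 2, byte strings expose a "hex" codec via ``.encode``, but on Python 3 bytes objects have no ``.encode`` method at all, so the old line raises AttributeError. ``codecs.encode`` works on both. A minimal sketch::

    import codecs
    import os

    secret = os.urandom(32)
    # Python 2 only: secret.encode("hex") -- AttributeError on Python 3
    value = codecs.encode(secret, "hex").decode()  # portable; yields 64 hex chars
    print(value)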

jenkins-dendron-haproxy-postgres.sh

@@ -1,23 +0,0 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh
./sytest/jenkins/install_and_run.sh \
--python $WORKSPACE/.tox/py27/bin/python \
--synapse-directory $WORKSPACE \
--dendron $WORKSPACE/dendron/bin/dendron \
--haproxy \

jenkins-dendron-postgres.sh

@@ -1,20 +0,0 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh
./sytest/jenkins/install_and_run.sh \
--python $WORKSPACE/.tox/py27/bin/python \
--synapse-directory $WORKSPACE \
--dendron $WORKSPACE/dendron/bin/dendron \

jenkins-flake8.sh

@@ -1,22 +0,0 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
export PEP8SUFFIX="--output-file=violations.flake8.log"
rm .coverage* || echo "No coverage files to remove"
tox -e packaging -e pep8

jenkins-postgres.sh

@@ -1,18 +0,0 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./sytest/jenkins/prep_sytest_for_postgres.sh
./sytest/jenkins/install_and_run.sh \
--python $WORKSPACE/.tox/py27/bin/python \
--synapse-directory $WORKSPACE \

jenkins-sqlite.sh

@@ -1,16 +0,0 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./sytest/jenkins/install_and_run.sh \
--python $WORKSPACE/.tox/py27/bin/python \
--synapse-directory $WORKSPACE \

jenkins-unittests.sh

@@ -1,30 +0,0 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decided whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
rm .coverage* || echo "No coverage files to remove"
tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install lxml
tox -e py27

jenkins/clone.sh

@@ -1,44 +0,0 @@
#! /bin/bash
# This clones a project from github into a named subdirectory
# If the project has a branch with the same name as this branch
# then it will checkout that branch after cloning.
# Otherwise it will checkout "origin/develop."
# The first argument is the name of the directory to checkout
# the branch into.
# The second argument is the URL of the remote repository to checkout.
# Usually something like https://github.com/matrix-org/sytest.git
set -eux
NAME=$1
PROJECT=$2
BASE=".$NAME-base"
# Update our mirror.
if [ ! -d ".$NAME-base" ]; then
# Create a local mirror of the source repository.
# This saves us from having to download the entire repository
# when this script is next run.
git clone "$PROJECT" "$BASE" --mirror
else
# Fetch any updates from the source repository.
(cd "$BASE"; git fetch -p)
fi
# Remove the existing repository so that we have a clean copy
rm -rf "$NAME"
# Cloning with --shared means that we will share portions of the
# .git directory with our local mirror.
git clone "$BASE" "$NAME" --shared
# Jenkins may have supplied us with the name of the branch in the
# environment. Otherwise we will have to guess based on the current
# commit.
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
cd "$NAME"
# check out the relevant branch
git checkout "${GIT_BRANCH}" || (
echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
git checkout "origin/develop"
)

jenkins/prepare_synapse.sh

@@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5'
 $TOX_BIN/pip install 'pip>=10'

 { python synapse/python_dependencies.py
-  echo lxml psycopg2
+  echo lxml
 } | xargs $TOX_BIN/pip install

scripts-dev/copyrighter-sql.pl

@@ -1,33 +0,0 @@
#!/usr/bin/perl -pi
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
$copyright = <<EOT;
/* Copyright 2016 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
EOT
s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);

View File

@@ -1,33 +0,0 @@
#!/usr/bin/perl -pi
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
$copyright = <<EOT;
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EOT
s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);

View File

@@ -21,4 +21,4 @@ try:
     verifier.verify(macaroon, key)
     print "Signature is correct"
 except Exception as e:
-    print e.message
+    print str(e)

View File

@@ -0,0 +1,9 @@
#!/bin/bash
set -e
# Fetch the current GitHub issue number, add one to it -- presto! The likely
# next PR number.
CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
CURRENT_NUMBER=$((CURRENT_NUMBER+1))
echo $CURRENT_NUMBER

View File

@@ -1,57 +0,0 @@
#!/bin/bash
## CAUTION:
## This script will remove (hopefully) all trace of the given room ID from
## your homeserver.db
## Do not run it lightly.
set -e
if [ "$1" == "-h" ] || [ "$1" == "" ]; then
echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
echo "or"
echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
exit
fi
ROOMID="$1"
cat <<EOF
DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_edges WHERE room_id = '$ROOMID';
DELETE FROM room_depth WHERE room_id = '$ROOMID';
DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM events WHERE room_id = '$ROOMID';
DELETE FROM event_json WHERE room_id = '$ROOMID';
DELETE FROM state_events WHERE room_id = '$ROOMID';
DELETE FROM current_state_events WHERE room_id = '$ROOMID';
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
DELETE FROM feedback WHERE room_id = '$ROOMID';
DELETE FROM topics WHERE room_id = '$ROOMID';
DELETE FROM room_names WHERE room_id = '$ROOMID';
DELETE FROM rooms WHERE room_id = '$ROOMID';
DELETE FROM room_hosts WHERE room_id = '$ROOMID';
DELETE FROM room_aliases WHERE room_id = '$ROOMID';
DELETE FROM state_groups WHERE room_id = '$ROOMID';
DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
DELETE FROM event_search WHERE room_id = '$ROOMID';
DELETE FROM guest_access WHERE room_id = '$ROOMID';
DELETE FROM history_visibility WHERE room_id = '$ROOMID';
DELETE FROM room_tags WHERE room_id = '$ROOMID';
DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
DELETE FROM room_account_data WHERE room_id = '$ROOMID';
DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
DELETE FROM local_invites WHERE room_id = '$ROOMID';
DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
DELETE FROM event_reports WHERE room_id = '$ROOMID';
DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
DELETE FROM event_auth WHERE room_id = '$ROOMID';
DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
VACUUM;
EOF

View File

@@ -133,7 +133,7 @@ def register_new_user(user, password, server_location, shared_secret, admin):
         print "Passwords do not match"
         sys.exit(1)

-    if not admin:
+    if admin is None:
         admin = raw_input("Make admin [no]: ")
         if admin in ("y", "yes", "true"):
             admin = True
@@ -160,10 +160,16 @@ if __name__ == "__main__":
         default=None,
         help="New password for user. Will prompt if omitted.",
     )
-    parser.add_argument(
+    admin_group = parser.add_mutually_exclusive_group()
+    admin_group.add_argument(
         "-a", "--admin",
         action="store_true",
-        help="Register new user as an admin. Will prompt if omitted.",
+        help="Register new user as an admin. Will prompt if --no-admin is not set either.",
+    )
+    admin_group.add_argument(
+        "--no-admin",
+        action="store_true",
+        help="Register new user as a regular user. Will prompt if --admin is not set either.",
     )

     group = parser.add_mutually_exclusive_group(required=True)
@@ -197,4 +203,8 @@ if __name__ == "__main__":
     else:
         secret = args.shared_secret

-    register_new_user(args.user, args.password, args.server_url, secret, args.admin)
+    admin = None
+    if args.admin or args.no_admin:
+        admin = args.admin
+
+    register_new_user(args.user, args.password, args.server_url, secret, admin)
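
The net effect of the --admin/--no-admin pair is a tri-state flag. A minimal sketch (values illustrative, not part of the diff) of how the resolved value behaves:

    import argparse

    parser = argparse.ArgumentParser()
    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument("-a", "--admin", action="store_true")
    admin_group.add_argument("--no-admin", action="store_true")

    args = parser.parse_args(["--no-admin"])
    admin = None                    # None means "prompt the operator"
    if args.admin or args.no_admin:
        admin = args.admin          # False here, since --no-admin was passed
    print(admin)                    # -> False; with neither flag it stays None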

View File

@@ -17,13 +17,14 @@ ignore =
 [pep8]
 max-line-length = 90
 # W503 requires that binary operators be at the end, not start, of lines. Erik
-# doesn't like it. E203 is contrary to PEP8.
-ignore = W503,E203
+# doesn't like it. E203 is contrary to PEP8. E731 is silly.
+ignore = W503,E203,E731

 [flake8]
 # note that flake8 inherits the "ignore" settings from "pep8" (because it uses
 # pep8 to do those checks), but not the "max-line-length" setting
 max-line-length = 90
+ignore=W503,E203,E731

 [isort]
 line_length = 89

View File

@@ -17,4 +17,14 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.33.3"
+try:
+    from twisted.internet import protocol
+    from twisted.internet.protocol import Factory
+    from twisted.names.dns import DNSDatagramProtocol
+    protocol.Factory.noisy = False
+    Factory.noisy = False
+    DNSDatagramProtocol.noisy = False
+except ImportError:
+    pass
+
+__version__ = "0.33.6"
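
The try/except block above silences twisted's per-connection "Starting factory"/"Stopping factory" log lines as soon as synapse is imported. The mechanism in isolation (an illustrative sketch, not part of the diff):

    from twisted.internet.protocol import Factory

    Factory.noisy = False  # factories that don't override noisy now log nothing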

View File

@@ -26,6 +26,7 @@ import synapse.types
 from synapse import event_auth
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.errors import AuthError, Codes, ResourceLimitError
+from synapse.config.server import is_threepid_reserved
 from synapse.types import UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
@@ -775,13 +776,19 @@ class Auth(object):
         )

     @defer.inlineCallbacks
-    def check_auth_blocking(self, user_id=None):
+    def check_auth_blocking(self, user_id=None, threepid=None):
         """Checks if the user should be rejected for some external reason,
         such as monthly active user limiting or global disable flag

         Args:
             user_id(str|None): If present, checks for presence against existing
                 MAU cohort
+
+            threepid(dict|None): If present, checks for presence against configured
+                reserved threepid. Used in cases where the user is trying register
+                with a MAU blocked server, normally they would be rejected but their
+                threepid is on the reserved list. user_id and
+                threepid should never be set at the same time.
         """

         # Never fail an auth check for the server notices users
@@ -793,10 +800,12 @@ class Auth(object):
             raise ResourceLimitError(
                 403, self.hs.config.hs_disabled_message,
                 errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
-                admin_uri=self.hs.config.admin_uri,
+                admin_contact=self.hs.config.admin_contact,
                 limit_type=self.hs.config.hs_disabled_limit_type
             )
         if self.hs.config.limit_usage_by_mau is True:
+            assert not (user_id and threepid)
+
             # If the user is already part of the MAU cohort or a trial user
             if user_id:
                 timestamp = yield self.store.user_last_seen_monthly_active(user_id)
@@ -806,13 +815,17 @@ class Auth(object):
                 is_trial = yield self.store.is_trial_user(user_id)
                 if is_trial:
                     return
+            elif threepid:
+                # If the user does not exist yet, but is signing up with a
+                # reserved threepid then pass auth check
+                if is_threepid_reserved(self.hs.config, threepid):
+                    return
+
             # Else if there is no room in the MAU bucket, bail
             current_mau = yield self.store.get_monthly_active_count()
             if current_mau >= self.hs.config.max_mau_value:
                 raise ResourceLimitError(
                     403, "Monthly Active User Limit Exceeded",
-                    admin_uri=self.hs.config.admin_uri,
+                    admin_contact=self.hs.config.admin_contact,
                     errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                     limit_type="monthly_active_user"
                 )
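
To make the control flow concrete, here is a toy, synchronous analogue of the new MAU check (purely illustrative; the real method is the inlineCallbacks version above, and all names here are invented):

    def check_auth_blocking_sketch(user_id=None, threepid=None,
                                   reserved=(), current_mau=0, max_mau=0):
        # user_id and threepid are mutually exclusive, as the assert enforces
        assert not (user_id and threepid)
        if threepid and threepid in reserved:
            return  # reserved threepids may register even on a full server
        if current_mau >= max_mau:
            raise RuntimeError("Monthly Active User Limit Exceeded")

    # a reserved address gets through despite current_mau == max_mau:
    check_auth_blocking_sketch(
        threepid="ops@example.com", reserved=("ops@example.com",),
        current_mau=5, max_mau=5,
    )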

View File

@@ -240,10 +240,10 @@ class ResourceLimitError(SynapseError):
     def __init__(
         self, code, msg,
         errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
-        admin_uri=None,
+        admin_contact=None,
         limit_type=None,
     ):
-        self.admin_uri = admin_uri
+        self.admin_contact = admin_contact
         self.limit_type = limit_type
         super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)
@@ -251,7 +251,7 @@ class ResourceLimitError(SynapseError):
         return cs_error(
             self.msg,
             self.errcode,
-            admin_uri=self.admin_uri,
+            admin_contact=self.admin_contact,
             limit_type=self.limit_type
         )
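
For reference, the error body a blocked client receives now carries admin_contact instead of admin_uri; a rough illustration with invented values:

    error_body = {
        "errcode": "M_RESOURCE_LIMIT_EXCEEDED",
        "error": "Monthly Active User Limit Exceeded",
        "admin_contact": "mailto:admin@server.com",
        "limit_type": "monthly_active_user",
    }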

View File

@@ -226,7 +226,7 @@ class Filtering(object):
             jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
                                 format_checker=FormatChecker())
         except jsonschema.ValidationError as e:
-            raise SynapseError(400, e.message)
+            raise SynapseError(400, str(e))


 class FilterCollection(object):
@@ -251,6 +251,7 @@ class FilterCollection(object):
             "include_leave", False
         )
         self.event_fields = filter_json.get("event_fields", [])
+        self.event_format = filter_json.get("event_format", "client")

     def __repr__(self):
         return "<FilterCollection %s>" % (json.dumps(self._filter_json),)

View File

@@ -64,7 +64,7 @@ class ConsentURIBuilder(object):
         """
         mac = hmac.new(
             key=self._hmac_secret,
-            msg=user_id,
+            msg=user_id.encode('ascii'),
             digestmod=sha256,
         ).hexdigest()
         consent_uri = "%s_matrix/consent?%s" % (

View File

@@ -24,7 +24,7 @@ try:
     python_dependencies.check_requirements()
 except python_dependencies.MissingRequirementError as e:
     message = "\n".join([
-        "Missing Requirement: %s" % (e.message,),
+        "Missing Requirement: %s" % (str(e),),
         "To install run:",
         "    pip install --upgrade --force \"%s\"" % (e.dependency,),
         "",

View File

@@ -51,10 +51,7 @@ class AppserviceSlaveStore(


 class AppserviceServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = AppserviceSlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -139,7 +136,7 @@ def start(config_options):
             "Synapse appservice", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.appservice"
@@ -175,7 +172,6 @@ def start(config_options):

     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)
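
The same setup() boilerplate is deleted from each worker that follows, in favour of a DATASTORE_CLASS class attribute. A sketch of the consuming side (the real consumer is HomeServer.setup(), which is outside this diff; names here are illustrative):

    class HomeServerSketch(object):
        DATASTORE_CLASS = None  # each worker subclass points this at its store

        def setup(self):
            # one shared implementation replaces a dozen copy-pasted setup()s
            self.datastore = self.DATASTORE_CLASS(self.get_db_conn(), self)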

View File

@@ -74,10 +74,7 @@ class ClientReaderSlavedStore(


 class ClientReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = ClientReaderSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -156,7 +153,7 @@ def start(config_options):
             "Synapse client reader", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.client_reader"
@@ -184,7 +181,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)

     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()

     reactor.callWhenRunning(start)

View File

@@ -90,10 +90,7 @@ class EventCreatorSlavedStore(


 class EventCreatorServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = EventCreatorSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -172,7 +169,7 @@ def start(config_options):
             "Synapse event creator", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.event_creator"
@@ -202,7 +199,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)

     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()

     reactor.callWhenRunning(start)

View File

@@ -72,10 +72,7 @@ class FederationReaderSlavedStore(


 class FederationReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationReaderSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -143,7 +140,7 @@ def start(config_options):
             "Synapse federation reader", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.federation_reader"
@@ -171,7 +168,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)

     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()

     reactor.callWhenRunning(start)

View File

@@ -78,10 +78,7 @@ class FederationSenderSlaveStore(


 class FederationSenderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationSenderSlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -163,7 +160,7 @@ def start(config_options):
             "Synapse federation sender", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.federation_sender"
@@ -204,7 +201,6 @@ def start(config_options):

     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)
     _base.start_worker_reactor("synapse-federation-sender", config)

View File

@@ -148,10 +148,7 @@ class FrontendProxySlavedStore(


 class FrontendProxyServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FrontendProxySlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -231,7 +228,7 @@ def start(config_options):
             "Synapse frontend proxy", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.frontend_proxy"
@@ -261,7 +258,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)

     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()

     reactor.callWhenRunning(start)

View File

@@ -62,7 +62,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
-from synapse.storage import are_all_users_on_domain
+from synapse.storage import DataStore, are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
 from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
 from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -111,6 +111,8 @@ def build_resource_for_web_client(hs):

 class SynapseHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
+
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
         bind_addresses = listener_config["bind_addresses"]
@@ -299,12 +301,16 @@ class SynapseHomeServer(HomeServer):
         try:
             database_engine.check_database(db_conn.cursor())
         except IncorrectDatabaseSetup as e:
-            quit_with_error(e.message)
+            quit_with_error(str(e))


 # Gauges to expose monthly active user control metrics
 current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
 max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
+registered_reserved_users_mau_gauge = Gauge(
+    "synapse_admin_mau:registered_reserved_users",
+    "Registered users with reserved threepids"
+)


 def setup(config_options):
@@ -322,7 +328,7 @@ def setup(config_options):
             config_options,
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     if not config:
@@ -356,13 +362,13 @@ def setup(config_options):
     logger.info("Preparing database: %s...", config.database_config['name'])

     try:
-        db_conn = hs.get_db_conn(run_new_connection=False)
-        prepare_database(db_conn, database_engine, config=config)
-        database_engine.on_new_connection(db_conn)
-
-        hs.run_startup_checks(db_conn, database_engine)
-
-        db_conn.commit()
+        with hs.get_db_conn(run_new_connection=False) as db_conn:
+            prepare_database(db_conn, database_engine, config=config)
+            database_engine.on_new_connection(db_conn)
+
+            hs.run_startup_checks(db_conn, database_engine)
+
+            db_conn.commit()
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
@@ -378,10 +384,8 @@ def setup(config_options):

     def start():
         hs.get_pusherpool().start()
-        hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
-        hs.get_federation_client().start_get_pdu_cache()

     reactor.callWhenRunning(start)
@@ -451,6 +455,10 @@ def run(hs):
         stats["homeserver"] = hs.config.server_name
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
+        version = sys.version_info
+        stats["python_version"] = "{}.{}.{}".format(
+            version.major, version.minor, version.micro
+        )
         stats["total_users"] = yield hs.get_datastore().count_all_users()

         total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
@@ -529,10 +537,14 @@ def run(hs):

     @defer.inlineCallbacks
     def generate_monthly_active_users():
-        count = 0
+        current_mau_count = 0
+        reserved_count = 0
+        store = hs.get_datastore()
         if hs.config.limit_usage_by_mau:
-            count = yield hs.get_datastore().get_monthly_active_count()
-        current_mau_gauge.set(float(count))
+            current_mau_count = yield store.get_monthly_active_count()
+            reserved_count = yield store.get_registered_reserved_users_count()
+        current_mau_gauge.set(float(current_mau_count))
+        registered_reserved_users_mau_gauge.set(float(reserved_count))
         max_mau_gauge.set(float(hs.config.max_mau_value))

     hs.get_datastore().initialise_reserved_users(
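
A toy illustration of the new reserved-users gauge (the prometheus_client calls are real; the value is invented, and in run() it comes from get_registered_reserved_users_count()):

    from prometheus_client import Gauge

    g = Gauge(
        "synapse_admin_mau:registered_reserved_users",
        "Registered users with reserved threepids",
    )
    g.set(3.0)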

View File

@@ -60,10 +60,7 @@ class MediaRepositorySlavedStore(


 class MediaRepositoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = MediaRepositorySlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -136,7 +133,7 @@ def start(config_options):
             "Synapse media repository", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.media_repository"
@@ -171,7 +168,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)

     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()

     reactor.callWhenRunning(start)

View File

@@ -78,10 +78,7 @@ class PusherSlaveStore(


 class PusherServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = PusherSlaveStore

     def remove_pusher(self, app_id, push_key, user_id):
         self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
@@ -194,7 +191,7 @@ def start(config_options):
             "Synapse pusher", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.pusher"
@@ -231,7 +228,6 @@ def start(config_options):
     def start():
         ps.get_pusherpool().start()
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

View File

@@ -249,10 +249,7 @@ class SynchrotronApplicationService(object):


 class SynchrotronServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = SynchrotronSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -413,7 +410,7 @@ def start(config_options):
             "Synapse synchrotron", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.synchrotron"
@@ -438,7 +435,6 @@ def start(config_options):

     def start():
         ss.get_datastore().start_profiling()
-        ss.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

View File

@@ -1,284 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import errno
import glob
import os
import os.path
import signal
import subprocess
import sys
import time
from six import iteritems
import yaml
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
GREEN = "\x1b[1;32m"
YELLOW = "\x1b[1;33m"
RED = "\x1b[1;31m"
NORMAL = "\x1b[m"
def pid_running(pid):
try:
os.kill(pid, 0)
return True
except OSError as err:
if err.errno == errno.EPERM:
return True
return False
def write(message, colour=NORMAL, stream=sys.stdout):
if colour == NORMAL:
stream.write(message + "\n")
else:
stream.write(colour + message + NORMAL + "\n")
def abort(message, colour=RED, stream=sys.stderr):
write(message, colour, stream)
sys.exit(1)
def start(configfile):
write("Starting ...")
args = SYNAPSE
args.extend(["--daemonize", "-c", configfile])
try:
subprocess.check_call(args)
write("started synapse.app.homeserver(%r)" %
(configfile,), colour=GREEN)
except subprocess.CalledProcessError as e:
write(
"error starting (exit code: %d); see above for logs" % e.returncode,
colour=RED,
)
def start_worker(app, configfile, worker_configfile):
args = [
"python", "-B",
"-m", app,
"-c", configfile,
"-c", worker_configfile
]
try:
subprocess.check_call(args)
write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
except subprocess.CalledProcessError as e:
write(
"error starting %s(%r) (exit code: %d); see above for logs" % (
app, worker_configfile, e.returncode,
),
colour=RED,
)
def stop(pidfile, app):
if os.path.exists(pidfile):
pid = int(open(pidfile).read())
try:
os.kill(pid, signal.SIGTERM)
write("stopped %s" % (app,), colour=GREEN)
except OSError as err:
if err.errno == errno.ESRCH:
write("%s not running" % (app,), colour=YELLOW)
elif err.errno == errno.EPERM:
abort("Cannot stop %s: Operation not permitted" % (app,))
else:
abort("Cannot stop %s: Unknown error" % (app,))
Worker = collections.namedtuple("Worker", [
"app", "configfile", "pidfile", "cache_factor"
])
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"action",
choices=["start", "stop", "restart"],
help="whether to start, stop or restart the synapse",
)
parser.add_argument(
"configfile",
nargs="?",
default="homeserver.yaml",
help="the homeserver config file, defaults to homeserver.yaml",
)
parser.add_argument(
"-w", "--worker",
metavar="WORKERCONFIG",
help="start or stop a single worker",
)
parser.add_argument(
"-a", "--all-processes",
metavar="WORKERCONFIGDIR",
help="start or stop all the workers in the given directory"
" and the main synapse process",
)
options = parser.parse_args()
if options.worker and options.all_processes:
write(
'Cannot use "--worker" with "--all-processes"',
stream=sys.stderr
)
sys.exit(1)
configfile = options.configfile
if not os.path.exists(configfile):
write(
"No config file found\n"
"To generate a config file, run '%s -c %s --generate-config"
" --server-name=<server name>'\n" % (
" ".join(SYNAPSE), options.configfile
),
stream=sys.stderr,
)
sys.exit(1)
with open(configfile) as stream:
config = yaml.load(stream)
pidfile = config["pid_file"]
cache_factor = config.get("synctl_cache_factor")
start_stop_synapse = True
if cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
cache_factors = config.get("synctl_cache_factors", {})
for cache_name, factor in iteritems(cache_factors):
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
worker_configfiles = []
if options.worker:
start_stop_synapse = False
worker_configfile = options.worker
if not os.path.exists(worker_configfile):
write(
"No worker config found at %r" % (worker_configfile,),
stream=sys.stderr,
)
sys.exit(1)
worker_configfiles.append(worker_configfile)
if options.all_processes:
# To start the main synapse with -a you need to add a worker file
# with worker_app == "synapse.app.homeserver"
start_stop_synapse = False
worker_configdir = options.all_processes
if not os.path.isdir(worker_configdir):
write(
"No worker config directory found at %r" % (worker_configdir,),
stream=sys.stderr,
)
sys.exit(1)
worker_configfiles.extend(sorted(glob.glob(
os.path.join(worker_configdir, "*.yaml")
)))
workers = []
for worker_configfile in worker_configfiles:
with open(worker_configfile) as stream:
worker_config = yaml.load(stream)
worker_app = worker_config["worker_app"]
if worker_app == "synapse.app.homeserver":
# We need to special case all of this to pick up options that may
# be set in the main config file or in this worker config file.
worker_pidfile = (
worker_config.get("pid_file")
or pidfile
)
worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
daemonize = worker_config.get("daemonize") or config.get("daemonize")
assert daemonize, "Main process must have daemonize set to true"
# The master process doesn't support using worker_* config.
for key in worker_config:
if key == "worker_app": # But we allow worker_app
continue
assert not key.startswith("worker_"), \
"Main process cannot use worker_* config"
else:
worker_pidfile = worker_config["worker_pid_file"]
worker_daemonize = worker_config["worker_daemonize"]
assert worker_daemonize, "In config %r: expected '%s' to be True" % (
worker_configfile, "worker_daemonize")
worker_cache_factor = worker_config.get("synctl_cache_factor")
workers.append(Worker(
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
))
action = options.action
if action == "stop" or action == "restart":
for worker in workers:
stop(worker.pidfile, worker.app)
if start_stop_synapse:
stop(pidfile, "synapse.app.homeserver")
# Wait for synapse to actually shutdown before starting it again
if action == "restart":
running_pids = []
if start_stop_synapse and os.path.exists(pidfile):
running_pids.append(int(open(pidfile).read()))
for worker in workers:
if os.path.exists(worker.pidfile):
running_pids.append(int(open(worker.pidfile).read()))
if len(running_pids) > 0:
write("Waiting for process to exit before restarting...")
for running_pid in running_pids:
while pid_running(running_pid):
time.sleep(0.2)
write("All processes exited; now restarting...")
if action == "start" or action == "restart":
if start_stop_synapse:
# Check if synapse is already running
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
abort("synapse.app.homeserver already running")
start(configfile)
for worker in workers:
if worker.cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
start_worker(worker.app, configfile, worker.configfile)
if cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
else:
os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
if __name__ == "__main__":
main()

View File

@@ -94,10 +94,7 @@ class UserDirectorySlaveStore(


 class UserDirectoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = UserDirectorySlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -191,7 +188,7 @@ def start(config_options):
             "Synapse user directory", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.user_dir"
@@ -232,7 +229,6 @@ def start(config_options):

     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

View File

@@ -13,7 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import urllib
+
+from six.moves import urllib

 from prometheus_client import Counter

@@ -98,7 +99,7 @@ class ApplicationServiceApi(SimpleHttpClient):
     def query_user(self, service, user_id):
         if service.url is None:
             defer.returnValue(False)
-        uri = service.url + ("/users/%s" % urllib.quote(user_id))
+        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
         response = None
         try:
             response = yield self.get_json(uri, {
@@ -119,7 +120,7 @@ class ApplicationServiceApi(SimpleHttpClient):
     def query_alias(self, service, alias):
         if service.url is None:
             defer.returnValue(False)
-        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
+        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
         response = None
         try:
             response = yield self.get_json(uri, {
@@ -153,7 +154,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 service.url,
                 APP_SERVICE_PREFIX,
                 kind,
-                urllib.quote(protocol)
+                urllib.parse.quote(protocol)
             )
             try:
                 response = yield self.get_json(uri, fields)
@@ -188,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             uri = "%s%s/thirdparty/protocol/%s" % (
                 service.url,
                 APP_SERVICE_PREFIX,
-                urllib.quote(protocol)
+                urllib.parse.quote(protocol)
             )
             try:
                 info = yield self.get_json(uri, {})
@@ -228,7 +229,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         txn_id = str(txn_id)

         uri = service.url + ("/transactions/%s" %
-                             urllib.quote(txn_id))
+                             urllib.parse.quote(txn_id))
         try:
             yield self.put_json(
                 uri=uri,

View File

@@ -25,7 +25,7 @@ if __name__ == "__main__":
         try:
             config = HomeServerConfig.load_config("", sys.argv[3:])
         except ConfigError as e:
-            sys.stderr.write("\n" + e.message + "\n")
+            sys.stderr.write("\n" + str(e) + "\n")
             sys.exit(1)

         print (getattr(config, key))

View File

@@ -21,7 +21,7 @@ from .consent_config import ConsentConfig
 from .database import DatabaseConfig
 from .emailconfig import EmailConfig
 from .groups import GroupsConfig
-from .jwt import JWTConfig
+from .jwt_config import JWTConfig
 from .key import KeyConfig
 from .logger import LoggingConfig
 from .metrics import MetricsConfig

View File

@@ -227,7 +227,22 @@ def setup_logging(config, use_worker_options=False):
         #
         # However this may not be too much of a problem if we are just writing to a file.
         observer = STDLibLogObserver()

+        def _log(event):
+            if "log_text" in event:
+                if event["log_text"].startswith("DNSDatagramProtocol starting on "):
+                    return
+
+                if event["log_text"].startswith("(UDP Port "):
+                    return
+
+                if event["log_text"].startswith("Timing out client"):
+                    return
+
+            return observer(event)
+
         globalLogBeginner.beginLoggingTo(
-            [observer],
+            [_log],
             redirectStandardIO=not config.no_redirect_stdio,
         )
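
The wrapper works because any callable taking an event dict can serve as a twisted log observer. A generalised sketch of the same pattern (an assumed helper, not part of the diff):

    def make_filtering_observer(inner, noisy_prefixes):
        def _log(event):
            text = event.get("log_text", "")
            if any(text.startswith(p) for p in noisy_prefixes):
                return  # swallow known-noisy twisted chatter
            return inner(event)
        return _log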

View File

@@ -93,7 +93,7 @@ class ServerConfig(Config):

         # Admin uri to direct users at should their instance become blocked
         # due to resource constraints
-        self.admin_uri = config.get("admin_uri", None)
+        self.admin_contact = config.get("admin_contact", None)

         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist = None
@@ -357,7 +357,7 @@ class ServerConfig(Config):
         # Homeserver blocking
         #
         # How to reach the server admin, used in ResourceLimitError
-        # admin_uri: 'mailto:admin@server.com'
+        # admin_contact: 'mailto:admin@server.com'
         #
         # Global block config
         #
@@ -404,6 +404,23 @@ class ServerConfig(Config):
                                   " service on the given port.")


+def is_threepid_reserved(config, threepid):
+    """Check the threepid against the reserved threepid config
+    Args:
+        config(ServerConfig) - to access server config attributes
+        threepid(dict) - The threepid to test for
+
+    Returns:
+        boolean Is the threepid undertest reserved_user
+    """
+
+    for tp in config.mau_limits_reserved_threepids:
+        if (threepid['medium'] == tp['medium']
+                and threepid['address'] == tp['address']):
+            return True
+    return False
+
+
 def read_gc_thresholds(thresholds):
     """Reads the three integer thresholds for garbage collection. Ensures that
     the thresholds are integers if thresholds are supplied.
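
A toy call of the new is_threepid_reserved helper added above (the config object is stubbed out here; real callers pass the ServerConfig itself):

    class FakeConfig(object):
        mau_limits_reserved_threepids = [
            {"medium": "email", "address": "support@example.com"},
        ]

    is_reserved = is_threepid_reserved(
        FakeConfig(), {"medium": "email", "address": "support@example.com"},
    )
    # is_reserved -> True; any other medium/address pair -> False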

View File

@@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object):

     def get_options(self, host):
         return ClientTLSOptions(
-            host.decode('utf-8'),
+            host,
             CertificateOptions(verify=False).getContext()
         )

View File

@@ -50,7 +50,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
             defer.returnValue((server_response, server_certificate))
         except SynapseKeyClientError as e:
             logger.warn("Error getting key for %r: %s", server_name, e)
-            if e.status.startswith("4"):
+            if e.status.startswith(b"4"):
                 # Don't retry for 4xx responses.
                 raise IOError("Cannot get key for %r" % server_name)
         except (ConnectError, DomainError) as e:
@@ -82,6 +82,12 @@ class SynapseKeyClientProtocol(HTTPClient):
         self._peer = self.transport.getPeer()
         logger.debug("Connected to %s", self._peer)

+        if not isinstance(self.path, bytes):
+            self.path = self.path.encode('ascii')
+
+        if not isinstance(self.host, bytes):
+            self.host = self.host.encode('ascii')
+
         self.sendCommand(b"GET", self.path)
         if self.host:
             self.sendHeader(b"Host", self.host)

View File

@@ -16,9 +16,10 @@

 import hashlib
 import logging
-import urllib
 from collections import namedtuple

+from six.moves import urllib
+
 from signedjson.key import (
     decode_verify_key_bytes,
     encode_verify_key_base64,
@@ -40,6 +41,7 @@ from synapse.api.errors import Codes, SynapseError
 from synapse.crypto.keyclient import fetch_server_key
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
+    LoggingContext,
     PreserveLoggingContext,
     preserve_fn,
     run_in_background,
@@ -216,23 +218,34 @@ class Keyring(object):
            servers have completed. Follows the synapse rules of logcontext
            preservation.
         """
+        loop_count = 1
         while True:
             wait_on = [
-                self.key_downloads[server_name]
+                (server_name, self.key_downloads[server_name])
                 for server_name in server_names
                 if server_name in self.key_downloads
             ]
-            if wait_on:
-                with PreserveLoggingContext():
-                    yield defer.DeferredList(wait_on)
-            else:
+            if not wait_on:
                 break
+            logger.info(
+                "Waiting for existing lookups for %s to complete [loop %i]",
+                [w[0] for w in wait_on], loop_count,
+            )
+            with PreserveLoggingContext():
+                yield defer.DeferredList((w[1] for w in wait_on))
+
+            loop_count += 1
+
+        ctx = LoggingContext.current_context()

         def rm(r, server_name_):
-            self.key_downloads.pop(server_name_, None)
+            with PreserveLoggingContext(ctx):
+                logger.debug("Releasing key lookup lock on %s", server_name_)
+                self.key_downloads.pop(server_name_, None)
             return r

         for server_name, deferred in server_to_deferred.items():
+            logger.debug("Got key lookup lock on %s", server_name)
             self.key_downloads[server_name] = deferred
             deferred.addBoth(rm, server_name)
@@ -432,7 +445,7 @@ class Keyring(object):
             # an incoming request.
             query_response = yield self.client.post_json(
                 destination=perspective_name,
-                path=b"/_matrix/key/v2/query",
+                path="/_matrix/key/v2/query",
                 data={
                     u"server_keys": {
                         server_name: {
@@ -513,8 +526,8 @@ class Keyring(object):
         (response, tls_certificate) = yield fetch_server_key(
             server_name, self.hs.tls_client_options_factory,
-            path=(b"/_matrix/key/v2/server/%s" % (
-                urllib.quote(requested_key_id),
+            path=("/_matrix/key/v2/server/%s" % (
+                urllib.parse.quote(requested_key_id),
             )).encode("ascii"),
         )
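
The key_downloads dict is effectively a per-server lock: whoever stores a deferred there holds it, and it is released when that deferred fires. A stripped-down sketch of the idea (logcontext plumbing omitted; the helper name is invented):

    key_downloads = {}

    def take_key_lookup_lock(server_name, deferred):
        key_downloads[server_name] = deferred

        def _release(result):
            key_downloads.pop(server_name, None)  # release on success or failure
            return result

        deferred.addBoth(_release)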

View File

@@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):

     creation_event = auth_events.get((EventTypes.Create, ""), None)
     if not creation_event:
-        raise SynapseError(
+        raise AuthError(
             403,
-            "Room %r does not exist" % (event.room_id,)
+            "No create event in auth events",
         )

     creating_domain = get_domain_from_id(event.room_id)
@@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
             if user_level < invite_level:
                 raise AuthError(
-                    403, (
-                        "You cannot issue a third party invite for %s." %
-                        (event.content.display_name,)
-                    )
+                    403, "You don't have permission to invite users",
                 )
             else:
                 logger.debug("Allowing! %s", event)
@@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events):
         if user_level < invite_level:
             raise AuthError(
-                403, "You cannot invite user %s." % target_user_id
+                403, "You don't have permission to invite users",
             )
     elif Membership.JOIN == membership:
         # Joins are valid iff caller == target and they were:

View File

@@ -13,13 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import os
+from distutils.util import strtobool
+
+import six
+
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze

 # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
-# bugs where we accidentally share e.g. signature dicts. However, converting
-# a dict to frozen_dicts is expensive.
-USE_FROZEN_DICTS = True
+# bugs where we accidentally share e.g. signature dicts. However, converting a
+# dict to frozen_dicts is expensive.
+#
+# NOTE: This is overridden by the configuration by the Synapse worker apps, but
+# for the sake of tests, it is set here while it cannot be configured on the
+# homeserver object itself.
+USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))


 class _EventInternalMetadata(object):
@@ -147,6 +156,9 @@ class EventBase(object):
     def items(self):
         return list(self._event_dict.items())

+    def keys(self):
+        return six.iterkeys(self._event_dict)
+

 class FrozenEvent(EventBase):
     def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
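
Since strtobool accepts "1", "true", "yes" and friends, tests can opt back into frozen dicts via the environment before synapse.events is imported; for instance:

    import os
    from distutils.util import strtobool

    os.environ["SYNAPSE_USE_FROZEN_DICTS"] = "yes"
    assert strtobool(os.environ["SYNAPSE_USE_FROZEN_DICTS"]) == 1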

View File

@ -13,17 +13,20 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import logging import logging
from collections import namedtuple
import six import six
from twisted.internet import defer from twisted.internet import defer
from twisted.internet.defer import DeferredList
from synapse.api.constants import MAX_DEPTH from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError from synapse.api.errors import Codes, SynapseError
from synapse.crypto.event_signing import check_event_content_hash from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent from synapse.events import FrozenEvent
from synapse.events.utils import prune_event from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict from synapse.http.servlet import assert_params_in_dict
from synapse.types import get_domain_from_id
from synapse.util import logcontext, unwrapFirstError from synapse.util import logcontext, unwrapFirstError
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -133,34 +136,45 @@ class FederationBase(object):
* throws a SynapseError if the signature check failed. * throws a SynapseError if the signature check failed.
The deferreds run their callbacks in the sentinel logcontext. The deferreds run their callbacks in the sentinel logcontext.
""" """
deferreds = _check_sigs_on_pdus(self.keyring, pdus)
redacted_pdus = [
prune_event(pdu)
for pdu in pdus
]
deferreds = self.keyring.verify_json_objects_for_server([
(p.origin, p.get_pdu_json())
for p in redacted_pdus
])
ctx = logcontext.LoggingContext.current_context() ctx = logcontext.LoggingContext.current_context()
def callback(_, pdu, redacted): def callback(_, pdu):
with logcontext.PreserveLoggingContext(ctx): with logcontext.PreserveLoggingContext(ctx):
if not check_event_content_hash(pdu): if not check_event_content_hash(pdu):
logger.warn( # let's try to distinguish between failures because the event was
"Event content has been tampered, redacting %s: %s", # redacted (which are somewhat expected) vs actual ball-tampering
pdu.event_id, pdu.get_pdu_json() # incidents.
) #
return redacted # This is just a heuristic, so we just assume that if the keys are
# about the same between the redacted and received events, then the
# received event was probably a redacted copy (but we then use our
# *actual* redacted copy to be on the safe side.)
redacted_event = prune_event(pdu)
if (
set(redacted_event.keys()) == set(pdu.keys()) and
set(six.iterkeys(redacted_event.content))
== set(six.iterkeys(pdu.content))
):
logger.info(
"Event %s seems to have been redacted; using our redacted "
"copy",
pdu.event_id,
)
else:
logger.warning(
"Event %s content has been tampered, redacting",
pdu.event_id, pdu.get_pdu_json(),
)
return redacted_event
if self.spam_checker.check_event_for_spam(pdu): if self.spam_checker.check_event_for_spam(pdu):
logger.warn( logger.warn(
"Event contains spam, redacting %s: %s", "Event contains spam, redacting %s: %s",
pdu.event_id, pdu.get_pdu_json() pdu.event_id, pdu.get_pdu_json()
) )
return redacted return prune_event(pdu)
return pdu return pdu
@ -168,21 +182,121 @@ class FederationBase(object):
failure.trap(SynapseError) failure.trap(SynapseError)
with logcontext.PreserveLoggingContext(ctx): with logcontext.PreserveLoggingContext(ctx):
logger.warn( logger.warn(
"Signature check failed for %s", "Signature check failed for %s: %s",
pdu.event_id, pdu.event_id, failure.getErrorMessage(),
) )
return failure return failure
for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus): for deferred, pdu in zip(deferreds, pdus):
deferred.addCallbacks( deferred.addCallbacks(
callback, errback, callback, errback,
callbackArgs=[pdu, redacted], callbackArgs=[pdu],
errbackArgs=[pdu], errbackArgs=[pdu],
) )
return deferreds return deferreds
class PduToCheckSig(namedtuple("PduToCheckSig", [
"pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
])):
pass
def _check_sigs_on_pdus(keyring, pdus):
"""Check that the given events are correctly signed
Args:
keyring (synapse.crypto.Keyring): keyring object to do the checks
pdus (Collection[EventBase]): the events to be checked
Returns:
List[Deferred]: a Deferred for each event in pdus, which will either succeed if
the signatures are valid, or fail (with a SynapseError) if not.
"""
# (currently this is written assuming the v1 room structure; we'll probably want a
# separate function for checking v2 rooms)
# we want to check that the event is signed by:
#
# (a) the server which created the event_id
#
# (b) the sender's server.
#
# - except in the case of invites created from a 3pid invite, which are exempt
# from this check, because the sender has to match that of the original 3pid
# invite, but the event may come from a different HS, for reasons that I don't
# entirely grok (why do the senders have to match? and if they do, why doesn't the
# joining server ask the inviting server to do the switcheroo with
# exchange_third_party_invite?).
#
# That's pretty awful, since redacting such an invite will render it invalid
# (because it will then look like a regular invite without a valid signature),
# and signatures are *supposed* to be valid whether or not an event has been
# redacted. But this isn't the worst of the ways that 3pid invites are broken.
#
# let's start by getting the domain for each pdu, and flattening the event back
# to JSON.
pdus_to_check = [
PduToCheckSig(
pdu=p,
redacted_pdu_json=prune_event(p).get_pdu_json(),
event_id_domain=get_domain_from_id(p.event_id),
sender_domain=get_domain_from_id(p.sender),
deferreds=[],
)
for p in pdus
]
# first make sure that the event is signed by the event_id's domain
deferreds = keyring.verify_json_objects_for_server([
(p.event_id_domain, p.redacted_pdu_json)
for p in pdus_to_check
])
for p, d in zip(pdus_to_check, deferreds):
p.deferreds.append(d)
# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks.
pdus_to_check_sender = [
p for p in pdus_to_check
if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
]
more_deferreds = keyring.verify_json_objects_for_server([
(p.sender_domain, p.redacted_pdu_json)
for p in pdus_to_check_sender
])
for p, d in zip(pdus_to_check_sender, more_deferreds):
p.deferreds.append(d)
# replace lists of deferreds with single Deferreds
return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
def _flatten_deferred_list(deferreds):
"""Given a list of one or more deferreds, either return the single deferred, or
combine into a DeferredList.
"""
if len(deferreds) > 1:
return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
else:
assert len(deferreds) == 1
return deferreds[0]
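A small usage sketch of the combination semantics assumed here (Twisted environment; `_flatten_deferred_list` as defined above):

from twisted.internet import defer

d1, d2 = defer.Deferred(), defer.Deferred()
combined = _flatten_deferred_list([d1, d2])
# fireOnOneErrback=True: the combined deferred fails as soon as any child
# fails (wrapped in a defer.FirstError); consumeErrors=True stops the
# children from also reporting unhandled errors.
d1.errback(RuntimeError("signature check failed"))
combined.addErrback(lambda f: f.trap(defer.FirstError))  # swallow for the sketch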
def _is_invite_via_3pid(event):
return (
event.type == EventTypes.Member
and event.membership == Membership.INVITE
and "third_party_invite" in event.content
)
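Restating the rule these helpers implement, for room-v1 events: the event must be signed by the server that allocated the event_id and, unless it is a 3pid-derived invite, by the sender's server too. A hedged sketch (reusing `get_domain_from_id` and `_is_invite_via_3pid` from above):

def required_signing_domains(event):
    domains = {get_domain_from_id(event.event_id)}
    if not _is_invite_via_3pid(event):
        domains.add(get_domain_from_id(event.sender))
    return domains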
def event_from_pdu_json(pdu_json, outlier=False): def event_from_pdu_json(pdu_json, outlier=False):
"""Construct a FrozenEvent from an event json received over federation """Construct a FrozenEvent from an event json received over federation
@ -66,6 +66,14 @@ class FederationClient(FederationBase):
self.state = hs.get_state_handler() self.state = hs.get_state_handler()
self.transport_layer = hs.get_federation_transport_client() self.transport_layer = hs.get_federation_transport_client()
self._get_pdu_cache = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
expiry_ms=120 * 1000,
reset_expiry_on_get=False,
)
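For context, the cache constructed above behaves like a TTL map: entries expire 120s after insertion, and reads do not refresh the expiry. A toy sketch with the same observable semantics (not synapse's ExpiringCache):

import time

class TTLCache(object):
    def __init__(self, expiry_ms=120 * 1000):
        self._expiry_s = expiry_ms / 1000.0
        self._entries = {}  # key -> (inserted_at, value)

    def __setitem__(self, key, value):
        self._entries[key] = (time.time(), value)

    def get(self, key, default=None):
        entry = self._entries.get(key)
        if entry is None:
            return default
        inserted_at, value = entry
        if time.time() - inserted_at > self._expiry_s:
            del self._entries[key]  # expired: evict and miss
            return default
        return value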
def _clear_tried_cache(self): def _clear_tried_cache(self):
"""Clear pdu_destination_tried cache""" """Clear pdu_destination_tried cache"""
now = self._clock.time_msec() now = self._clock.time_msec()
@ -82,17 +90,6 @@ class FederationClient(FederationBase):
if destination_dict: if destination_dict:
self.pdu_destination_tried[event_id] = destination_dict self.pdu_destination_tried[event_id] = destination_dict
def start_get_pdu_cache(self):
self._get_pdu_cache = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
expiry_ms=120 * 1000,
reset_expiry_on_get=False,
)
self._get_pdu_cache.start()
@log_function @log_function
def make_query(self, destination, query_type, args, def make_query(self, destination, query_type, args,
retry_on_dns_fail=False, ignore_backoff=False): retry_on_dns_fail=False, ignore_backoff=False):
@ -212,8 +209,6 @@ class FederationClient(FederationBase):
Will attempt to get the PDU from each destination in the list until Will attempt to get the PDU from each destination in the list until
one succeeds. one succeeds.
This will persist the PDU locally upon receipt.
Args: Args:
destinations (list): Which home servers to query destinations (list): Which home servers to query
event_id (str): event to fetch event_id (str): event to fetch
@ -229,10 +224,9 @@ class FederationClient(FederationBase):
# TODO: Rate limit the number of times we try and get the same event. # TODO: Rate limit the number of times we try and get the same event.
if self._get_pdu_cache: ev = self._get_pdu_cache.get(event_id)
ev = self._get_pdu_cache.get(event_id) if ev:
if ev: defer.returnValue(ev)
defer.returnValue(ev)
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
@ -271,10 +265,10 @@ class FederationClient(FederationBase):
event_id, destination, e, event_id, destination, e,
) )
except NotRetryingDestination as e: except NotRetryingDestination as e:
logger.info(e.message) logger.info(str(e))
continue continue
except FederationDeniedError as e: except FederationDeniedError as e:
logger.info(e.message) logger.info(str(e))
continue continue
except Exception as e: except Exception as e:
pdu_attempts[destination] = now pdu_attempts[destination] = now
@ -285,7 +279,7 @@ class FederationClient(FederationBase):
) )
continue continue
if self._get_pdu_cache is not None and signed_pdu: if signed_pdu:
self._get_pdu_cache[event_id] = signed_pdu self._get_pdu_cache[event_id] = signed_pdu
defer.returnValue(signed_pdu) defer.returnValue(signed_pdu)
@ -293,8 +287,7 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks @defer.inlineCallbacks
@log_function @log_function
def get_state_for_room(self, destination, room_id, event_id): def get_state_for_room(self, destination, room_id, event_id):
"""Requests all of the `current` state PDUs for a given room from """Requests all of the room state at a given event from a remote home server.
a remote home server.
Args: Args:
destination (str): The remote homeserver to query for the state. destination (str): The remote homeserver to query for the state.
@ -302,9 +295,10 @@ class FederationClient(FederationBase):
event_id (str): The id of the event we want the state at. event_id (str): The id of the event we want the state at.
Returns: Returns:
Deferred: Results in a list of PDUs. Deferred[Tuple[List[EventBase], List[EventBase]]]:
A list of events in the state, and a list of events in the auth chain
for the given event.
""" """
try: try:
# First we try and ask for just the IDs, as that's far quicker if
# we have most of the state and auth_chain already. # we have most of the state and auth_chain already.
@ -510,7 +504,7 @@ class FederationClient(FederationBase):
else: else:
logger.warn( logger.warn(
"Failed to %s via %s: %i %s", "Failed to %s via %s: %i %s",
description, destination, e.code, e.message, description, destination, e.code, e.args[0],
) )
except Exception: except Exception:
logger.warn( logger.warn(
@ -875,7 +869,7 @@ class FederationClient(FederationBase):
except Exception as e: except Exception as e:
logger.exception( logger.exception(
"Failed to send_third_party_invite via %s: %s", "Failed to send_third_party_invite via %s: %s",
destination, e.message destination, str(e)
) )
raise RuntimeError("Failed to send to any server.") raise RuntimeError("Failed to send to any server.")
@ -46,6 +46,7 @@ from synapse.replication.http.federation import (
from synapse.types import get_domain_from_id from synapse.types import get_domain_from_id
from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import nested_logging_context
from synapse.util.logutils import log_function from synapse.util.logutils import log_function
# when processing incoming transactions, we try to handle multiple rooms in # when processing incoming transactions, we try to handle multiple rooms in
@ -99,7 +100,7 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks @defer.inlineCallbacks
@log_function @log_function
def on_incoming_transaction(self, transaction_data): def on_incoming_transaction(self, origin, transaction_data):
# keep this as early as possible to make the calculated origin ts as # keep this as early as possible to make the calculated origin ts as
# accurate as possible. # accurate as possible.
request_time = self._clock.time_msec() request_time = self._clock.time_msec()
@ -108,34 +109,33 @@ class FederationServer(FederationBase):
if not transaction.transaction_id: if not transaction.transaction_id:
raise Exception("Transaction missing transaction_id") raise Exception("Transaction missing transaction_id")
if not transaction.origin:
raise Exception("Transaction missing origin")
logger.debug("[%s] Got transaction", transaction.transaction_id) logger.debug("[%s] Got transaction", transaction.transaction_id)
# use a linearizer to ensure that we don't process the same transaction # use a linearizer to ensure that we don't process the same transaction
# multiple times in parallel. # multiple times in parallel.
with (yield self._transaction_linearizer.queue( with (yield self._transaction_linearizer.queue(
(transaction.origin, transaction.transaction_id), (origin, transaction.transaction_id),
)): )):
result = yield self._handle_incoming_transaction( result = yield self._handle_incoming_transaction(
transaction, request_time, origin, transaction, request_time,
) )
defer.returnValue(result) defer.returnValue(result)
@defer.inlineCallbacks @defer.inlineCallbacks
def _handle_incoming_transaction(self, transaction, request_time): def _handle_incoming_transaction(self, origin, transaction, request_time):
""" Process an incoming transaction and return the HTTP response """ Process an incoming transaction and return the HTTP response
Args: Args:
origin (unicode): the server making the request
transaction (Transaction): incoming transaction transaction (Transaction): incoming transaction
request_time (int): timestamp that the HTTP request arrived at request_time (int): timestamp that the HTTP request arrived at
Returns: Returns:
Deferred[(int, object)]: http response code and body Deferred[(int, object)]: http response code and body
""" """
response = yield self.transaction_actions.have_responded(transaction) response = yield self.transaction_actions.have_responded(origin, transaction)
if response: if response:
logger.debug( logger.debug(
@ -149,7 +149,7 @@ class FederationServer(FederationBase):
received_pdus_counter.inc(len(transaction.pdus)) received_pdus_counter.inc(len(transaction.pdus))
origin_host, _ = parse_server_name(transaction.origin) origin_host, _ = parse_server_name(origin)
pdus_by_room = {} pdus_by_room = {}
@ -188,21 +188,22 @@ class FederationServer(FederationBase):
for pdu in pdus_by_room[room_id]: for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id event_id = pdu.event_id
try: with nested_logging_context(event_id):
yield self._handle_received_pdu( try:
transaction.origin, pdu yield self._handle_received_pdu(
) origin, pdu
pdu_results[event_id] = {} )
except FederationError as e: pdu_results[event_id] = {}
logger.warn("Error handling PDU %s: %s", event_id, e) except FederationError as e:
pdu_results[event_id] = {"error": str(e)} logger.warn("Error handling PDU %s: %s", event_id, e)
except Exception as e: pdu_results[event_id] = {"error": str(e)}
f = failure.Failure() except Exception as e:
pdu_results[event_id] = {"error": str(e)} f = failure.Failure()
logger.error( pdu_results[event_id] = {"error": str(e)}
"Failed to handle PDU %s: %s", logger.error(
event_id, f.getTraceback().rstrip(), "Failed to handle PDU %s: %s",
) event_id, f.getTraceback().rstrip(),
)
yield concurrently_execute( yield concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(), process_pdus_for_room, pdus_by_room.keys(),
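The per-room worker above isolates failures per event; stripped of the deferreds and log contexts, the result-map shape is (`handle` is a hypothetical stand-in for _handle_received_pdu):

def process_pdus(pdus, handle):
    results = {}
    for pdu in pdus:
        try:
            handle(pdu)
            results[pdu.event_id] = {}   # success: empty result
        except Exception as e:
            results[pdu.event_id] = {"error": str(e)}
    return results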
@ -212,7 +213,7 @@ class FederationServer(FederationBase):
if hasattr(transaction, "edus"): if hasattr(transaction, "edus"):
for edu in (Edu(**x) for x in transaction.edus): for edu in (Edu(**x) for x in transaction.edus):
yield self.received_edu( yield self.received_edu(
transaction.origin, origin,
edu.edu_type, edu.edu_type,
edu.content edu.content
) )
@ -224,6 +225,7 @@ class FederationServer(FederationBase):
logger.debug("Returning: %s", str(response)) logger.debug("Returning: %s", str(response))
yield self.transaction_actions.set_response( yield self.transaction_actions.set_response(
origin,
transaction, transaction,
200, response 200, response
) )
@ -618,7 +620,7 @@ class FederationServer(FederationBase):
) )
yield self.handler.on_receive_pdu( yield self.handler.on_receive_pdu(
origin, pdu, get_missing=True, sent_to_us_directly=True, origin, pdu, sent_to_us_directly=True,
) )
def __str__(self): def __str__(self):
@ -838,9 +840,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
) )
return self._send_edu( return self._send_edu(
edu_type=edu_type, edu_type=edu_type,
origin=origin, origin=origin,
content=content, content=content,
) )
def on_query(self, query_type, args): def on_query(self, query_type, args):
@ -851,6 +853,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
return handler(args) return handler(args)
return self._get_query_client( return self._get_query_client(
query_type=query_type, query_type=query_type,
args=args, args=args,
) )
@ -36,7 +36,7 @@ class TransactionActions(object):
self.store = datastore self.store = datastore
@log_function @log_function
def have_responded(self, transaction): def have_responded(self, origin, transaction):
""" Have we already responded to a transaction with the same id and """ Have we already responded to a transaction with the same id and
origin? origin?
@ -50,11 +50,11 @@ class TransactionActions(object):
"transaction_id") "transaction_id")
return self.store.get_received_txn_response( return self.store.get_received_txn_response(
transaction.transaction_id, transaction.origin transaction.transaction_id, origin
) )
@log_function @log_function
def set_response(self, transaction, code, response): def set_response(self, origin, transaction, code, response):
""" Persist how we responded to a transaction. """ Persist how we responded to a transaction.
Returns: Returns:
@ -66,7 +66,7 @@ class TransactionActions(object):
return self.store.set_received_txn_response( return self.store.set_received_txn_response(
transaction.transaction_id, transaction.transaction_id,
transaction.origin, origin,
code, code,
response, response,
) )
@ -32,7 +32,7 @@ Events are replicated via a separate events stream.
import logging import logging
from collections import namedtuple from collections import namedtuple
from six import iteritems, itervalues from six import iteritems
from sortedcontainers import SortedDict from sortedcontainers import SortedDict
@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object):
user_ids = set( user_ids = set(
user_id user_id
for uids in itervalues(self.presence_changed) for uids in self.presence_changed.values()
for user_id in uids for user_id in uids
) )
@ -137,26 +137,6 @@ class TransactionQueue(object):
self._processing_pending_presence = False self._processing_pending_presence = False
def can_send_to(self, destination):
"""Can we send messages to the given server?
We can't send messages to ourselves. If we are running on localhost
then we can only federate with other servers running on localhost.
Otherwise we only federate with servers on a public domain.
Args:
destination(str): The server we are possibly trying to send to.
Returns:
bool: True if we can send to the server.
"""
if destination == self.server_name:
return False
if self.server_name.startswith("localhost"):
return destination.startswith("localhost")
else:
return not destination.startswith("localhost")
def notify_new_events(self, current_id): def notify_new_events(self, current_id):
"""This gets called when we have some new events we might want to """This gets called when we have some new events we might want to
send out to other servers. send out to other servers.
@ -279,10 +259,7 @@ class TransactionQueue(object):
self._order += 1 self._order += 1
destinations = set(destinations) destinations = set(destinations)
destinations = set( destinations.discard(self.server_name)
dest for dest in destinations if self.can_send_to(dest)
)
logger.debug("Sending to: %s", str(destinations)) logger.debug("Sending to: %s", str(destinations))
if not destinations: if not destinations:
@ -358,7 +335,7 @@ class TransactionQueue(object):
for destinations, states in hosts_and_states: for destinations, states in hosts_and_states:
for destination in destinations: for destination in destinations:
if not self.can_send_to(destination): if destination == self.server_name:
continue continue
self.pending_presence_by_dest.setdefault( self.pending_presence_by_dest.setdefault(
@ -377,7 +354,8 @@ class TransactionQueue(object):
content=content, content=content,
) )
if not self.can_send_to(destination): if destination == self.server_name:
logger.info("Not sending EDU to ourselves")
return return
sent_edus_counter.inc() sent_edus_counter.inc()
@ -392,10 +370,8 @@ class TransactionQueue(object):
self._attempt_new_transaction(destination) self._attempt_new_transaction(destination)
def send_device_messages(self, destination): def send_device_messages(self, destination):
if destination == self.server_name or destination == "localhost": if destination == self.server_name:
return logger.info("Not sending device update to ourselves")
if not self.can_send_to(destination):
return return
self._attempt_new_transaction(destination) self._attempt_new_transaction(destination)
@ -463,7 +439,19 @@ class TransactionQueue(object):
# pending_transactions flag. # pending_transactions flag.
pending_pdus = self.pending_pdus_by_dest.pop(destination, []) pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
# We can include at most 50 PDUs per transaction
pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
if leftover_pdus:
self.pending_pdus_by_dest[destination] = leftover_pdus
pending_edus = self.pending_edus_by_dest.pop(destination, []) pending_edus = self.pending_edus_by_dest.pop(destination, [])
# We can include at most 100 EDUs per transaction
pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
if leftover_edus:
self.pending_edus_by_dest[destination] = leftover_edus
pending_presence = self.pending_presence_by_dest.pop(destination, {}) pending_presence = self.pending_presence_by_dest.pop(destination, {})
pending_edus.extend( pending_edus.extend(
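Both caps above follow the same take-and-requeue shape; a generic sketch (`take_batch` is illustrative, not synapse code):

def take_batch(queue_by_dest, destination, limit):
    pending = queue_by_dest.pop(destination, [])
    batch, leftover = pending[:limit], pending[limit:]
    if leftover:
        # push the remainder back for the next transaction
        queue_by_dest[destination] = leftover
    return batch

# e.g. pdus = take_batch(pending_pdus_by_dest, destination, 50)
#      edus = take_batch(pending_edus_by_dest, destination, 100)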
@ -15,7 +15,8 @@
# limitations under the License. # limitations under the License.
import logging import logging
import urllib
from six.moves import urllib
from twisted.internet import defer from twisted.internet import defer
@ -951,4 +952,4 @@ def _create_path(prefix, path, *args):
Returns: Returns:
str str
""" """
return prefix + path % tuple(urllib.quote(arg, "") for arg in args) return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
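The six.moves import gives the same quoting API on both Python 2 and 3 (urllib.parse.quote on py3). An illustrative evaluation of the pattern above:

from six.moves import urllib

path = "/_matrix/federation/v1" + "/state/%s" % (
    urllib.parse.quote("!room:example.org", ""),
)
# -> '/_matrix/federation/v1/state/%21room%3Aexample.org'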
@ -90,8 +90,8 @@ class Authenticator(object):
@defer.inlineCallbacks @defer.inlineCallbacks
def authenticate_request(self, request, content): def authenticate_request(self, request, content):
json_request = { json_request = {
"method": request.method, "method": request.method.decode('ascii'),
"uri": request.uri, "uri": request.uri.decode('ascii'),
"destination": self.server_name, "destination": self.server_name,
"signatures": {}, "signatures": {},
} }
@ -252,7 +252,7 @@ class BaseFederationServlet(object):
by the callback method. None if the request has already been handled. by the callback method. None if the request has already been handled.
""" """
content = None content = None
if request.method in ["PUT", "POST"]: if request.method in [b"PUT", b"POST"]:
# TODO: Handle other method types? other content types? # TODO: Handle other method types? other content types?
content = parse_json_object_from_request(request) content = parse_json_object_from_request(request)
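These two hunks are part of the py3 port: twisted hands request methods, URIs and query values to the servlet as bytes, so they are decoded (or compared as bytes) at the boundary. A sketch with illustrative values:

method = b"PUT"
uri = b"/_matrix/federation/v1/send/123"
json_request = {
    "method": method.decode("ascii"),
    "uri": uri.decode("ascii"),
}
if method in [b"PUT", b"POST"]:
    pass  # a JSON body would be parsed here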
@ -353,7 +353,7 @@ class FederationSendServlet(BaseFederationServlet):
try: try:
code, response = yield self.handler.on_incoming_transaction( code, response = yield self.handler.on_incoming_transaction(
transaction_data origin, transaction_data,
) )
except Exception: except Exception:
logger.exception("on_incoming_transaction failed") logger.exception("on_incoming_transaction failed")
@ -386,7 +386,7 @@ class FederationStateServlet(BaseFederationServlet):
return self.handler.on_context_state_request( return self.handler.on_context_state_request(
origin, origin,
context, context,
query.get("event_id", [None])[0], parse_string_from_args(query, "event_id", None),
) )
@ -397,7 +397,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
return self.handler.on_state_ids_request( return self.handler.on_state_ids_request(
origin, origin,
room_id, room_id,
query.get("event_id", [None])[0], parse_string_from_args(query, "event_id", None),
) )
@ -405,14 +405,12 @@ class FederationBackfillServlet(BaseFederationServlet):
PATH = "/backfill/(?P<context>[^/]*)/" PATH = "/backfill/(?P<context>[^/]*)/"
def on_GET(self, origin, content, query, context): def on_GET(self, origin, content, query, context):
versions = query["v"] versions = [x.decode('ascii') for x in query[b"v"]]
limits = query["limit"] limit = parse_integer_from_args(query, "limit", None)
if not limits: if not limit:
return defer.succeed((400, {"error": "Did not include limit param"})) return defer.succeed((400, {"error": "Did not include limit param"}))
limit = int(limits[-1])
return self.handler.on_backfill_request(origin, context, versions, limit) return self.handler.on_backfill_request(origin, context, versions, limit)
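The same bytes-at-the-boundary rule applies to query args; a hedged sketch of the parsing above (values illustrative; parse_integer_from_args behaves roughly like the inline fallback here):

query = {b"v": [b"$abc:example.org"], b"limit": [b"10"]}
versions = [x.decode("ascii") for x in query[b"v"]]
limit = int(query[b"limit"][-1]) if query.get(b"limit") else None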
@ -423,7 +421,7 @@ class FederationQueryServlet(BaseFederationServlet):
def on_GET(self, origin, content, query, query_type): def on_GET(self, origin, content, query, query_type):
return self.handler.on_query_request( return self.handler.on_query_request(
query_type, query_type,
{k: v[0].decode("utf-8") for k, v in query.items()} {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()}
) )
@ -630,14 +628,14 @@ class OpenIdUserInfo(BaseFederationServlet):
@defer.inlineCallbacks @defer.inlineCallbacks
def on_GET(self, origin, content, query): def on_GET(self, origin, content, query):
token = query.get("access_token", [None])[0] token = query.get(b"access_token", [None])[0]
if token is None: if token is None:
defer.returnValue((401, { defer.returnValue((401, {
"errcode": "M_MISSING_TOKEN", "error": "Access Token required" "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
})) }))
return return
user_id = yield self.handler.on_openid_userinfo(token) user_id = yield self.handler.on_openid_userinfo(token.decode('ascii'))
if user_id is None: if user_id is None:
defer.returnValue((401, { defer.returnValue((401, {
@ -895,22 +895,24 @@ class AuthHandler(BaseHandler):
Args: Args:
password (unicode): Password to hash. password (unicode): Password to hash.
stored_hash (unicode): Expected hash value. stored_hash (bytes): Expected hash value.
Returns: Returns:
Deferred(bool): Whether self.hash(password) == stored_hash. Deferred(bool): Whether self.hash(password) == stored_hash.
""" """
def _do_validate_hash(): def _do_validate_hash():
# Normalise the Unicode in the password # Normalise the Unicode in the password
pw = unicodedata.normalize("NFKC", password) pw = unicodedata.normalize("NFKC", password)
return bcrypt.checkpw( return bcrypt.checkpw(
pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"), pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
stored_hash.encode('utf8') stored_hash
) )
if stored_hash: if stored_hash:
if not isinstance(stored_hash, bytes):
stored_hash = stored_hash.encode('ascii')
return make_deferred_yieldable( return make_deferred_yieldable(
threads.deferToThreadPool( threads.deferToThreadPool(
self.hs.get_reactor(), self.hs.get_reactor(),
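Standalone, the comparison above looks roughly like this (bcrypt as in synapse's dependencies; the pepper argument is illustrative):

import unicodedata

import bcrypt

def validate_hash(password, stored_hash, pepper=""):
    if not isinstance(stored_hash, bytes):
        stored_hash = stored_hash.encode("ascii")
    pw = unicodedata.normalize("NFKC", password)  # normalise the unicode
    return bcrypt.checkpw(pw.encode("utf8") + pepper.encode("utf8"), stored_hash)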
@ -20,7 +20,14 @@ import string
from twisted.internet import defer from twisted.internet import defer
from synapse.api.constants import EventTypes from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
NotFoundError,
StoreError,
SynapseError,
)
from synapse.types import RoomAlias, UserID, get_domain_from_id from synapse.types import RoomAlias, UserID, get_domain_from_id
from ._base import BaseHandler from ._base import BaseHandler
@ -109,7 +116,13 @@ class DirectoryHandler(BaseHandler):
def delete_association(self, requester, user_id, room_alias): def delete_association(self, requester, user_id, room_alias):
# association deletion for human users # association deletion for human users
can_delete = yield self._user_can_delete_alias(room_alias, user_id) try:
can_delete = yield self._user_can_delete_alias(room_alias, user_id)
except StoreError as e:
if e.code == 404:
raise NotFoundError("Unknown room alias")
raise
if not can_delete: if not can_delete:
raise AuthError( raise AuthError(
403, "You don't have permission to delete the alias.", 403, "You don't have permission to delete the alias.",
@ -320,7 +333,7 @@ class DirectoryHandler(BaseHandler):
def _user_can_delete_alias(self, alias, user_id): def _user_can_delete_alias(self, alias, user_id):
creator = yield self.store.get_room_alias_creator(alias.to_string()) creator = yield self.store.get_room_alias_creator(alias.to_string())
if creator and creator == user_id: if creator is not None and creator == user_id:
defer.returnValue(True) defer.returnValue(True)
is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
@ -330,7 +330,8 @@ class E2eKeysHandler(object):
(algorithm, key_id, ex_json, key) (algorithm, key_id, ex_json, key)
) )
else: else:
new_keys.append((algorithm, key_id, encode_canonical_json(key))) new_keys.append((
algorithm, key_id, encode_canonical_json(key).decode('ascii')))
yield self.store.add_e2e_one_time_keys( yield self.store.add_e2e_one_time_keys(
user_id, device_id, time_now, new_keys user_id, device_id, time_now, new_keys
@ -340,7 +341,7 @@ class E2eKeysHandler(object):
def _exception_to_failure(e): def _exception_to_failure(e):
if isinstance(e, CodeMessageException): if isinstance(e, CodeMessageException):
return { return {
"status": e.code, "message": e.message, "status": e.code, "message": str(e),
} }
if isinstance(e, NotRetryingDestination): if isinstance(e, NotRetryingDestination):
@ -358,7 +359,7 @@ def _exception_to_failure(e):
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
# give a string for e.message, which json then fails to serialize. # give a string for e.message, which json then fails to serialize.
return { return {
"status": 503, "message": str(e.message), "status": 503, "message": str(e),
} }
@ -18,7 +18,6 @@
import itertools import itertools
import logging import logging
import sys
import six import six
from six import iteritems, itervalues from six import iteritems, itervalues
@ -69,6 +68,27 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def shortstr(iterable, maxitems=5):
"""If iterable has maxitems or fewer, return the stringification of a list
containing those items.
Otherwise, return the stringification of a list with the first maxitems items,
followed by "...".
Args:
iterable (Iterable): iterable to truncate
maxitems (int): number of items to return before truncating
Returns:
unicode
"""
items = list(itertools.islice(iterable, maxitems + 1))
if len(items) <= maxitems:
return str(items)
return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]"
class FederationHandler(BaseHandler): class FederationHandler(BaseHandler):
"""Handles events that originated from federation. """Handles events that originated from federation.
Responsible for: Responsible for:
@ -85,7 +105,7 @@ class FederationHandler(BaseHandler):
self.hs = hs self.hs = hs
self.store = hs.get_datastore() self.store = hs.get_datastore() # type: synapse.storage.DataStore
self.federation_client = hs.get_federation_client() self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler() self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname self.server_name = hs.hostname
@ -114,9 +134,8 @@ class FederationHandler(BaseHandler):
self._room_pdu_linearizer = Linearizer("fed_room_pdu") self._room_pdu_linearizer = Linearizer("fed_room_pdu")
@defer.inlineCallbacks @defer.inlineCallbacks
@log_function
def on_receive_pdu( def on_receive_pdu(
self, origin, pdu, get_missing=True, sent_to_us_directly=False, self, origin, pdu, sent_to_us_directly=False,
): ):
""" Process a PDU received via a federation /send/ transaction, or """ Process a PDU received via a federation /send/ transaction, or
via backfill of missing prev_events via backfill of missing prev_events
@ -125,14 +144,23 @@ class FederationHandler(BaseHandler):
origin (str): server which initiated the /send/ transaction. Will origin (str): server which initiated the /send/ transaction. Will
be used to fetch missing events or state. be used to fetch missing events or state.
pdu (FrozenEvent): received PDU pdu (FrozenEvent): received PDU
get_missing (bool): True if we should fetch missing prev_events sent_to_us_directly (bool): True if this event was pushed to us; False if
we pulled it as the result of a missing prev_event.
Returns (Deferred): completes with None Returns (Deferred): completes with None
""" """
room_id = pdu.room_id
event_id = pdu.event_id
logger.info(
"[%s %s] handling received PDU: %s",
room_id, event_id, pdu,
)
# We reprocess pdus when we have seen them only as outliers # We reprocess pdus when we have seen them only as outliers
existing = yield self.store.get_event( existing = yield self.store.get_event(
pdu.event_id, event_id,
allow_none=True, allow_none=True,
allow_rejected=True, allow_rejected=True,
) )
@ -147,7 +175,7 @@ class FederationHandler(BaseHandler):
) )
) )
if already_seen: if already_seen:
logger.debug("Already seen pdu %s", pdu.event_id) logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
return return
# do some initial sanity-checking of the event. In particular, make # do some initial sanity-checking of the event. In particular, make
@ -156,6 +184,7 @@ class FederationHandler(BaseHandler):
try: try:
self._sanity_check_event(pdu) self._sanity_check_event(pdu)
except SynapseError as err: except SynapseError as err:
logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id)
raise FederationError( raise FederationError(
"ERROR", "ERROR",
err.code, err.code,
@ -165,10 +194,12 @@ class FederationHandler(BaseHandler):
# If we are currently in the process of joining this room, then we # If we are currently in the process of joining this room, then we
# queue up events for later processing. # queue up events for later processing.
if pdu.room_id in self.room_queues: if room_id in self.room_queues:
logger.info("Ignoring PDU %s for room %s from %s for now; join " logger.info(
"in progress", pdu.event_id, pdu.room_id, origin) "[%s %s] Queuing PDU from %s for now: join in progress",
self.room_queues[pdu.room_id].append((pdu, origin)) room_id, event_id, origin,
)
self.room_queues[room_id].append((pdu, origin))
return return
# If we're no longer in the room just ditch the event entirely. This # If we're no longer in the room just ditch the event entirely. This
@ -179,7 +210,7 @@ class FederationHandler(BaseHandler):
# we should check if we *are* in fact in the room. If we are then we # we should check if we *are* in fact in the room. If we are then we
# can magically rejoin the room. # can magically rejoin the room.
is_in_room = yield self.auth.check_host_in_room( is_in_room = yield self.auth.check_host_in_room(
pdu.room_id, room_id,
self.server_name self.server_name
) )
if not is_in_room: if not is_in_room:
@ -188,8 +219,8 @@ class FederationHandler(BaseHandler):
) )
if was_in_room: if was_in_room:
logger.info( logger.info(
"Ignoring PDU %s for room %s from %s as we've left the room!", "[%s %s] Ignoring PDU from %s as we've left the room",
pdu.event_id, pdu.room_id, origin, room_id, event_id, origin,
) )
defer.returnValue(None) defer.returnValue(None)
@ -204,8 +235,8 @@ class FederationHandler(BaseHandler):
) )
logger.debug( logger.debug(
"_handle_new_pdu min_depth for %s: %d", "[%s %s] min_depth: %d",
pdu.room_id, min_depth room_id, event_id, min_depth,
) )
prevs = {e_id for e_id, _ in pdu.prev_events} prevs = {e_id for e_id, _ in pdu.prev_events}
@ -218,17 +249,18 @@ class FederationHandler(BaseHandler):
# send to the clients. # send to the clients.
pdu.internal_metadata.outlier = True pdu.internal_metadata.outlier = True
elif min_depth and pdu.depth > min_depth: elif min_depth and pdu.depth > min_depth:
if get_missing and prevs - seen: missing_prevs = prevs - seen
if sent_to_us_directly and missing_prevs:
# If we're missing stuff, ensure we only fetch stuff one # If we're missing stuff, ensure we only fetch stuff one
# at a time. # at a time.
logger.info( logger.info(
"Acquiring lock for room %r to fetch %d missing events: %r...", "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
pdu.room_id, len(prevs - seen), list(prevs - seen)[:5], room_id, event_id, len(missing_prevs), shortstr(missing_prevs),
) )
with (yield self._room_pdu_linearizer.queue(pdu.room_id)): with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
logger.info( logger.info(
"Acquired lock for room %r to fetch %d missing events", "[%s %s] Acquired room lock to fetch %d missing prev_events",
pdu.room_id, len(prevs - seen), room_id, event_id, len(missing_prevs),
) )
yield self._get_missing_events_for_pdu( yield self._get_missing_events_for_pdu(
@ -241,69 +273,150 @@ class FederationHandler(BaseHandler):
if not prevs - seen: if not prevs - seen:
logger.info( logger.info(
"Found all missing prev events for %s", pdu.event_id "[%s %s] Found all missing prev_events",
room_id, event_id,
) )
elif prevs - seen: elif missing_prevs:
logger.info( logger.info(
"Not fetching %d missing events for room %r,event %s: %r...", "[%s %s] Not recursively fetching %d missing prev_events: %s",
len(prevs - seen), pdu.room_id, pdu.event_id, room_id, event_id, len(missing_prevs), shortstr(missing_prevs),
list(prevs - seen)[:5],
) )
if sent_to_us_directly and prevs - seen: if prevs - seen:
# If they have sent it to us directly, and the server # We've still not been able to get all of the prev_events for this event.
# isn't telling us about the auth events that it's #
# made a message referencing, we explode # In this case, we need to fall back to asking another server in the
raise FederationError( # federation for the state at this event. That's ok provided we then
"ERROR", # resolve the state against other bits of the DAG before using it (which
403, # will ensure that you can't just take over a room by sending an event,
( # withholding its prev_events, and declaring yourself to be an admin in
"Your server isn't divulging details about prev_events " # the subsequent state request).
"referenced in this event." #
), # Now, if we're pulling this event as a missing prev_event, then clearly
affected=pdu.event_id, # this event is not going to become the only forward-extremity and we are
) # guaranteed to resolve its state against our existing forward
elif prevs - seen: # extremities, so that should be fine.
# Calculate the state of the previous events, and #
# de-conflict them to find the current state. # On the other hand, if this event was pushed to us, it is possible for
state_groups = [] # it to become the only forward-extremity in the room, and we would then
# trust its state to be the state for the whole room. This is very bad.
# Further, if the event was pushed to us, there is no excuse for us not to
# have all the prev_events. We therefore reject any such events.
#
# XXX this really feels like it could/should be merged with the above,
# but there is an interaction with min_depth that I'm not really
# following.
if sent_to_us_directly:
logger.warn(
"[%s %s] Failed to fetch %d prev events: rejecting",
room_id, event_id, len(prevs - seen),
)
raise FederationError(
"ERROR",
403,
(
"Your server isn't divulging details about prev_events "
"referenced in this event."
),
affected=pdu.event_id,
)
# Calculate the state after each of the previous events, and
# resolve them to find the correct state at the current event.
auth_chains = set() auth_chains = set()
event_map = {
event_id: pdu,
}
try: try:
# Get the state of the events we know about # Get the state of the events we know about
ours = yield self.store.get_state_groups(pdu.room_id, list(seen)) ours = yield self.store.get_state_groups_ids(room_id, seen)
state_groups.append(ours)
# state_maps is a list of mappings from (type, state_key) to event_id
# type: list[dict[tuple[str, str], str]]
state_maps = list(ours.values())
# we don't need this any more, let's delete it.
del ours
# Ask the remote server for the states we don't # Ask the remote server for the states we don't
# know about # know about
for p in prevs - seen: for p in prevs - seen:
state, got_auth_chain = ( logger.info(
yield self.federation_client.get_state_for_room( "[%s %s] Requesting state at missing prev_event %s",
origin, pdu.room_id, p room_id, event_id, p,
)
) )
auth_chains.update(got_auth_chain)
state_group = {(x.type, x.state_key): x.event_id for x in state} with logcontext.nested_logging_context(p):
state_groups.append(state_group) # note that if any of the missing prevs share missing state or
# auth events, the requests to fetch those events are deduped
# by the get_pdu_cache in federation_client.
remote_state, got_auth_chain = (
yield self.federation_client.get_state_for_room(
origin, room_id, p,
)
)
# we want the state *after* p; get_state_for_room returns the
# state *before* p.
remote_event = yield self.federation_client.get_pdu(
[origin], p, outlier=True,
)
if remote_event is None:
raise Exception(
"Unable to get missing prev_event %s" % (p, )
)
if remote_event.is_state():
remote_state.append(remote_event)
# XXX hrm I'm not convinced that duplicate events will compare
# for equality, so I'm not sure this does what the author
# hoped.
auth_chains.update(got_auth_chain)
remote_state_map = {
(x.type, x.state_key): x.event_id for x in remote_state
}
state_maps.append(remote_state_map)
for x in remote_state:
event_map[x.event_id] = x
# Resolve any conflicting state # Resolve any conflicting state
@defer.inlineCallbacks
def fetch(ev_ids): def fetch(ev_ids):
return self.store.get_events( fetched = yield self.store.get_events(
ev_ids, get_prev_content=False, check_redacted=False ev_ids, get_prev_content=False, check_redacted=False,
) )
# add any events we fetch here to the `event_map` so that we
# can use them to build the state event list below.
event_map.update(fetched)
defer.returnValue(fetched)
room_version = yield self.store.get_room_version(pdu.room_id) room_version = yield self.store.get_room_version(room_id)
state_map = yield resolve_events_with_factory( state_map = yield resolve_events_with_factory(
room_version, state_groups, {pdu.event_id: pdu}, fetch room_version, state_maps, event_map, fetch,
) )
state = (yield self.store.get_events(state_map.values())).values() # we need to give _process_received_pdu the actual state events
# rather than event ids, so generate that now.
state = [
event_map[e] for e in six.itervalues(state_map)
]
auth_chain = list(auth_chains) auth_chain = list(auth_chains)
except Exception: except Exception:
logger.warn(
"[%s %s] Error attempting to resolve state at missing "
"prev_events",
room_id, event_id, exc_info=True,
)
raise FederationError( raise FederationError(
"ERROR", "ERROR",
403, 403,
"We can't get valid state history.", "We can't get valid state history.",
affected=pdu.event_id, affected=event_id,
) )
yield self._process_received_pdu( yield self._process_received_pdu(
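Distilled, the push-vs-pull policy described in the comments above is (a sketch, not synapse code):

def missing_prevs_action(missing_prevs, sent_to_us_directly):
    if not missing_prevs:
        return "process"            # nothing missing: handle normally
    if sent_to_us_directly:
        return "reject"             # pushed to us with unfetchable prevs: 403
    return "resolve_remote_state"   # we pulled it: fetch state and resolve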
@ -322,15 +435,16 @@ class FederationHandler(BaseHandler):
prevs (set(str)): List of event ids which we are missing prevs (set(str)): List of event ids which we are missing
min_depth (int): Minimum depth of events to return. min_depth (int): Minimum depth of events to return.
""" """
# We recalculate seen, since it may have changed.
room_id = pdu.room_id
event_id = pdu.event_id
seen = yield self.store.have_seen_events(prevs) seen = yield self.store.have_seen_events(prevs)
if not prevs - seen: if not prevs - seen:
return return
latest = yield self.store.get_latest_event_ids_in_room( latest = yield self.store.get_latest_event_ids_in_room(room_id)
pdu.room_id
)
# We add the prev events that we have seen to the latest # We add the prev events that we have seen to the latest
# list to ensure the remote server doesn't give them to us # list to ensure the remote server doesn't give them to us
@ -338,8 +452,8 @@ class FederationHandler(BaseHandler):
latest |= seen latest |= seen
logger.info( logger.info(
"Missing %d events for room %r pdu %s: %r...", "[%s %s]: Requesting %d prev_events: %s",
len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5] room_id, event_id, len(prevs - seen), shortstr(prevs - seen)
) )
# XXX: we set timeout to 10s to help workaround # XXX: we set timeout to 10s to help workaround
@ -360,49 +474,88 @@ class FederationHandler(BaseHandler):
# apparently. # apparently.
# #
# see https://github.com/matrix-org/synapse/pull/1744 # see https://github.com/matrix-org/synapse/pull/1744
#
# ----
#
# Update richvdh 2018/09/18: There are a number of problems with timing this
# request out aggressively on the client side:
#
# - it plays badly with the server-side rate-limiter, which starts tarpitting you
# if you send too many requests at once, so you end up with the server carefully
# working through the backlog of your requests, which you have already timed
# out.
#
# - for this request in particular, we now (as of
# https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
# server can't produce a plausible-looking set of prev_events - so we become
# much more likely to reject the event.
#
# - contrary to what it says above, we do *not* fall back to fetching fresh state
# for the room if get_missing_events times out. Rather, we give up processing
# the PDU whose prevs we are missing, which then makes it much more likely that
# we'll end up back here for the *next* PDU in the list, which exacerbates the
# problem.
#
# - the aggressive 10s timeout was introduced to deal with incoming federation
# requests taking 8 hours to process. It's not entirely clear why that was going
# on; certainly there were other issues causing traffic storms which are now
# resolved, and I think in any case we may be more sensible about our locking
# now. We're *certainly* more sensible about our logging.
#
# All that said: Let's try increasing the timeout to 60s and see what happens.
missing_events = yield self.federation_client.get_missing_events( missing_events = yield self.federation_client.get_missing_events(
origin, origin,
pdu.room_id, room_id,
earliest_events_ids=list(latest), earliest_events_ids=list(latest),
latest_events=[pdu], latest_events=[pdu],
limit=10, limit=10,
min_depth=min_depth, min_depth=min_depth,
timeout=10000, timeout=60000,
) )
logger.info( logger.info(
"Got %d events: %r...", "[%s %s]: Got %d prev_events: %s",
len(missing_events), [e.event_id for e in missing_events[:5]] room_id, event_id, len(missing_events), shortstr(missing_events),
) )
# We want to sort these by depth so we process them and # We want to sort these by depth so we process them and
# tell clients about them in order. # tell clients about them in order.
missing_events.sort(key=lambda x: x.depth) missing_events.sort(key=lambda x: x.depth)
for e in missing_events: for ev in missing_events:
logger.info("Handling found event %s", e.event_id) logger.info(
try: "[%s %s] Handling received prev_event %s",
yield self.on_receive_pdu( room_id, event_id, ev.event_id,
origin, )
e, with logcontext.nested_logging_context(ev.event_id):
get_missing=False try:
) yield self.on_receive_pdu(
except FederationError as e: origin,
if e.code == 403: ev,
logger.warn("Event %s failed history check.") sent_to_us_directly=False,
else: )
raise except FederationError as e:
if e.code == 403:
logger.warn(
"[%s %s] Received prev_event %s failed history check.",
room_id, event_id, ev.event_id,
)
else:
raise
@log_function
@defer.inlineCallbacks @defer.inlineCallbacks
def _process_received_pdu(self, origin, pdu, state, auth_chain): def _process_received_pdu(self, origin, event, state, auth_chain):
""" Called when we have a new pdu. We need to do auth checks and put it """ Called when we have a new pdu. We need to do auth checks and put it
through the StateHandler. through the StateHandler.
""" """
event = pdu room_id = event.room_id
event_id = event.event_id
logger.debug("Processing event: %s", event) logger.debug(
"[%s %s] Processing event: %s",
room_id, event_id, event,
)
# FIXME (erikj): Awful hack to make the case where we are not currently # FIXME (erikj): Awful hack to make the case where we are not currently
# in the room work # in the room work
@ -411,15 +564,16 @@ class FederationHandler(BaseHandler):
# event. # event.
if state and auth_chain and not event.internal_metadata.is_outlier(): if state and auth_chain and not event.internal_metadata.is_outlier():
is_in_room = yield self.auth.check_host_in_room( is_in_room = yield self.auth.check_host_in_room(
event.room_id, room_id,
self.server_name self.server_name
) )
else: else:
is_in_room = True is_in_room = True
if not is_in_room: if not is_in_room:
logger.info( logger.info(
"Got event for room we're not in: %r %r", "[%s %s] Got event for room we're not in",
event.room_id, event.event_id room_id, event_id,
) )
try: try:
@ -431,7 +585,7 @@ class FederationHandler(BaseHandler):
"ERROR", "ERROR",
e.code, e.code,
e.msg, e.msg,
affected=event.event_id, affected=event_id,
) )
else: else:
@ -464,6 +618,10 @@ class FederationHandler(BaseHandler):
}) })
seen_ids.add(e.event_id) seen_ids.add(e.event_id)
logger.info(
"[%s %s] persisting newly-received auth/state events %s",
room_id, event_id, [e["event"].event_id for e in event_infos]
)
yield self._handle_new_events(origin, event_infos) yield self._handle_new_events(origin, event_infos)
try: try:
@ -480,12 +638,12 @@ class FederationHandler(BaseHandler):
affected=event.event_id, affected=event.event_id,
) )
room = yield self.store.get_room(event.room_id) room = yield self.store.get_room(room_id)
if not room: if not room:
try: try:
yield self.store.store_room( yield self.store.store_room(
room_id=event.room_id, room_id=room_id,
room_creator_user_id="", room_creator_user_id="",
is_public=False, is_public=False,
) )
@ -513,7 +671,7 @@ class FederationHandler(BaseHandler):
if newly_joined: if newly_joined:
user = UserID.from_string(event.state_key) user = UserID.from_string(event.state_key)
yield self.user_joined_room(user, event.room_id) yield self.user_joined_room(user, room_id)
@log_function @log_function
@defer.inlineCallbacks @defer.inlineCallbacks
@ -594,7 +752,7 @@ class FederationHandler(BaseHandler):
required_auth = set( required_auth = set(
a_id a_id
for event in events + state_events.values() + auth_events.values() for event in events + list(state_events.values()) + list(auth_events.values())
for a_id, _ in event.auth_events for a_id, _ in event.auth_events
) )
auth_events.update({ auth_events.update({
@ -802,7 +960,7 @@ class FederationHandler(BaseHandler):
) )
continue continue
except NotRetryingDestination as e: except NotRetryingDestination as e:
logger.info(e.message) logger.info(str(e))
continue continue
except FederationDeniedError as e: except FederationDeniedError as e:
logger.info(e) logger.info(e)
@ -1027,7 +1185,8 @@ class FederationHandler(BaseHandler):
try: try:
logger.info("Processing queued PDU %s which was received " logger.info("Processing queued PDU %s which was received "
"while we were joining %s", p.event_id, p.room_id) "while we were joining %s", p.event_id, p.room_id)
yield self.on_receive_pdu(origin, p) with logcontext.nested_logging_context(p.event_id):
yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
except Exception as e: except Exception as e:
logger.warn( logger.warn(
"Error handling queued PDU %s from %s: %s", "Error handling queued PDU %s from %s: %s",
@ -1358,7 +1517,7 @@ class FederationHandler(BaseHandler):
) )
if state_groups: if state_groups:
_, state = state_groups.items().pop() _, state = list(state_groups.items()).pop()
results = state results = state
if event.is_state(): if event.is_state():
@ -1430,12 +1589,10 @@ class FederationHandler(BaseHandler):
else: else:
defer.returnValue(None) defer.returnValue(None)
@log_function
def get_min_depth_for_context(self, context): def get_min_depth_for_context(self, context):
return self.store.get_min_depth(context) return self.store.get_min_depth(context)
@defer.inlineCallbacks @defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, auth_events=None, def _handle_new_event(self, origin, event, state=None, auth_events=None,
backfilled=False): backfilled=False):
context = yield self._prep_event( context = yield self._prep_event(
@ -1444,6 +1601,9 @@ class FederationHandler(BaseHandler):
auth_events=auth_events, auth_events=auth_events,
) )
# reraise does not allow inlineCallbacks to preserve the stacktrace, so we
# hack around with a try/finally instead.
success = False
try: try:
if not event.internal_metadata.is_outlier() and not backfilled: if not event.internal_metadata.is_outlier() and not backfilled:
yield self.action_generator.handle_push_actions_for_event( yield self.action_generator.handle_push_actions_for_event(
@ -1454,15 +1614,13 @@ class FederationHandler(BaseHandler):
[(event, context)], [(event, context)],
backfilled=backfilled, backfilled=backfilled,
) )
except: # noqa: E722, as we reraise the exception this is fine. success = True
tp, value, tb = sys.exc_info() finally:
if not success:
logcontext.run_in_background( logcontext.run_in_background(
self.store.remove_push_actions_from_staging, self.store.remove_push_actions_from_staging,
event.event_id, event.event_id,
) )
six.reraise(tp, value, tb)
defer.returnValue(context) defer.returnValue(context)
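The success-flag pattern introduced here (and again in a later hunk below) in isolation, as a minimal sketch:

def run_with_cleanup(do_work, cleanup):
    """Run do_work(); on any failure, run cleanup() and let the exception
    propagate with its original stacktrace (which six.reraise under
    inlineCallbacks did not preserve)."""
    success = False
    try:
        result = do_work()
        success = True
        return result
    finally:
        if not success:
            cleanup()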
@ -1475,15 +1633,22 @@ class FederationHandler(BaseHandler):
Notifies about the events where appropriate. Notifies about the events where appropriate.
""" """
contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[ @defer.inlineCallbacks
logcontext.run_in_background( def prep(ev_info):
self._prep_event, event = ev_info["event"]
with logcontext.nested_logging_context(suffix=event.event_id):
res = yield self._prep_event(
origin, origin,
ev_info["event"], event,
state=ev_info.get("state"), state=ev_info.get("state"),
auth_events=ev_info.get("auth_events"), auth_events=ev_info.get("auth_events"),
) )
defer.returnValue(res)
contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
logcontext.run_in_background(prep, ev_info)
for ev_info in event_infos for ev_info in event_infos
], consumeErrors=True, ], consumeErrors=True,
)) ))
@ -1635,8 +1800,8 @@ class FederationHandler(BaseHandler):
) )
except AuthError as e: except AuthError as e:
logger.warn( logger.warn(
"Rejecting %s because %s", "[%s %s] Rejecting: %s",
event.event_id, e.msg event.room_id, event.event_id, e.msg
) )
context.rejected = RejectedReason.AUTH_ERROR context.rejected = RejectedReason.AUTH_ERROR
@ -1831,7 +1996,7 @@ class FederationHandler(BaseHandler):
room_version = yield self.store.get_room_version(event.room_id) room_version = yield self.store.get_room_version(event.room_id)
new_state = self.state_handler.resolve_events( new_state = yield self.state_handler.resolve_events(
room_version, room_version,
[list(local_view.values()), list(remote_view.values())], [list(local_view.values()), list(remote_view.values())],
event event
@ -14,9 +14,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import logging import logging
import sys
import six
from six import iteritems, itervalues, string_types from six import iteritems, itervalues, string_types
from canonicaljson import encode_canonical_json, json from canonicaljson import encode_canonical_json, json
@ -624,6 +622,9 @@ class EventCreationHandler(object):
event, context event, context
) )
# reraise does not allow inlineCallbacks to preserve the stacktrace, so we
# hack around with a try/finally instead.
success = False
try: try:
# If we're a worker we need to hit out to the master. # If we're a worker we need to hit out to the master.
if self.config.worker_app: if self.config.worker_app:
@ -636,6 +637,7 @@ class EventCreationHandler(object):
ratelimit=ratelimit, ratelimit=ratelimit,
extra_users=extra_users, extra_users=extra_users,
) )
success = True
return return
yield self.persist_and_notify_client_event( yield self.persist_and_notify_client_event(
@ -645,17 +647,16 @@ class EventCreationHandler(object):
ratelimit=ratelimit, ratelimit=ratelimit,
extra_users=extra_users, extra_users=extra_users,
) )
except: # noqa: E722, as we reraise the exception this is fine.
# Ensure that we actually remove the entries in the push actions
# staging area, if we calculated them.
tp, value, tb = sys.exc_info()
run_in_background( success = True
self.store.remove_push_actions_from_staging, finally:
event.event_id, if not success:
) # Ensure that we actually remove the entries in the push actions
# staging area, if we calculated them.
six.reraise(tp, value, tb) run_in_background(
self.store.remove_push_actions_from_staging,
event.event_id,
)
@defer.inlineCallbacks @defer.inlineCallbacks
def persist_and_notify_client_event( def persist_and_notify_client_event(
@ -269,14 +269,7 @@ class PaginationHandler(object):
if state_ids: if state_ids:
state = yield self.store.get_events(list(state_ids.values())) state = yield self.store.get_events(list(state_ids.values()))
state = state.values()
if state:
state = yield filter_events_for_client(
self.store,
user_id,
state.values(),
is_peeking=(member_event_id is None),
)
time_now = self.clock.time_msec() time_now = self.clock.time_msec()
@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler):
if e.code != 404: if e.code != 404:
logger.exception("Failed to get displayname") logger.exception("Failed to get displayname")
raise raise
except Exception:
logger.exception("Failed to get displayname") defer.returnValue(result["displayname"])
else:
defer.returnValue(result["displayname"])
@defer.inlineCallbacks @defer.inlineCallbacks
def set_displayname(self, target_user, requester, new_displayname, by_admin=False): def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler):
if e.code != 404: if e.code != 404:
logger.exception("Failed to get avatar_url") logger.exception("Failed to get avatar_url")
raise raise
except Exception:
logger.exception("Failed to get avatar_url")
defer.returnValue(result["avatar_url"]) defer.returnValue(result["avatar_url"])
@ -278,7 +274,7 @@ class BaseProfileHandler(BaseHandler):
except Exception as e: except Exception as e:
logger.warn( logger.warn(
"Failed to update join event for room %s - %s", "Failed to update join event for room %s - %s",
room_id, str(e.message) room_id, str(e)
) )
@ -125,6 +125,7 @@ class RegistrationHandler(BaseHandler):
guest_access_token=None, guest_access_token=None,
make_guest=False, make_guest=False,
admin=False, admin=False,
threepid=None,
): ):
"""Registers a new client on the server. """Registers a new client on the server.
@ -145,7 +146,7 @@ class RegistrationHandler(BaseHandler):
RegistrationError if there was a problem registering. RegistrationError if there was a problem registering.
""" """
yield self.auth.check_auth_blocking() yield self.auth.check_auth_blocking(threepid=threepid)
password_hash = None password_hash = None
if password: if password:
password_hash = yield self.auth_handler().hash(password) password_hash = yield self.auth_handler().hash(password)
@ -533,4 +534,5 @@ class RegistrationHandler(BaseHandler):
room_id=room_id, room_id=room_id,
remote_room_hosts=remote_room_hosts, remote_room_hosts=remote_room_hosts,
action="join", action="join",
ratelimit=False,
) )
@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler):
# Filter out rooms that we don't want to return # Filter out rooms that we don't want to return
rooms_to_scan = [ rooms_to_scan = [
r for r in sorted_rooms r for r in sorted_rooms
if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0 if r not in newly_unpublished and rooms_to_num_joined[r] > 0
] ]
total_room_count = len(rooms_to_scan) total_room_count = len(rooms_to_scan)

Some files were not shown because too many files have changed in this diff.