Mirror of https://git.anonymousland.org/anonymousland/synapse-product.git (synced 2025-08-09 01:22:15 -04:00)

Merge remote-tracking branch 'upstream/release-v1.41'

Commit f285b4200c: 237 changed files with 9601 additions and 6005 deletions
@@ -1,13 +0,0 @@
-CI
-BUILDKITE
-BUILDKITE_BUILD_NUMBER
-BUILDKITE_BRANCH
-BUILDKITE_BUILD_NUMBER
-BUILDKITE_JOB_ID
-BUILDKITE_BUILD_URL
-BUILDKITE_PROJECT_SLUG
-BUILDKITE_COMMIT
-BUILDKITE_PULL_REQUEST
-BUILDKITE_TAG
-CODECOV_TOKEN
-TRIAL_FLAGS
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
-    echo "Not merging forward, as this is a release branch"
-    exit 0
-fi
-
-if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
-    echo "Not a pull request, or hasn't had a PR opened yet..."
-
-    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
-    # can probably assume it's based on it and will be merged into it.
-    GITBASE="develop"
-else
-    # Get the reference, using the GitHub API
-    GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
-fi
-
-echo "--- merge_base_branch $GITBASE"
-
-# Show what we are before
-git --no-pager show -s
-
-# Set up username so it can do a merge
-git config --global user.email bot@matrix.org
-git config --global user.name "A robot"
-
-# Fetch and merge. If it doesn't work, it will raise due to set -e.
-git fetch -u origin $GITBASE
-git merge --no-edit --no-commit origin/$GITBASE
-
-# Show what we are after.
-git --no-pager show -s
@@ -3,7 +3,7 @@
 # CI's Docker setup at the point where this file is considered.
 server_name: "localhost:8800"
 
-signing_key_path: ".buildkite/test.signing.key"
+signing_key_path: ".ci/test.signing.key"
 
 report_stats: false
 
@@ -11,7 +11,7 @@ database:
   name: "psycopg2"
   args:
     user: postgres
-    host: postgres
+    host: localhost
    password: postgres
    database: synapse
 
@@ -23,7 +23,7 @@ import psycopg2
 # We use "postgres" as a database because it's bound to exist and the "synapse" one
 # doesn't exist yet.
 db_conn = psycopg2.connect(
-    user="postgres", host="postgres", password="postgres", dbname="postgres"
+    user="postgres", host="localhost", password="postgres", dbname="postgres"
 )
 db_conn.autocommit = True
 cur = db_conn.cursor()
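For context, the helper this hunk edits is small. Below is a minimal sketch of what the full script plausibly looks like after the change, inferred from the fragment above and from the `postgres_exec.py "CREATE DATABASE synapse"` invocations later in this diff; the `sys.argv` loop is an assumption, not copied from the diff.

```python
#!/usr/bin/env python
# Sketch: connect to the always-present "postgres" maintenance database with
# autocommit enabled, then run each SQL statement passed on the command line.
import sys

import psycopg2

db_conn = psycopg2.connect(
    user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True  # CREATE/DROP DATABASE cannot run inside a transaction
cur = db_conn.cursor()

for statement in sys.argv[1:]:
    cur.execute(statement)
```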
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# this script is run by buildkite in a plain `bionic` container; it installs the
+# this script is run by GitHub Actions in a plain `bionic` container; it installs the
 # minimal requirements for tox and hands over to the py3-old tox environment.
 
 set -ex
@@ -20,22 +20,22 @@ pip install -e .
 echo "--- Generate the signing key"
 
 # Generate the server's signing key.
-python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
+python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 
 echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
+scripts-dev/update_database --database-config .ci/sqlite-config.yaml
 
 # Create the PostgreSQL database.
-./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
+.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 # We should be able to run twice against the same database.
 echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 #####
 
@@ -44,14 +44,14 @@ coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --p
 echo "--- Prepare empty SQLite database"
 
 # we do this by deleting the sqlite db, and then doing the same again.
-rm .buildkite/test_db.db
+rm .ci/test_db.db
 
-scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
+scripts-dev/update_database --database-config .ci/sqlite-config.yaml
 
 # re-create the PostgreSQL database.
-./.buildkite/scripts/postgres_exec.py \
+.ci/scripts/postgres_exec.py \
     "DROP DATABASE synapse" \
     "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against empty database"
-coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
@@ -3,14 +3,14 @@
 # schema and run background updates on it.
 server_name: "localhost:8800"
 
-signing_key_path: ".buildkite/test.signing.key"
+signing_key_path: ".ci/test.signing.key"
 
 report_stats: false
 
 database:
   name: "sqlite3"
   args:
-    database: ".buildkite/test_db.db"
+    database: ".ci/test_db.db"
 
 # Suppress the key server warning.
 trusted_key_servers: []
.github/workflows/tests.yml (30 changes, vendored)
@@ -38,20 +38,15 @@ jobs:
     if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
     runs-on: ubuntu-latest
     steps:
-      # Note: This and the script can be simplified once we drop Buildkite. See:
-      #   https://github.com/actions/checkout/issues/266#issuecomment-638346893
-      #   https://github.com/actions/checkout/issues/416
       - uses: actions/checkout@v2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: pip install tox
-      - name: Patch Buildkite-specific test script
-        run: |
-          sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
-            scripts-dev/check-newsfragment
       - run: scripts-dev/check-newsfragment
+        env:
+          PULL_REQUEST_NUMBER: ${{ github.event.number }}
 
   lint-sdist:
     runs-on: ubuntu-latest
@@ -144,7 +139,7 @@ jobs:
       uses: docker://ubuntu:bionic # For old python and sqlite
       with:
         workdir: /github/workspace
-        entrypoint: .buildkite/scripts/test_old_deps.sh
+        entrypoint: .ci/scripts/test_old_deps.sh
       env:
         TRIAL_FLAGS: "--jobs=2"
     - name: Dump logs
@@ -197,12 +192,12 @@ jobs:
     volumes:
       - ${{ github.workspace }}:/src
     env:
       BUILDKITE_BRANCH: ${{ github.head_ref }}
       POSTGRES: ${{ matrix.postgres && 1}}
       MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
       WORKERS: ${{ matrix.workers && 1 }}
       REDIS: ${{ matrix.redis && 1 }}
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
+      TOP: ${{ github.workspace }}
 
     strategy:
       fail-fast: false
@@ -232,7 +227,7 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Prepare test blacklist
-        run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
+        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
       - name: Run SyTest
         run: /bootstrap.sh synapse
         working-directory: /src
@@ -252,6 +247,8 @@ jobs:
     if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
+    env:
+      TOP: ${{ github.workspace }}
     strategy:
       matrix:
         include:
@@ -281,13 +278,7 @@ jobs:
       - uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Patch Buildkite-specific test scripts
-        run: |
-          sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
-          sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
-          sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
-          sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
-      - run: .buildkite/scripts/test_synapse_port_db.sh
+      - run: .ci/scripts/test_synapse_port_db.sh
 
   complement:
     if: ${{ !failure() && !cancelled() }}
|
@ -374,6 +365,11 @@ jobs:
|
|||
rc=0
|
||||
results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
|
||||
while read job result ; do
|
||||
# The newsfile lint may be skipped on non PR builds
|
||||
if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
if [ "$result" != "success" ]; then
|
||||
echo "::set-failed ::Job $job returned $result"
|
||||
rc=1
|
||||
|
|
CHANGES.md (79 changes)
@@ -1,3 +1,82 @@
+Synapse 1.41.0rc1 (2021-08-18)
+==============================
+
+Features
+--------
+
+- Add `get_userinfo_by_id` method to ModuleApi. ([\#9581](https://github.com/matrix-org/synapse/issues/9581))
+- Initial local support for [MSC3266](https://github.com/matrix-org/synapse/pull/10394), Room Summary over the unstable `/rooms/{roomIdOrAlias}/summary` API. ([\#10394](https://github.com/matrix-org/synapse/issues/10394))
+- Experimental support for [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288), sending `room_type` to the identity server for 3pid invites over the `/store-invite` API. ([\#10435](https://github.com/matrix-org/synapse/issues/10435))
+- Add support for sending federation requests through a proxy. Contributed by @Bubu and @dklimpel. ([\#10475](https://github.com/matrix-org/synapse/issues/10475))
+- Add support for "marker" events which makes historical events discoverable for servers that already have all of the scrollback history (part of [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716)). ([\#10498](https://github.com/matrix-org/synapse/issues/10498))
+- Add a configuration setting for the time a `/sync` response is cached for. ([\#10513](https://github.com/matrix-org/synapse/issues/10513))
+- The default logging handler for new installations is now `PeriodicallyFlushingMemoryHandler`, a buffered logging handler which periodically flushes itself. ([\#10518](https://github.com/matrix-org/synapse/issues/10518))
+- Add support for new redaction rules for historical events specified in [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#10538](https://github.com/matrix-org/synapse/issues/10538))
+- Add a setting to disable TLS when sending email. ([\#10546](https://github.com/matrix-org/synapse/issues/10546))
+- Add pagination to the spaces summary based on updates to [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#10549](https://github.com/matrix-org/synapse/issues/10549), [\#10560](https://github.com/matrix-org/synapse/issues/10560), [\#10569](https://github.com/matrix-org/synapse/issues/10569), [\#10574](https://github.com/matrix-org/synapse/issues/10574), [\#10575](https://github.com/matrix-org/synapse/issues/10575), [\#10579](https://github.com/matrix-org/synapse/issues/10579), [\#10583](https://github.com/matrix-org/synapse/issues/10583))
+- Admin API to delete several media for a specific user. Contributed by @dklimpel. ([\#10558](https://github.com/matrix-org/synapse/issues/10558), [\#10628](https://github.com/matrix-org/synapse/issues/10628))
+- Add support for routing `/createRoom` to workers. ([\#10564](https://github.com/matrix-org/synapse/issues/10564))
+- Update the Synapse Grafana dashboard. ([\#10570](https://github.com/matrix-org/synapse/issues/10570))
+- Add an admin API (`GET /_synapse/admin/username_available`) to check if a username is available (regardless of registration settings). ([\#10578](https://github.com/matrix-org/synapse/issues/10578))
+- Allow editing a user's `external_ids` via the "Edit User" admin API. Contributed by @dklimpel. ([\#10598](https://github.com/matrix-org/synapse/issues/10598))
+- The Synapse manhole no longer needs coroutines to be wrapped in `defer.ensureDeferred`. ([\#10602](https://github.com/matrix-org/synapse/issues/10602))
+- Add option to allow modules to run periodic tasks on all instances, rather than just the one configured to run background tasks. ([\#10638](https://github.com/matrix-org/synapse/issues/10638))
+
+
+Bugfixes
+--------
+
+- Add some clarification to the sample config file. Contributed by @Kentokamoto. ([\#10129](https://github.com/matrix-org/synapse/issues/10129))
+- Fix a long-standing bug where protocols which are not implemented by any appservices were incorrectly returned via `GET /_matrix/client/r0/thirdparty/protocols`. ([\#10532](https://github.com/matrix-org/synapse/issues/10532))
+- Fix exceptions in logs when failing to get remote room list. ([\#10541](https://github.com/matrix-org/synapse/issues/10541))
+- Fix longstanding bug which caused the user "status" to be reset when the user went offline. Contributed by @dklimpel. ([\#10550](https://github.com/matrix-org/synapse/issues/10550))
+- Allow public rooms to be previewed in the spaces summary APIs from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#10580](https://github.com/matrix-org/synapse/issues/10580))
+- Fix a bug introduced in v1.37.1 where an error could occur in the asynchronous processing of PDUs when the queue was empty. ([\#10592](https://github.com/matrix-org/synapse/issues/10592))
+- Fix errors on /sync when read receipt data is a string. Only affects homeservers with the experimental flag for [MSC2285](https://github.com/matrix-org/matrix-doc/pull/2285) enabled. Contributed by @SimonBrandner. ([\#10606](https://github.com/matrix-org/synapse/issues/10606))
+- Additional validation for the spaces summary API to avoid errors like `ValueError: Stop argument for islice() must be None or an integer`. The missing validation has existed since v1.31.0. ([\#10611](https://github.com/matrix-org/synapse/issues/10611))
+- Revert behaviour introduced in v1.38.0 that strips `org.matrix.msc2732.device_unused_fallback_key_types` from `/sync` when its value is empty. This field should instead always be present according to [MSC2732](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2732-olm-fallback-keys.md). ([\#10623](https://github.com/matrix-org/synapse/issues/10623))
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for configuring a forward proxy. ([\#10443](https://github.com/matrix-org/synapse/issues/10443))
+- Updated the reverse proxy documentation to highlight the homeserver configuration that is needed to make Synapse aware that it is intentionally reverse proxied. ([\#10551](https://github.com/matrix-org/synapse/issues/10551))
+- Update CONTRIBUTING.md to fix index links and the instructions for SyTest in docker. ([\#10599](https://github.com/matrix-org/synapse/issues/10599))
+
+
+Deprecations and Removals
+-------------------------
+
+- No longer build `.deb` packages for Ubuntu 20.10 Groovy Gorilla, which has reached end of life. ([\#10588](https://github.com/matrix-org/synapse/issues/10588))
+- The `template_dir` configuration settings in the `sso`, `account_validity` and `email` sections of the configuration file are now deprecated in favour of the global `templates.custom_template_directory` setting. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html) for more information. ([\#10596](https://github.com/matrix-org/synapse/issues/10596))
+
+
+Internal Changes
+----------------
+
+- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10119](https://github.com/matrix-org/synapse/issues/10119))
+- Reduce errors in PostgreSQL logs due to concurrent serialization errors. ([\#10504](https://github.com/matrix-org/synapse/issues/10504))
+- Include room ID in ignored EDU log messages. Contributed by @ilmari. ([\#10507](https://github.com/matrix-org/synapse/issues/10507))
+- Add pagination to the spaces summary based on updates to [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#10527](https://github.com/matrix-org/synapse/issues/10527), [\#10530](https://github.com/matrix-org/synapse/issues/10530))
+- Fix CI to not break when run against branches rather than pull requests. ([\#10529](https://github.com/matrix-org/synapse/issues/10529))
+- Mark all events stemming from the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint as historical. ([\#10537](https://github.com/matrix-org/synapse/issues/10537))
+- Clean up some of the federation event authentication code for clarity. ([\#10539](https://github.com/matrix-org/synapse/issues/10539), [\#10591](https://github.com/matrix-org/synapse/issues/10591))
+- Convert `Transaction` and `Edu` objects to attrs. ([\#10542](https://github.com/matrix-org/synapse/issues/10542))
+- Update `/batch_send` endpoint to only return `state_events` created by the `state_events_from_before` passed in. ([\#10552](https://github.com/matrix-org/synapse/issues/10552))
+- Update contributing.md to warn against rebasing an open PR. ([\#10563](https://github.com/matrix-org/synapse/issues/10563))
+- Remove the unused public rooms replication stream. ([\#10565](https://github.com/matrix-org/synapse/issues/10565))
+- Clarify error message when failing to join a restricted room. ([\#10572](https://github.com/matrix-org/synapse/issues/10572))
+- Remove references to BuildKite in favour of GitHub Actions. ([\#10573](https://github.com/matrix-org/synapse/issues/10573))
+- Move `/batch_send` endpoint defined by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) to the `/v2_alpha` directory. ([\#10576](https://github.com/matrix-org/synapse/issues/10576))
+- Allow multiple custom directories in `read_templates`. ([\#10587](https://github.com/matrix-org/synapse/issues/10587))
+- Re-organize the `synapse.federation.transport.server` module to create smaller files. ([\#10590](https://github.com/matrix-org/synapse/issues/10590))
+- Flatten the `synapse.rest.client` package by moving the contents of `v1` and `v2_alpha` into the parent. ([\#10600](https://github.com/matrix-org/synapse/issues/10600))
+- Build Debian packages for Debian 12 (Bookworm). ([\#10612](https://github.com/matrix-org/synapse/issues/10612))
+- Fix up a couple of links to the database schema documentation. ([\#10620](https://github.com/matrix-org/synapse/issues/10620))
+- Fix a broken link to the upgrade notes. ([\#10631](https://github.com/matrix-org/synapse/issues/10631))
+
+
 Synapse 1.40.0 (2021-08-10)
 ===========================
 
@@ -13,8 +13,9 @@ This document aims to get you started with contributing to this repo!
 - [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
 - [8. Test, test, test!](#8-test-test-test)
   * [Run the linters.](#run-the-linters)
-  * [Run the unit tests.](#run-the-unit-tests)
-  * [Run the integration tests.](#run-the-integration-tests)
+  * [Run the unit tests.](#run-the-unit-tests-twisted-trial)
+  * [Run the integration tests (SyTest).](#run-the-integration-tests-sytest)
+  * [Run the integration tests (Complement).](#run-the-integration-tests-complement)
 - [9. Submit your patch.](#9-submit-your-patch)
   * [Changelog](#changelog)
+    + [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
@@ -197,7 +198,7 @@ The following command will let you run the integration test with the most common
 configuration:
 
 ```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
 ```
 
 This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
@@ -252,6 +253,7 @@ To prepare a Pull Request, please:
 4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
 5. add a [changelog entry](#changelog) and push it to your Pull Request;
 6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
+7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.
 
 
 ## Changelog
@@ -44,9 +44,9 @@ include book.toml
 include pyproject.toml
 recursive-include changelog.d *
 
-prune .buildkite
 prune .circleci
 prune .github
+prune .ci
 prune contrib
 prune debian
 prune demo/etc
@@ -1,7 +1,7 @@
 Upgrading Synapse
 =================
 
-This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrading>`_.
+This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrade>`_.
 Please update your links.
 
 The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
@@ -54,7 +54,7 @@
   "gnetId": null,
   "graphTooltip": 0,
   "id": null,
-  "iteration": 1621258266004,
+  "iteration": 1628606819564,
   "links": [
     {
       "asDropdown": false,
@ -307,7 +307,6 @@
|
|||
],
|
||||
"thresholds": [
|
||||
{
|
||||
"$$hashKey": "object:283",
|
||||
"colorMode": "warning",
|
||||
"fill": false,
|
||||
"line": true,
|
||||
|
@@ -316,7 +315,6 @@
          "yaxis": "left"
        },
        {
-          "$$hashKey": "object:284",
          "colorMode": "critical",
          "fill": false,
          "line": true,
@@ -344,7 +342,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:255",
          "decimals": null,
          "format": "s",
          "label": "",
@@ -354,7 +351,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:256",
          "format": "hertz",
          "label": "",
          "logBase": 1,
@@ -429,7 +425,6 @@
      ],
      "thresholds": [
        {
-          "$$hashKey": "object:566",
          "colorMode": "critical",
          "fill": true,
          "line": true,
@@ -457,7 +452,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:538",
          "decimals": null,
          "format": "percentunit",
          "label": null,
@@ -467,7 +461,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:539",
          "format": "short",
          "label": null,
          "logBase": 1,
@@ -573,7 +566,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:1560",
          "format": "bytes",
          "logBase": 1,
          "max": null,
@@ -581,7 +573,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:1561",
          "format": "short",
          "logBase": 1,
          "max": null,
@@ -641,7 +632,6 @@
      "renderer": "flot",
      "seriesOverrides": [
        {
-          "$$hashKey": "object:639",
          "alias": "/max$/",
          "color": "#890F02",
          "fill": 0,
@@ -693,7 +683,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:650",
          "decimals": null,
          "format": "none",
          "label": "",
@@ -703,7 +692,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:651",
          "decimals": null,
          "format": "short",
          "label": null,
@@ -783,11 +771,9 @@
      "renderer": "flot",
      "seriesOverrides": [
        {
-          "$$hashKey": "object:1240",
          "alias": "/user/"
        },
        {
-          "$$hashKey": "object:1241",
          "alias": "/system/"
        }
      ],
@@ -817,7 +803,6 @@
      ],
      "thresholds": [
        {
-          "$$hashKey": "object:1278",
          "colorMode": "custom",
          "fillColor": "rgba(255, 255, 255, 1)",
          "line": true,
@@ -827,7 +812,6 @@
          "yaxis": "left"
        },
        {
-          "$$hashKey": "object:1279",
          "colorMode": "custom",
          "fillColor": "rgba(255, 255, 255, 1)",
          "line": true,
@@ -837,7 +821,6 @@
          "yaxis": "left"
        },
        {
-          "$$hashKey": "object:1498",
          "colorMode": "critical",
          "fill": true,
          "line": true,
@@ -865,7 +848,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:1250",
          "decimals": null,
          "format": "percentunit",
          "label": "",
@@ -875,7 +857,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:1251",
          "format": "short",
          "logBase": 1,
          "max": null,
@@ -1427,7 +1408,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:572",
          "format": "percentunit",
          "label": null,
          "logBase": 1,
@@ -1436,7 +1416,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:573",
          "format": "short",
          "label": null,
          "logBase": 1,
@@ -1720,7 +1699,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:102",
          "format": "hertz",
          "logBase": 1,
          "max": null,
@@ -1728,7 +1706,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:103",
          "format": "short",
          "logBase": 1,
          "max": null,
@@ -3425,7 +3402,7 @@
        "h": 9,
        "w": 12,
        "x": 0,
-        "y": 33
+        "y": 6
      },
      "hiddenSeries": false,
      "id": 79,
@@ -3442,9 +3419,12 @@
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
      "paceLength": 10,
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
@@ -3526,7 +3506,7 @@
        "h": 9,
        "w": 12,
        "x": 12,
-        "y": 33
+        "y": 6
      },
      "hiddenSeries": false,
      "id": 83,
@@ -3543,9 +3523,12 @@
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
      "paceLength": 10,
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
@@ -3629,7 +3612,7 @@
        "h": 9,
        "w": 12,
        "x": 0,
-        "y": 42
+        "y": 15
      },
      "hiddenSeries": false,
      "id": 109,
@@ -3646,9 +3629,12 @@
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
      "paceLength": 10,
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
@@ -3733,7 +3719,7 @@
        "h": 9,
        "w": 12,
        "x": 12,
-        "y": 42
+        "y": 15
      },
      "hiddenSeries": false,
      "id": 111,
@@ -3750,9 +3736,12 @@
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
      "paceLength": 10,
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
@@ -3831,7 +3820,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-        "y": 51
+        "y": 24
      },
      "hiddenSeries": false,
      "id": 142,
@@ -3847,8 +3836,11 @@
      "lines": true,
      "linewidth": 1,
      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
@@ -3931,7 +3923,7 @@
        "h": 9,
        "w": 12,
        "x": 12,
-        "y": 51
+        "y": 24
      },
      "hiddenSeries": false,
      "id": 140,
@@ -3948,9 +3940,12 @@
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
      "paceLength": 10,
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
@@ -4079,7 +4074,7 @@
        "h": 9,
        "w": 12,
        "x": 0,
-        "y": 59
+        "y": 32
      },
      "heatmap": {},
      "hideZeroBuckets": false,
@@ -4145,7 +4140,7 @@
        "h": 9,
        "w": 12,
        "x": 12,
-        "y": 60
+        "y": 33
      },
      "hiddenSeries": false,
      "id": 162,
@@ -4163,9 +4158,12 @@
      "linewidth": 0,
      "links": [],
      "nullPointMode": "connected",
+      "options": {
+        "alertThreshold": true
+      },
      "paceLength": 10,
      "percentage": false,
-      "pluginVersion": "7.1.3",
+      "pluginVersion": "7.3.7",
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
@@ -4350,7 +4348,7 @@
        "h": 9,
        "w": 12,
        "x": 0,
-        "y": 68
+        "y": 41
      },
      "heatmap": {},
      "hideZeroBuckets": false,
@@ -4396,6 +4394,311 @@
      "yBucketBound": "auto",
      "yBucketNumber": null,
      "yBucketSize": null
-    }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "$datasource",
+      "editable": true,
+      "error": false,
+      "fieldConfig": {
+        "defaults": {
+          "custom": {},
+          "links": []
+        },
+        "overrides": []
+      },
+      "fill": 1,
+      "fillGradient": 0,
+      "grid": {},
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 12,
+        "y": 42
+      },
+      "hiddenSeries": false,
+      "id": 203,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 2,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
+      "paceLength": 10,
+      "percentage": false,
+      "pluginVersion": "7.3.7",
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
+          "format": "time_series",
+          "interval": "",
+          "intervalFactor": 1,
+          "legendFormat": "rss {{index}}",
+          "refId": "A",
+          "step": 4
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Age of oldest event in staging area",
+      "tooltip": {
+        "msResolution": false,
+        "shared": true,
+        "sort": 0,
+        "value_type": "cumulative"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "ms",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": 0,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "$datasource",
+      "editable": true,
+      "error": false,
+      "fieldConfig": {
+        "defaults": {
+          "custom": {},
+          "links": []
+        },
+        "overrides": []
+      },
+      "fill": 1,
+      "fillGradient": 0,
+      "grid": {},
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 50
+      },
+      "hiddenSeries": false,
+      "id": 202,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 2,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
+      "paceLength": 10,
+      "percentage": false,
+      "pluginVersion": "7.3.7",
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
+          "format": "time_series",
+          "interval": "",
+          "intervalFactor": 1,
+          "legendFormat": "rss {{index}}",
+          "refId": "A",
+          "step": 4
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Number of events in federation staging area",
+      "tooltip": {
+        "msResolution": false,
+        "shared": true,
+        "sort": 0,
+        "value_type": "cumulative"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "none",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": 0,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "${DS_PROMETHEUS}",
+      "fieldConfig": {
+        "defaults": {
+          "custom": {}
+        },
+        "overrides": []
+      },
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 51
+      },
+      "hiddenSeries": false,
+      "id": 205,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
+      "percentage": false,
+      "pluginVersion": "7.3.7",
+      "pointradius": 2,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum(rate(synapse_federation_soft_failed_events_total{instance=\"$instance\"}[$bucket_size]))",
+          "interval": "",
+          "legendFormat": "soft-failed events",
+          "refId": "A"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Soft-failed event rate",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "hertz",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": false
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    }
   ],
   "title": "Federation",
@@ -4647,7 +4950,7 @@
        "h": 7,
        "w": 12,
        "x": 0,
-        "y": 8
+        "y": 33
      },
      "hiddenSeries": false,
      "id": 48,
@@ -4749,7 +5052,7 @@
        "h": 7,
        "w": 12,
        "x": 12,
-        "y": 8
+        "y": 33
      },
      "hiddenSeries": false,
      "id": 104,
@@ -4877,7 +5180,7 @@
        "h": 7,
        "w": 12,
        "x": 0,
-        "y": 15
+        "y": 40
      },
      "hiddenSeries": false,
      "id": 10,
@@ -4981,7 +5284,7 @@
        "h": 7,
        "w": 12,
        "x": 12,
-        "y": 15
+        "y": 40
      },
      "hiddenSeries": false,
      "id": 11,
@@ -5086,7 +5389,7 @@
        "h": 7,
        "w": 12,
        "x": 0,
-        "y": 22
+        "y": 47
      },
      "hiddenSeries": false,
      "id": 180,
@@ -5168,6 +5471,126 @@
        "align": false,
        "alignLevel": null
      }
-    }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "$datasource",
+      "fieldConfig": {
+        "defaults": {
+          "custom": {},
+          "links": []
+        },
+        "overrides": []
+      },
+      "fill": 6,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 12,
+        "y": 47
+      },
+      "hiddenSeries": false,
+      "id": 200,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
+      "percentage": false,
+      "pluginVersion": "7.3.7",
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "histogram_quantile(0.99, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "99%",
+          "refId": "D"
+        },
+        {
+          "expr": "histogram_quantile(0.9, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "90%",
+          "refId": "A"
+        },
+        {
+          "expr": "histogram_quantile(0.75, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "75%",
+          "refId": "C"
+        },
+        {
+          "expr": "histogram_quantile(0.5, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "50%",
+          "refId": "B"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Time waiting for DB connection quantiles",
+      "tooltip": {
+        "shared": true,
+        "sort": 2,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "decimals": null,
+          "format": "s",
+          "label": "",
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": false
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    }
   ],
   "repeat": null,
@@ -5916,7 +6339,7 @@
        "h": 10,
        "w": 12,
        "x": 0,
-        "y": 84
+        "y": 35
      },
      "hiddenSeries": false,
      "id": 1,
@@ -6022,7 +6445,7 @@
        "h": 10,
        "w": 12,
        "x": 12,
-        "y": 84
+        "y": 35
      },
      "hiddenSeries": false,
      "id": 8,
@@ -6126,7 +6549,7 @@
        "h": 10,
        "w": 12,
        "x": 0,
-        "y": 94
+        "y": 45
      },
      "hiddenSeries": false,
      "id": 38,
@@ -6226,7 +6649,7 @@
        "h": 10,
        "w": 12,
        "x": 12,
-        "y": 94
+        "y": 45
      },
      "hiddenSeries": false,
      "id": 39,
@@ -6258,8 +6681,9 @@
      "steppedLine": false,
      "targets": [
        {
-          "expr": "topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))",
+          "expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
          "format": "time_series",
+          "interval": "",
          "intervalFactor": 2,
          "legendFormat": "{{name}} {{job}}-{{index}}",
          "refId": "A",
@@ -6326,7 +6750,7 @@
        "h": 9,
        "w": 12,
        "x": 0,
-        "y": 104
+        "y": 55
      },
      "hiddenSeries": false,
      "id": 65,
@@ -9051,7 +9475,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-        "y": 119
+        "y": 41
      },
      "hiddenSeries": false,
      "id": 156,
@@ -9089,7 +9513,7 @@
      "steppedLine": false,
      "targets": [
        {
-          "expr": "synapse_admin_mau:current{instance=\"$instance\"}",
+          "expr": "synapse_admin_mau:current{instance=\"$instance\", job=~\"$job\"}",
          "format": "time_series",
          "interval": "",
          "intervalFactor": 1,
@@ -9097,7 +9521,7 @@
          "refId": "A"
        },
        {
-          "expr": "synapse_admin_mau:max{instance=\"$instance\"}",
+          "expr": "synapse_admin_mau:max{instance=\"$instance\", job=~\"$job\"}",
          "format": "time_series",
          "interval": "",
          "intervalFactor": 1,
@@ -9164,7 +9588,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-        "y": 119
+        "y": 41
      },
      "hiddenSeries": false,
      "id": 160,
@@ -9484,7 +9908,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-        "y": 73
+        "y": 43
      },
      "hiddenSeries": false,
      "id": 168,
@@ -9516,7 +9940,7 @@
        {
          "expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])",
          "interval": "",
-          "legendFormat": "{{exported_service}}",
+          "legendFormat": "{{service}}",
          "refId": "A"
        }
      ],
@@ -9579,7 +10003,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-        "y": 73
+        "y": 43
      },
      "hiddenSeries": false,
      "id": 171,
@@ -9611,7 +10035,7 @@
        {
          "expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])",
          "interval": "",
-          "legendFormat": "{{exported_service}}",
+          "legendFormat": "{{service}}",
          "refId": "A"
        }
      ],
@@ -9959,7 +10383,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:165",
          "format": "hertz",
          "label": null,
          "logBase": 1,
@@ -9968,7 +10391,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:166",
          "format": "short",
          "label": null,
          "logBase": 1,
@@ -10071,7 +10493,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:390",
          "format": "hertz",
          "label": null,
          "logBase": 1,
@@ -10080,7 +10501,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:391",
          "format": "short",
          "label": null,
          "logBase": 1,
@@ -10169,7 +10589,6 @@
      },
      "yaxes": [
        {
-          "$$hashKey": "object:390",
          "format": "hertz",
          "label": null,
          "logBase": 1,
@@ -10178,7 +10597,6 @@
          "show": true
        },
        {
-          "$$hashKey": "object:391",
          "format": "short",
          "label": null,
          "logBase": 1,
@@ -10470,5 +10888,5 @@
   "timezone": "",
   "title": "Synapse",
   "uid": "000000012",
-  "version": 90
+  "version": 99
 }
debian/build_virtualenv (15 changes, vendored)
@@ -100,3 +100,18 @@ esac
 # add a dependency on the right version of python to substvars.
 PYPKG=`basename $SNAKE`
 echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars
+
+
+# add a couple of triggers. This is needed so that dh-virtualenv can rebuild
+# the venv when the system python changes (see
+# https://dh-virtualenv.readthedocs.io/en/latest/tutorial.html#step-2-set-up-packaging-for-your-project)
+#
+# we do it here rather than the more conventional way of just adding it to
+# debian/matrix-synapse-py3.triggers, because we need to add a trigger on the
+# right version of python.
+cat >>"debian/.debhelper/generated/matrix-synapse-py3/triggers" <<EOF
+# triggers for dh-virtualenv
+interest-noawait $SNAKE
+interest dh-virtualenv-interpreter-update
+
+EOF
debian/changelog (8 changes, vendored)
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.41.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.41.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 18 Aug 2021 15:52:00 +0100
+
 matrix-synapse-py3 (1.40.0) stable; urgency=medium
 
   * New synapse release 1.40.0.
|
|||
|
||||
[ Richard van der Hoff ]
|
||||
* Drop backwards-compatibility code that was required to support Ubuntu Xenial.
|
||||
* Update package triggers so that the virtualenv is correctly rebuilt
|
||||
when the system python is rebuilt, on recent Python versions.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.40.0~rc1.
|
||||
|
|
debian/matrix-synapse-py3.triggers (9 changes, vendored)
@@ -1,9 +0,0 @@
-# Register interest in Python interpreter changes and
-# don't make the Python package dependent on the virtualenv package
-# processing (noawait)
-interest-noawait /usr/bin/python3.5
-interest-noawait /usr/bin/python3.6
-interest-noawait /usr/bin/python3.7
-
-# Also provide a symbolic trigger for all dh-virtualenv packages
-interest dh-virtualenv-interpreter-update
@@ -18,18 +18,31 @@ handlers:
         backupCount: 6  # Does not include the current log file.
         encoding: utf8
 
-    # Default to buffering writes to log file for efficiency. This means that
-    # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
-    # logs will still be flushed immediately.
+    # Default to buffering writes to log file for efficiency.
+    # WARNING/ERROR logs will still be flushed immediately, but there will be a
+    # delay (of up to `period` seconds, or until the buffer is full with
+    # `capacity` messages) before INFO/DEBUG logs get written.
     buffer:
-        class: logging.handlers.MemoryHandler
+        class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
         target: file
-        # The capacity is the number of log lines that are buffered before
-        # being written to disk. Increasing this will lead to better
+
+        # The capacity is the maximum number of log lines that are buffered
+        # before being written to disk. Increasing this will lead to better
         # performance, at the expense of it taking longer for log lines to
         # be written to disk.
+        # This parameter is required.
         capacity: 10
-        flushLevel: 30  # Flush for WARNING logs as well
+
+        # Logs with a level at or above the flush level will cause the buffer to
+        # be flushed immediately.
+        # Default value: 40 (ERROR)
+        # Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
+        flushLevel: 30  # Flush immediately for WARNING logs and higher
+
+        # The period of time, in seconds, between forced flushes.
+        # Messages will not be delayed for longer than this time.
+        # Default value: 5 seconds
+        period: 5
 {% endif %}
 
     console:
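To make the new handler's semantics concrete, here is a minimal, illustrative Python sketch of a periodically-flushing memory handler built on the stdlib `MemoryHandler`. It approximates the behaviour the comments above describe (buffer up to `capacity` records, flush at `flushLevel` or above, force a flush every `period` seconds); it is not Synapse's actual implementation.

```python
import logging
import logging.handlers
import threading


class PeriodicallyFlushingMemoryHandlerSketch(logging.handlers.MemoryHandler):
    """Buffer records like MemoryHandler, but also flush on a timer, so
    INFO/DEBUG lines are delayed by at most `period` seconds."""

    def __init__(self, capacity, flushLevel=logging.ERROR, target=None, period=5.0):
        super().__init__(capacity, flushLevel=flushLevel, target=target)
        self._period = period
        self._schedule_flush()

    def _schedule_flush(self):
        # Daemon timer thread: flush whatever has accumulated, then re-arm.
        self._timer = threading.Timer(self._period, self._flush_and_reschedule)
        self._timer.daemon = True
        self._timer.start()

    def _flush_and_reschedule(self):
        self.flush()
        self._schedule_flush()
```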
@@ -7,6 +7,7 @@
 - [Installation](setup/installation.md)
 - [Using Postgres](postgres.md)
 - [Configuring a Reverse Proxy](reverse_proxy.md)
+- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
 - [Configuring a Turn Server](turn-howto.md)
 - [Delegation](delegate.md)
@@ -20,6 +21,7 @@
 - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
 - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
 - [Structured Logging](structured_logging.md)
+- [Templates](templates.md)
 - [User Authentication](usage/configuration/user_authentication/README.md)
 - [Single-Sign On]()
 - [OpenID Connect](openid.md)
@@ -12,6 +12,7 @@
 - [Delete local media](#delete-local-media)
   * [Delete a specific local media](#delete-a-specific-local-media)
   * [Delete local media by date or size](#delete-local-media-by-date-or-size)
+  * [Delete media uploaded by a user](#delete-media-uploaded-by-a-user)
 - [Purge Remote Media API](#purge-remote-media-api)
 
 # Querying media
@@ -47,7 +48,8 @@ The API returns a JSON body like the following:
 ## List all media uploaded by a user
 
 Listing all media that has been uploaded by a local user can be achieved through
-the use of the [List media of a user](user_admin_api.md#list-media-of-a-user)
+the use of the
+[List media uploaded by a user](user_admin_api.md#list-media-uploaded-by-a-user)
 Admin API.
 
 # Quarantine media
@@ -281,6 +283,11 @@ The following fields are returned in the JSON response body:
 * `deleted_media`: an array of strings - List of deleted `media_id`
 * `total`: integer - Total number of deleted `media_id`
 
+## Delete media uploaded by a user
+
+You can find details of how to delete multiple media uploaded by a user in
+[User Admin API](user_admin_api.md#delete-media-uploaded-by-a-user).
+
 # Purge Remote Media API
 
 The purge remote media API allows server admins to purge old cached remote media.
@@ -81,6 +81,16 @@ with a body of:
             "address": "<user_mail_2>"
         }
     ],
+    "external_ids": [
+        {
+            "auth_provider": "<provider1>",
+            "external_id": "<user_id_provider_1>"
+        },
+        {
+            "auth_provider": "<provider2>",
+            "external_id": "<user_id_provider_2>"
+        }
+    ],
    "avatar_url": "<avatar_url>",
    "admin": false,
    "deactivated": false
@@ -90,26 +100,34 @@ with a body of:
 To use it, you will need to authenticate by providing an `access_token` for a
 server admin: [Admin API](../usage/administration/admin_api)
 
+Returns HTTP status code:
+- `201` - When a new user object was created.
+- `200` - When a user was modified.
+
 URL parameters:
 
 - `user_id`: fully-qualified user id: for example, `@user:server.com`.
 
 Body parameters:
 
-- `password`, optional. If provided, the user's password is updated and all
+- `password` - string, optional. If provided, the user's password is updated and all
   devices are logged out.
-
-- `displayname`, optional, defaults to the value of `user_id`.
-
-- `threepids`, optional, allows setting the third-party IDs (email, msisdn)
+- `displayname` - string, optional, defaults to the value of `user_id`.
+- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
   belonging to a user.
-
-- `avatar_url`, optional, must be a
+  - `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
+  - `address` - string. Value of third-party ID.
+- `external_ids` - array, optional. Allow setting the identifier of the external identity
+  provider for SSO (Single sign-on). Details in
+  [Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
+  section `sso` and `oidc_providers`.
+  - `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
+    in homeserver configuration.
+  - `external_id` - string, user ID in the external identity provider.
+- `avatar_url` - string, optional, must be a
   [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
-
-- `admin`, optional, defaults to `false`.
-
-- `deactivated`, optional. If unspecified, deactivation state will be left
+- `admin` - bool, optional, defaults to `false`.
+- `deactivated` - bool, optional. If unspecified, deactivation state will be left
   unchanged on existing accounts and set to `false` for new accounts.
   A user cannot be erased by deactivating with this API. For details on
   deactivating users see [Deactivate Account](#deactivate-account).
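As a usage illustration of the create/modify endpoint documented in this hunk (the Synapse admin API `PUT /_synapse/admin/v2/users/<user_id>`), here is a hedged Python sketch; the homeserver URL, token, and field values are placeholders, and `requests` is simply an assumed HTTP client.

```python
import requests  # assumed available; any HTTP client works

# Create or modify @user:server.com, attaching an external SSO identity.
resp = requests.put(
    "https://homeserver.example/_synapse/admin/v2/users/@user:server.com",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={
        "displayname": "User",
        "external_ids": [
            {"auth_provider": "<provider1>", "external_id": "<user_id_provider_1>"}
        ],
        "admin": False,
    },
)
print(resp.status_code)  # 201 = created, 200 = modified, per the docs above
```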
@@ -443,8 +461,9 @@ The following fields are returned in the JSON response body:
 - `joined_rooms` - An array of `room_id`.
 - `total` - Number of rooms.
 
-## List media of a user
+## User media
+
+### List media uploaded by a user
 Gets a list of all local media that a specific `user_id` has created.
 By default, the response is ordered by descending creation date and ascending media ID.
 The newest media is on top. You can change the order with parameters
@@ -543,7 +562,6 @@ The following fields are returned in the JSON response body:
 
 - `media` - An array of objects, each containing information about a media.
   Media objects contain the following fields:
-
   - `created_ts` - integer - Timestamp when the content was uploaded in ms.
   - `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
   - `media_id` - string - The id used to refer to the media.
@ -551,13 +569,58 @@ The following fields are returned in the JSON response body:
|
|||
- `media_type` - string - The MIME-type of the media.
|
||||
- `quarantined_by` - string - The user ID that initiated the quarantine request
|
||||
for this media.
|
||||
|
||||
- `safe_from_quarantine` - bool - Whether this media is marked as safe from quarantining.
|
||||
- `upload_name` - string - The name the media was uploaded with.
|
||||
|
||||
- `next_token`: integer - Indication for pagination. See above.
|
||||
- `total` - integer - Total number of media.
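
As a sketch of how the `next_token` and `total` fields drive pagination, the following assumes the listing endpoint is `GET /_synapse/admin/v1/users/<user_id>/media`; the URL and token are placeholders.

```python
# Page through a user's media until no next_token is returned.
import requests

SERVER = "https://synapse.example.com"   # placeholder
ADMIN_TOKEN = "<admin access token>"     # placeholder
USER_ID = "@alice:example.com"

params = {"limit": 100}
while True:
    resp = requests.get(
        f"{SERVER}/_synapse/admin/v1/users/{USER_ID}/media",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        params=params,
    )
    resp.raise_for_status()
    page = resp.json()
    for media in page["media"]:
        print(media["media_id"], media["media_type"], media["created_ts"])
    if "next_token" not in page:
        break                            # all pages consumed
    params["from"] = page["next_token"]
```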
|
||||
|
||||
### Delete media uploaded by a user
|
||||
|
||||
This API deletes the *local* media from the disk of your own server
|
||||
that a specific `user_id` has created. This includes any local thumbnails.
|
||||
|
||||
This API will not affect media that has been uploaded to external
|
||||
media repositories (e.g. https://github.com/turt2live/matrix-media-repo/).
|
||||
|
||||
By default, the API deletes media ordered by descending creation date and ascending media ID.
|
||||
The newest media is deleted first. You can change the order with parameters
|
||||
`order_by` and `dir`. If no `limit` is set the API deletes `100` files per request.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
DELETE /_synapse/admin/v1/users/<user_id>/media
|
||||
```
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: [Admin API](../usage/administration/admin_api)
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"total": 1
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `deleted_media`: an array of strings - List of deleted `media_id`
|
||||
* `total`: integer - Total number of deleted `media_id`
|
||||
|
||||
**Note**: There is no `next_token`. Pagination is not useful here, because
|
||||
after deleting media the remaining media have a new order.
|
||||
|
||||
**Parameters**
|
||||
|
||||
This API has the same parameters as
|
||||
[List media uploaded by a user](#list-media-uploaded-by-a-user).
|
||||
With these parameters you can, for example, limit the number of files to delete at once or
|
||||
delete the largest/smallest or newest/oldest files first.
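
As a sketch, deleting a user's oldest media first could look like this. It assumes the `order_by`, `dir`, and `limit` values documented for the listing endpoint; the URL and token are placeholders.

```python
import requests

SERVER = "https://synapse.example.com"   # placeholder
ADMIN_TOKEN = "<admin access token>"     # placeholder

resp = requests.delete(
    f"{SERVER}/_synapse/admin/v1/users/@alice:example.com/media",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    # order by ascending creation date, so the 50 oldest go first
    params={"limit": 50, "order_by": "created_ts", "dir": "f"},
)
resp.raise_for_status()
print(resp.json()["total"], "media items deleted")
```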
|
||||
|
||||
## Login as a user
|
||||
|
||||
Get an access token that can be used to authenticate as that user. Useful for
|
||||
|
@ -1013,3 +1076,22 @@ The following parameters should be set in the URL:
|
|||
- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
|
||||
be local.
|
||||
|
||||
### Check username availability
|
||||
|
||||
Checks to see if a username is available, and valid, for the server. See [the client-server
|
||||
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
|
||||
for more information.
|
||||
|
||||
This endpoint will work even if registration is disabled on the server, unlike
|
||||
`/_matrix/client/r0/register/available`.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/username_available?username=$localpart
|
||||
```
|
||||
|
||||
The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: [Admin API](../usage/administration/admin_api)
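
A minimal sketch of calling it (the URL and token are placeholders):

```python
import requests

SERVER = "https://synapse.example.com"   # placeholder
ADMIN_TOKEN = "<admin access token>"     # placeholder

resp = requests.get(
    f"{SERVER}/_synapse/admin/v1/username_available",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    params={"username": "alice"},        # the localpart to check
)
print(resp.status_code, resp.json())     # {"available": true} when free
```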
|
||||
|
|
|
@ -67,7 +67,7 @@ This gives a Python REPL in which `hs` gives access to the
|
|||
`synapse.server.HomeServer` object - which in turn gives access to many other
|
||||
parts of the process.
|
||||
|
||||
Note that any call which returns a coroutine will need to be wrapped in `ensureDeferred`.
|
||||
Note that, prior to Synapse 1.41, any call which returned a coroutine needed to be wrapped in `ensureDeferred`.
|
||||
|
||||
As a simple example, retrieving an event from the database:
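
For instance, a sketch of such a session on those older versions, with the coroutine wrapped as described above (the event ID is a placeholder):

```python
>>> from twisted.internet.defer import ensureDeferred
>>> d = ensureDeferred(hs.get_datastore().get_event("$some_event_id"))
>>> d.result  # the event, once the Deferred has fired
```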
|
||||
|
||||
|
|
|
@ -33,6 +33,19 @@ Let's assume that we expect clients to connect to our server at
|
|||
`https://example.com:8448`. The following sections detail the configuration of
|
||||
the reverse proxy and the homeserver.
|
||||
|
||||
|
||||
## Homeserver Configuration
|
||||
|
||||
The HTTP configuration will need to be updated for Synapse to correctly record
|
||||
client IP addresses and generate redirect URLs while behind a reverse proxy.
|
||||
|
||||
In `homeserver.yaml` set `x_forwarded: true` in the port 8008 section and
|
||||
consider setting `bind_addresses: ['127.0.0.1']` so that the server only
|
||||
listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
|
||||
when using a containerized Synapse, as that will prevent it from responding
|
||||
to proxied traffic.)
|
||||
|
||||
|
||||
## Reverse-proxy configuration examples
|
||||
|
||||
**NOTE**: You only need one of these.
|
||||
|
@ -239,16 +252,6 @@ relay "matrix_federation" {
|
|||
}
|
||||
```
|
||||
|
||||
## Homeserver Configuration
|
||||
|
||||
You will also want to set `bind_addresses: ['127.0.0.1']` and
|
||||
`x_forwarded: true` for port 8008 in `homeserver.yaml` to ensure that
|
||||
client IP addresses are recorded correctly.
|
||||
|
||||
Having done so, you can then use `https://matrix.example.com` (instead
|
||||
of `https://matrix.example.com:8448`) as the "Custom server" when
|
||||
connecting to Synapse from a client.
|
||||
|
||||
|
||||
## Health check endpoint
|
||||
|
||||
|
|
|
@ -210,6 +210,8 @@ presence:
|
|||
#
|
||||
# This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
|
||||
#
|
||||
# Note: The value is ignored when an HTTP proxy is in use
|
||||
#
|
||||
#ip_range_blacklist:
|
||||
# - '127.0.0.0/8'
|
||||
# - '10.0.0.0/8'
|
||||
|
@ -563,6 +565,19 @@ retention:
|
|||
#
|
||||
#next_link_domain_whitelist: ["matrix.org"]
|
||||
|
||||
# Templates to use when generating email or HTML page contents.
|
||||
#
|
||||
templates:
|
||||
# Directory in which Synapse will try to find template files to use to generate
|
||||
# email or HTML page contents.
|
||||
# If not set, or a file is not found within the template directory, a default
|
||||
# template from within the Synapse package will be used.
|
||||
#
|
||||
# See https://matrix-org.github.io/synapse/latest/templates.html for more
|
||||
# information about using custom templates.
|
||||
#
|
||||
#custom_template_directory: /path/to/custom/templates/
|
||||
|
||||
|
||||
## TLS ##
|
||||
|
||||
|
@ -711,6 +726,15 @@ caches:
|
|||
#
|
||||
#expiry_time: 30m
|
||||
|
||||
# Controls how long the results of a /sync request are cached for after
|
||||
# a successful response is returned. A higher duration can help clients with
|
||||
# intermittent connections, at the cost of higher memory usage.
|
||||
#
|
||||
# By default, this is zero, which means that sync responses are not cached
|
||||
# at all.
|
||||
#
|
||||
#sync_response_cache_duration: 2m
|
||||
|
||||
|
||||
## Database ##
|
||||
|
||||
|
@ -963,6 +987,8 @@ media_store_path: "DATADIR/media_store"
|
|||
# This must be specified if url_preview_enabled is set. It is recommended that
|
||||
# you uncomment the following list as a starting point.
|
||||
#
|
||||
# Note: The value is ignored when an HTTP proxy is in use
|
||||
#
|
||||
#url_preview_ip_range_blacklist:
|
||||
# - '127.0.0.0/8'
|
||||
# - '10.0.0.0/8'
|
||||
|
@ -1882,6 +1908,9 @@ cas_config:
|
|||
# Additional settings to use with single-sign on systems such as OpenID Connect,
|
||||
# SAML2 and CAS.
|
||||
#
|
||||
# Server admins can configure custom templates for pages related to SSO. See
|
||||
# https://matrix-org.github.io/synapse/latest/templates.html for more information.
|
||||
#
|
||||
sso:
|
||||
# A list of client URLs which are whitelisted so that the user does not
|
||||
# have to confirm giving access to their account to the URL. Any client
|
||||
|
@ -1914,169 +1943,6 @@ sso:
|
|||
#
|
||||
#update_profile_information: true
|
||||
|
||||
# Directory in which Synapse will try to find the template files below.
|
||||
# If not set, or the files named below are not found within the template
|
||||
# directory, default templates from within the Synapse package will be used.
|
||||
#
|
||||
# Synapse will look for the following templates in this directory:
|
||||
#
|
||||
# * HTML page to prompt the user to choose an Identity Provider during
|
||||
# login: 'sso_login_idp_picker.html'.
|
||||
#
|
||||
# This is only used if multiple SSO Identity Providers are configured.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * redirect_url: the URL that the user will be redirected to after
|
||||
# login.
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * providers: a list of available Identity Providers. Each element is
|
||||
# an object with the following attributes:
|
||||
#
|
||||
# * idp_id: unique identifier for the IdP
|
||||
# * idp_name: user-facing name for the IdP
|
||||
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
|
||||
# for the IdP
|
||||
# * idp_brand: if specified in the IdP config, a textual identifier
|
||||
# for the brand of the IdP
|
||||
#
|
||||
# The rendered HTML page should contain a form which submits its results
|
||||
# back as a GET request, with the following query parameters:
|
||||
#
|
||||
# * redirectUrl: the client redirect URI (ie, the `redirect_url` passed
|
||||
# to the template)
|
||||
#
|
||||
# * idp: the 'idp_id' of the chosen IDP.
|
||||
#
|
||||
# * HTML page to prompt new users to enter a userid and confirm other
|
||||
# details: 'sso_auth_account_details.html'. This is only shown if the
|
||||
# SSO implementation (with any user_mapping_provider) does not return
|
||||
# a localpart.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * idp: details of the SSO Identity Provider that the user logged in
|
||||
# with: an object with the following attributes:
|
||||
#
|
||||
# * idp_id: unique identifier for the IdP
|
||||
# * idp_name: user-facing name for the IdP
|
||||
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
|
||||
# for the IdP
|
||||
# * idp_brand: if specified in the IdP config, a textual identifier
|
||||
# for the brand of the IdP
|
||||
#
|
||||
# * user_attributes: an object containing details about the user that
|
||||
# we received from the IdP. May have the following attributes:
|
||||
#
|
||||
# * display_name: the user's display_name
|
||||
# * emails: a list of email addresses
|
||||
#
|
||||
# The template should render a form which submits the following fields:
|
||||
#
|
||||
# * username: the localpart of the user's chosen user id
|
||||
#
|
||||
# * HTML page allowing the user to consent to the server's terms and
|
||||
# conditions. This is only shown for new users, and only if
|
||||
# `user_consent.require_at_registration` is set.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * user_id: the user's matrix proposed ID.
|
||||
#
|
||||
# * user_profile.display_name: the user's proposed display name, if any.
|
||||
#
|
||||
# * consent_version: the version of the terms that the user will be
|
||||
# shown
|
||||
#
|
||||
# * terms_url: a link to the page showing the terms.
|
||||
#
|
||||
# The template should render a form which submits the following fields:
|
||||
#
|
||||
# * accepted_version: the version of the terms accepted by the user
|
||||
# (ie, 'consent_version' from the input variables).
|
||||
#
|
||||
# * HTML page for a confirmation step before redirecting back to the client
|
||||
# with the login token: 'sso_redirect_confirm.html'.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
#
|
||||
# * redirect_url: the URL the user is about to be redirected to.
|
||||
#
|
||||
# * display_url: the same as `redirect_url`, but with the query
|
||||
# parameters stripped. The intention is to have a
|
||||
# human-readable URL to show to users, not to use it as
|
||||
# the final address to redirect to.
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * new_user: a boolean indicating whether this is the user's first time
|
||||
# logging in.
|
||||
#
|
||||
# * user_id: the user's matrix ID.
|
||||
#
|
||||
# * user_profile.avatar_url: an MXC URI for the user's avatar, if any.
|
||||
# None if the user has not set an avatar.
|
||||
#
|
||||
# * user_profile.display_name: the user's display name. None if the user
|
||||
# has not set a display name.
|
||||
#
|
||||
# * HTML page which notifies the user that they are authenticating to confirm
|
||||
# an operation on their account during the user interactive authentication
|
||||
# process: 'sso_auth_confirm.html'.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * redirect_url: the URL the user is about to be redirected to.
|
||||
#
|
||||
# * description: the operation which the user is being asked to confirm
|
||||
#
|
||||
# * idp: details of the Identity Provider that we will use to confirm
|
||||
# the user's identity: an object with the following attributes:
|
||||
#
|
||||
# * idp_id: unique identifier for the IdP
|
||||
# * idp_name: user-facing name for the IdP
|
||||
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
|
||||
# for the IdP
|
||||
# * idp_brand: if specified in the IdP config, a textual identifier
|
||||
# for the brand of the IdP
|
||||
#
|
||||
# * HTML page shown after a successful user interactive authentication session:
|
||||
# 'sso_auth_success.html'.
|
||||
#
|
||||
# Note that this page must include the JavaScript which notifies of a successful authentication
|
||||
# (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
|
||||
#
|
||||
# This template has no additional variables.
|
||||
#
|
||||
# * HTML page shown after a user-interactive authentication session which
|
||||
# does not map correctly onto the expected user: 'sso_auth_bad_user.html'.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * server_name: the homeserver's name.
|
||||
# * user_id_to_verify: the MXID of the user that we are trying to
|
||||
# validate.
|
||||
#
|
||||
# * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
|
||||
# attempts to login: 'sso_account_deactivated.html'.
|
||||
#
|
||||
# This template has no additional variables.
|
||||
#
|
||||
# * HTML page to display to users if something goes wrong during the
|
||||
# OpenID Connect authentication process: 'sso_error.html'.
|
||||
#
|
||||
# When rendering, this template is given two variables:
|
||||
# * error: the technical name of the error
|
||||
# * error_description: a human-readable message for the error
|
||||
#
|
||||
# You can see the default templates at:
|
||||
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
|
||||
# JSON web token integration. The following settings can be used to make
|
||||
# Synapse use JSON web tokens for authentication, instead of its internal
|
||||
|
@ -2207,6 +2073,9 @@ ui_auth:
|
|||
|
||||
# Configuration for sending emails from Synapse.
|
||||
#
|
||||
# Server admins can configure custom templates for email content. See
|
||||
# https://matrix-org.github.io/synapse/latest/templates.html for more information.
|
||||
#
|
||||
email:
|
||||
# The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
|
||||
#
|
||||
|
@ -2229,6 +2098,14 @@ email:
|
|||
#
|
||||
#require_transport_security: true
|
||||
|
||||
# Uncomment the following to disable TLS for SMTP.
|
||||
#
|
||||
# By default, if the server supports TLS, it will be used, and the server
|
||||
# must present a certificate that is valid for 'smtp_host'. If this option
|
||||
# is set to false, TLS will not be used.
|
||||
#
|
||||
#enable_tls: false
|
||||
|
||||
# notif_from defines the "From" address to use when sending emails.
|
||||
# It must be set if email sending is enabled.
|
||||
#
|
||||
|
@ -2275,49 +2152,6 @@ email:
|
|||
#
|
||||
#invite_client_location: https://app.element.io
|
||||
|
||||
# Directory in which Synapse will try to find the template files below.
|
||||
# If not set, or the files named below are not found within the template
|
||||
# directory, default templates from within the Synapse package will be used.
|
||||
#
|
||||
# Synapse will look for the following templates in this directory:
|
||||
#
|
||||
# * The contents of email notifications of missed events: 'notif_mail.html' and
|
||||
# 'notif_mail.txt'.
|
||||
#
|
||||
# * The contents of account expiry notice emails: 'notice_expiry.html' and
|
||||
# 'notice_expiry.txt'.
|
||||
#
|
||||
# * The contents of password reset emails sent by the homeserver:
|
||||
# 'password_reset.html' and 'password_reset.txt'
|
||||
#
|
||||
# * An HTML page that a user will see when they follow the link in the password
|
||||
# reset email. The user will be asked to confirm the action before their
|
||||
# password is reset: 'password_reset_confirmation.html'
|
||||
#
|
||||
# * HTML pages for success and failure that a user will see when they confirm
|
||||
# the password reset flow using the page above: 'password_reset_success.html'
|
||||
# and 'password_reset_failure.html'
|
||||
#
|
||||
# * The contents of address verification emails sent during registration:
|
||||
# 'registration.html' and 'registration.txt'
|
||||
#
|
||||
# * HTML pages for success and failure that a user will see when they follow
|
||||
# the link in an address verification email sent during registration:
|
||||
# 'registration_success.html' and 'registration_failure.html'
|
||||
#
|
||||
# * The contents of address verification emails sent when an address is added
|
||||
# to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
|
||||
#
|
||||
# * HTML pages for success and failure that a user will see when they follow
|
||||
# the link in an address verification email sent when an address is added
|
||||
# to a Matrix account: 'add_threepid_success.html' and
|
||||
# 'add_threepid_failure.html'
|
||||
#
|
||||
# You can see the default templates at:
|
||||
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
# Subjects to use when sending emails from Synapse.
|
||||
#
|
||||
# The placeholder '%(app)s' will be replaced with the value of the 'app_name'
|
||||
|
|
|
@ -24,18 +24,31 @@ handlers:
|
|||
backupCount: 3 # Does not include the current log file.
|
||||
encoding: utf8
|
||||
|
||||
# Default to buffering writes to log file for efficiency. This means that
|
||||
# will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
|
||||
# logs will still be flushed immediately.
|
||||
# Default to buffering writes to log file for efficiency.
|
||||
# WARNING/ERROR logs will still be flushed immediately, but there will be a
|
||||
# delay (of up to `period` seconds, or until the buffer is full with
|
||||
# `capacity` messages) before INFO/DEBUG logs get written.
|
||||
buffer:
|
||||
class: logging.handlers.MemoryHandler
|
||||
class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
|
||||
target: file
|
||||
# The capacity is the number of log lines that are buffered before
|
||||
# being written to disk. Increasing this will lead to better
|
||||
|
||||
# The capacity is the maximum number of log lines that are buffered
|
||||
# before being written to disk. Increasing this will lead to better
|
||||
# performance, at the expense of it taking longer for log lines to
|
||||
# be written to disk.
|
||||
# This parameter is required.
|
||||
capacity: 10
|
||||
flushLevel: 30 # Flush for WARNING logs as well
|
||||
|
||||
# Logs with a level at or above the flush level will cause the buffer to
|
||||
# be flushed immediately.
|
||||
# Default value: 40 (ERROR)
|
||||
# Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
|
||||
flushLevel: 30 # Flush immediately for WARNING logs and higher
|
||||
|
||||
# The period of time, in seconds, between forced flushes.
|
||||
# Messages will not be delayed for longer than this time.
|
||||
# Default value: 5 seconds
|
||||
period: 5
|
||||
|
||||
# A handler that writes logs to stderr. Unused by default, but can be used
|
||||
# instead of "buffer" and "file" in the logger handlers.
|
||||
|
|
74
docs/setup/forward_proxy.md
Normal file
|
@ -0,0 +1,74 @@
|
|||
# Using a forward proxy with Synapse
|
||||
|
||||
You can use Synapse with a forward or outbound proxy. An example of when
|
||||
this is necessary is in corporate environments behind a DMZ (demilitarized zone).
|
||||
Synapse supports routing outbound HTTP(S) requests via a proxy. Only HTTP(S)
|
||||
proxies are supported; SOCKS proxies are not.
|
||||
|
||||
## Configure
|
||||
|
||||
The `http_proxy`, `https_proxy`, `no_proxy` environment variables are used to
|
||||
specify proxy settings. The environment variable names are not case sensitive.
|
||||
- `http_proxy`: Proxy server to use for HTTP requests.
|
||||
- `https_proxy`: Proxy server to use for HTTPS requests.
|
||||
- `no_proxy`: Comma-separated list of hosts, IP addresses, or IP ranges in CIDR
|
||||
format which should not use the proxy. Synapse will directly connect to these hosts.
|
||||
|
||||
The `http_proxy` and `https_proxy` environment variables have the form: `[scheme://][<username>:<password>@]<host>[:<port>]`
|
||||
- Supported schemes are `http://` and `https://`. The default scheme is `http://`
|
||||
for compatibility reasons; it is recommended to set a scheme explicitly. If the scheme is set
|
||||
to `https://`, the connection between Synapse and the proxy uses TLS.
|
||||
|
||||
**NOTE**: Synapse validates the certificates. If the certificate is not
|
||||
valid, then the connection is dropped.
|
||||
- The default port, if not given, is `1080`.
|
||||
- Username and password are optional and will be used to authenticate against
|
||||
the proxy.
|
||||
|
||||
**Examples**
|
||||
- HTTP_PROXY=http://USERNAME:PASSWORD@10.0.1.1:8080/
|
||||
- HTTPS_PROXY=http://USERNAME:PASSWORD@proxy.example.com:8080/
|
||||
- NO_PROXY=master.hostname.example.com,10.1.0.0/16,172.30.0.0/16
|
||||
|
||||
**NOTE**:
|
||||
Synapse does not apply the IP blacklist to connections through the proxy (since
|
||||
the DNS resolution is done by the proxy). It is expected that the proxy or firewall
|
||||
will apply blacklisting of IP addresses.
|
||||
|
||||
## Connection types
|
||||
|
||||
The proxy will be **used** for:
|
||||
|
||||
- push
|
||||
- url previews
|
||||
- phone-home stats
|
||||
- recaptcha validation
|
||||
- CAS auth validation
|
||||
- OpenID Connect
|
||||
- Outbound federation
|
||||
- Federation (checking public key revocation)
|
||||
- Fetching public keys of other servers
|
||||
- Downloading remote media
|
||||
|
||||
It will **not be used** for:
|
||||
|
||||
- Application Services
|
||||
- Identity servers
|
||||
- In worker configurations
|
||||
- connections between workers
|
||||
- connections from workers to Redis
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If a proxy server is used with TLS (HTTPS) and no connections are established,
|
||||
it is most likely due to the proxy's certificates. To test this, the validation
|
||||
in Synapse can be deactivated.
|
||||
|
||||
**NOTE**: This has an impact on security and is for testing purposes only!
|
||||
|
||||
To deactivate the certificate validation, the following setting must be made in
|
||||
[homeserver.yaml](../usage/configuration/homeserver_sample_config.md).
|
||||
|
||||
```yaml
|
||||
use_insecure_ssl_client_just_for_testing_do_not_use: true
|
||||
```
|
239
docs/templates.md
Normal file
|
@ -0,0 +1,239 @@
|
|||
# Templates
|
||||
|
||||
Synapse uses parametrised templates to generate the content of emails it sends and
|
||||
webpages it shows to users.
|
||||
|
||||
By default, Synapse will use the templates listed [here](https://github.com/matrix-org/synapse/tree/master/synapse/res/templates).
|
||||
Server admins can configure an additional directory for Synapse to look for templates
|
||||
in, allowing them to specify custom templates:
|
||||
|
||||
```yaml
|
||||
templates:
|
||||
custom_templates_directory: /path/to/custom/templates/
|
||||
```
|
||||
|
||||
If this setting is not set, or the files named below are not found within the directory,
|
||||
default templates from within the Synapse package will be used.
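
Conceptually, the lookup works like a Jinja `FileSystemLoader` with the custom directory listed first. The sketch below is illustrative rather than Synapse's internal code, and the paths are placeholders:

```python
import jinja2

search_directories = [
    "/path/to/custom/templates/",       # custom_templates_directory, if set
    "/path/to/synapse/res/templates/",  # packaged defaults (illustrative)
]
env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(search_directories),
    autoescape=True,
)
# Uses the custom notif_mail.html if present, else the packaged default.
template = env.get_template("notif_mail.html")
```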
|
||||
|
||||
Templates that are given variables when being rendered are rendered using [Jinja 2](https://jinja.palletsprojects.com/en/2.11.x/).
|
||||
Templates rendered by Jinja 2 can also access two functions on top of the functions
|
||||
already available as part of Jinja 2:
|
||||
|
||||
```python
|
||||
format_ts(value: int, format: str) -> str
|
||||
```
|
||||
|
||||
Formats a timestamp in milliseconds.
|
||||
|
||||
Example: `reason.last_sent_ts|format_ts("%c")`
|
||||
|
||||
```python
|
||||
mxc_to_http(value: str, width: int, height: int, resize_method: str = "crop") -> str
|
||||
```
|
||||
|
||||
Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver's
|
||||
`public_baseurl` configuration setting as the URL's base.
|
||||
|
||||
Example: `message.sender_avatar_url|mxc_to_http(32,32)`
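
To illustrate the semantics, `format_ts` behaves roughly like `time.strftime` applied to a millisecond timestamp. The sketch below wires up a stand-in filter; it is illustrative, not Synapse's registration code, and `mxc_to_http` is omitted because it depends on the homeserver's `public_baseurl`.

```python
import time

import jinja2

def format_ts(value: int, format: str) -> str:
    # Milliseconds -> seconds, formatted as local time.
    return time.strftime(format, time.localtime(value / 1000))

env = jinja2.Environment(autoescape=True)
env.filters["format_ts"] = format_ts

tpl = env.from_string("Last sent: {{ last_sent_ts|format_ts('%c') }}")
print(tpl.render(last_sent_ts=1629000000000))
```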
|
||||
|
||||
|
||||
## Email templates
|
||||
|
||||
Below are the templates Synapse will look for when generating the content of an email:
|
||||
|
||||
* `notif_mail.html` and `notif_mail.txt`: The contents of email notifications of missed
|
||||
events.
|
||||
When rendering, this template is given the following variables:
|
||||
* `user_display_name`: the display name for the user receiving the notification
|
||||
* `unsubscribe_link`: the link users can click to unsubscribe from email notifications
|
||||
* `summary_text`: a summary of the notification(s). The text used can be customised
|
||||
by configuring the various settings in the `email.subjects` section of the
|
||||
configuration file.
|
||||
* `rooms`: a list of rooms containing events to include in the email. Each element is
|
||||
an object with the following attributes:
|
||||
* `title`: a human-readable name for the room
|
||||
* `hash`: a hash of the ID of the room
|
||||
* `invite`: a boolean, which is `True` if the room is an invite the user hasn't
|
||||
accepted yet, `False` otherwise
|
||||
* `notifs`: a list of events, or an empty list if `invite` is `True`. Each element
|
||||
is an object with the following attributes:
|
||||
* `link`: a `matrix.to` link to the event
|
||||
* `ts`: the time in milliseconds at which the event was received
|
||||
* `messages`: a list of messages containing one message before the event, the
|
||||
message in the event, and one message after the event. Each element is an
|
||||
object with the following attributes:
|
||||
* `event_type`: the type of the event
|
||||
* `is_historical`: a boolean, which is `False` if the message is the one
|
||||
that triggered the notification, `True` otherwise
|
||||
* `id`: the ID of the event
|
||||
* `ts`: the time in milliseconds at which the event was sent
|
||||
* `sender_name`: the display name for the event's sender
|
||||
* `sender_avatar_url`: the avatar URL (as a `mxc://` URL) for the event's
|
||||
sender
|
||||
* `sender_hash`: a hash of the user ID of the sender
|
||||
* `link`: a `matrix.to` link to the room
|
||||
* `reason`: information on the event that triggered the email to be sent. It's an
|
||||
object with the following attributes:
|
||||
* `room_id`: the ID of the room the event was sent in
|
||||
* `room_name`: a human-readable name for the room the event was sent in
|
||||
* `now`: the current time in milliseconds
|
||||
* `received_at`: the time in milliseconds at which the event was received
|
||||
* `delay_before_mail_ms`: the amount of time in milliseconds Synapse always waits
|
||||
before ever emailing about a notification (to give the user a chance to respond
|
||||
to other push notifications or to notice the window)
|
||||
* `last_sent_ts`: the time in milliseconds at which a notification was last sent
|
||||
for an event in this room
|
||||
* `throttle_ms`: the minimum amount of time in milliseconds between two
|
||||
notifications for this room
|
||||
* `password_reset.html` and `password_reset.txt`: The contents of password reset emails
|
||||
sent by the homeserver.
|
||||
When rendering, these templates are given a `link` variable which contains the link the
|
||||
user must click in order to reset their password.
|
||||
* `registration.html` and `registration.txt`: The contents of address verification emails
|
||||
sent during registration.
|
||||
When rendering, these templates are given a `link` variable which contains the link the
|
||||
user must click in order to validate their email address.
|
||||
* `add_threepid.html` and `add_threepid.txt`: The contents of address verification emails
|
||||
sent when an address is added to a Matrix account.
|
||||
When rendering, these templates are given a `link` variable which contains the link the
|
||||
user must click in order to validate their email address.
|
||||
|
||||
|
||||
## HTML page templates for registration and password reset
|
||||
|
||||
Below are the templates Synapse will look for when generating pages related to
|
||||
registration and password reset:
|
||||
|
||||
* `password_reset_confirmation.html`: An HTML page that a user will see when they follow
|
||||
the link in the password reset email. The user will be asked to confirm the action
|
||||
before their password is reset.
|
||||
When rendering, this template is given the following variables:
|
||||
* `sid`: the session ID for the password reset
|
||||
* `token`: the token for the password reset
|
||||
* `client_secret`: the client secret for the password reset
|
||||
* `password_reset_success.html` and `password_reset_failure.html`: HTML pages for success
|
||||
and failure that a user will see when they confirm the password reset flow using the
|
||||
page above.
|
||||
When rendering, `password_reset_success.html` is given no variables, and
|
||||
`password_reset_failure.html` is given a `failure_reason`, which contains the reason
|
||||
for the password reset failure.
|
||||
* `registration_success.html` and `registration_failure.html`: HTML pages for success and
|
||||
failure that a user will see when they follow the link in an address verification email
|
||||
sent during registration.
|
||||
When rendering, `registration_success.html` is given no variables, and
|
||||
`registration_failure.html` is given a `failure_reason`, which contains the reason
|
||||
for the registration failure.
|
||||
* `add_threepid_success.html` and `add_threepid_failure.html`: HTML pages for success and
|
||||
failure that a user will see when they follow the link in an address verification email
|
||||
sent when an address is added to a Matrix account.
|
||||
When rendering, `add_threepid_success.html` is given no variables, and
|
||||
`add_threepid_failure.html` is given a `failure_reason`, which contains the reason
|
||||
for the failure.
|
||||
|
||||
|
||||
## HTML page templates for Single Sign-On (SSO)
|
||||
|
||||
Below are the templates Synapse will look for when generating pages related to SSO:
|
||||
|
||||
* `sso_login_idp_picker.html`: HTML page to prompt the user to choose an
|
||||
Identity Provider during login.
|
||||
This is only used if multiple SSO Identity Providers are configured.
|
||||
When rendering, this template is given the following variables:
|
||||
* `redirect_url`: the URL that the user will be redirected to after
|
||||
login.
|
||||
* `server_name`: the homeserver's name.
|
||||
* `providers`: a list of available Identity Providers. Each element is
|
||||
an object with the following attributes:
|
||||
* `idp_id`: unique identifier for the IdP
|
||||
* `idp_name`: user-facing name for the IdP
|
||||
* `idp_icon`: if specified in the IdP config, an MXC URI for an icon
|
||||
for the IdP
|
||||
* `idp_brand`: if specified in the IdP config, a textual identifier
|
||||
for the brand of the IdP
|
||||
The rendered HTML page should contain a form which submits its results
|
||||
back as a GET request, with the following query parameters:
|
||||
* `redirectUrl`: the client redirect URI (ie, the `redirect_url` passed
|
||||
to the template)
|
||||
* `idp`: the 'idp_id' of the chosen IDP.
|
||||
* `sso_auth_account_details.html`: HTML page to prompt new users to enter a
|
||||
userid and confirm other details. This is only shown if the
|
||||
SSO implementation (with any `user_mapping_provider`) does not return
|
||||
a localpart.
|
||||
When rendering, this template is given the following variables:
|
||||
* `server_name`: the homeserver's name.
|
||||
* `idp`: details of the SSO Identity Provider that the user logged in
|
||||
with: an object with the following attributes:
|
||||
* `idp_id`: unique identifier for the IdP
|
||||
* `idp_name`: user-facing name for the IdP
|
||||
* `idp_icon`: if specified in the IdP config, an MXC URI for an icon
|
||||
for the IdP
|
||||
* `idp_brand`: if specified in the IdP config, a textual identifier
|
||||
for the brand of the IdP
|
||||
* `user_attributes`: an object containing details about the user that
|
||||
we received from the IdP. May have the following attributes:
|
||||
* `display_name`: the user's display name
|
||||
* `emails`: a list of email addresses
|
||||
The template should render a form which submits the following fields:
|
||||
* `username`: the localpart of the user's chosen user id
|
||||
* `sso_new_user_consent.html`: HTML page allowing the user to consent to the
|
||||
server's terms and conditions. This is only shown for new users, and only if
|
||||
`user_consent.require_at_registration` is set.
|
||||
When rendering, this template is given the following variables:
|
||||
* `server_name`: the homeserver's name.
|
||||
* `user_id`: the user's matrix proposed ID.
|
||||
* `user_profile.display_name`: the user's proposed display name, if any.
|
||||
* `consent_version`: the version of the terms that the user will be
|
||||
shown
|
||||
* `terms_url`: a link to the page showing the terms.
|
||||
The template should render a form which submits the following fields:
|
||||
* `accepted_version`: the version of the terms accepted by the user
|
||||
(ie, 'consent_version' from the input variables).
|
||||
* `sso_redirect_confirm.html`: HTML page for a confirmation step before redirecting back
|
||||
to the client with the login token.
|
||||
When rendering, this template is given the following variables:
|
||||
* `redirect_url`: the URL the user is about to be redirected to.
|
||||
* `display_url`: the same as `redirect_url`, but with the query
|
||||
parameters stripped. The intention is to have a
|
||||
human-readable URL to show to users, not to use it as
|
||||
the final address to redirect to.
|
||||
* `server_name`: the homeserver's name.
|
||||
* `new_user`: a boolean indicating whether this is the user's first time
|
||||
logging in.
|
||||
* `user_id`: the user's matrix ID.
|
||||
* `user_profile.avatar_url`: an MXC URI for the user's avatar, if any.
|
||||
`None` if the user has not set an avatar.
|
||||
* `user_profile.display_name`: the user's display name. `None` if the user
|
||||
has not set a display name.
|
||||
* `sso_auth_confirm.html`: HTML page which notifies the user that they are authenticating
|
||||
to confirm an operation on their account during the user interactive authentication
|
||||
process.
|
||||
When rendering, this template is given the following variables:
|
||||
* `redirect_url`: the URL the user is about to be redirected to.
|
||||
* `description`: the operation which the user is being asked to confirm
|
||||
* `idp`: details of the Identity Provider that we will use to confirm
|
||||
the user's identity: an object with the following attributes:
|
||||
* `idp_id`: unique identifier for the IdP
|
||||
* `idp_name`: user-facing name for the IdP
|
||||
* `idp_icon`: if specified in the IdP config, an MXC URI for an icon
|
||||
for the IdP
|
||||
* `idp_brand`: if specified in the IdP config, a textual identifier
|
||||
for the brand of the IdP
|
||||
* `sso_auth_success.html`: HTML page shown after a successful user interactive
|
||||
authentication session.
|
||||
Note that this page must include the JavaScript which notifies of a successful
|
||||
authentication (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
|
||||
This template has no additional variables.
|
||||
* `sso_auth_bad_user.html`: HTML page shown after a user-interactive authentication
|
||||
session which does not map correctly onto the expected user.
|
||||
When rendering, this template is given the following variables:
|
||||
* `server_name`: the homeserver's name.
|
||||
* `user_id_to_verify`: the MXID of the user that we are trying to
|
||||
validate.
|
||||
* `sso_account_deactivated.html`: HTML page shown during single sign-on if a deactivated
|
||||
user (according to Synapse's database) attempts to log in.
|
||||
This template has no additional variables.
|
||||
* `sso_error.html`: HTML page to display to users if something goes wrong during the
|
||||
OpenID Connect authentication process.
|
||||
When rendering, this template is given two variables:
|
||||
* `error`: the technical name of the error
|
||||
* `error_description`: a human-readable message for the error
|
|
@ -86,6 +86,50 @@ process, for example:
|
|||
```
|
||||
|
||||
|
||||
# Upgrading to v1.xx.0
|
||||
|
||||
## Add support for routing outbound HTTP requests via a proxy for federation
|
||||
|
||||
Since Synapse 1.6.0 (2019-11-26) you can set a proxy for outbound HTTP requests via
|
||||
`http_proxy`/`https_proxy` environment variables. This proxy was used for:
|
||||
- push
|
||||
- url previews
|
||||
- phone-home stats
|
||||
- recaptcha validation
|
||||
- CAS auth validation
|
||||
- OpenID Connect
|
||||
- Federation (checking public key revocation)
|
||||
|
||||
In this version we have added support for outbound requests for:
|
||||
- Outbound federation
|
||||
- Downloading remote media
|
||||
- Fetching public keys of other servers
|
||||
|
||||
These requests use the same proxy configuration. If you have a proxy configured, we
|
||||
recommend verifying it. It may be necessary to adjust the `no_proxy`
|
||||
environment variable.
|
||||
|
||||
See [using a forward proxy with Synapse documentation](setup/forward_proxy.md) for
|
||||
details.
|
||||
|
||||
## Deprecation of `template_dir`
|
||||
|
||||
The `template_dir` settings in the `sso`, `account_validity` and `email` sections of the
|
||||
configuration file are now deprecated. Server admins should use the new
|
||||
`templates.custom_template_directory` setting in the configuration file and use one single
|
||||
custom template directory for all aforementioned features. Template file names remain
|
||||
unchanged. See [the related documentation](https://matrix-org.github.io/synapse/latest/templates.html)
|
||||
for more information and examples.
|
||||
|
||||
We plan to remove support for these settings in October 2021.
|
||||
|
||||
## `/_synapse/admin/v1/users/{userId}/media` must be handled by media workers
|
||||
|
||||
The [media repository worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#synapseappmedia_repository)
|
||||
has been updated to reflect that calls to `/_synapse/admin/v1/users/{userId}/media`
|
||||
must now be handled by media repository workers. This is due to the new `DELETE` method
|
||||
of this endpoint modifying the media store.
|
||||
|
||||
# Upgrading to v1.39.0
|
||||
|
||||
## Deprecation of the current third-party rules module interface
|
||||
|
|
|
@ -214,6 +214,7 @@ expressions:
|
|||
^/_matrix/federation/v1/send/
|
||||
|
||||
# Client API requests
|
||||
^/_matrix/client/(api/v1|r0|unstable)/createRoom$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
|
||||
|
@ -425,10 +426,12 @@ Handles the media repository. It can handle all endpoints starting with:
|
|||
^/_synapse/admin/v1/user/.*/media.*$
|
||||
^/_synapse/admin/v1/media/.*$
|
||||
^/_synapse/admin/v1/quarantine_media/.*$
|
||||
^/_synapse/admin/v1/users/.*/media$
|
||||
|
||||
You should also set `enable_media_repo: False` in the shared configuration
|
||||
file to stop the main synapse running background jobs related to managing the
|
||||
media repository.
|
||||
media repository. Note that doing so will prevent the main process from being
|
||||
able to handle the above endpoints.
|
||||
|
||||
In the `media_repository` worker configuration file, configure the http listener to
|
||||
expose the `media` resource. For example:
|
||||
|
|
1
mypy.ini
|
@ -86,6 +86,7 @@ files =
|
|||
tests/test_event_auth.py,
|
||||
tests/test_utils,
|
||||
tests/handlers/test_password_providers.py,
|
||||
tests/handlers/test_room_summary.py,
|
||||
tests/rest/client/v1/test_login.py,
|
||||
tests/rest/client/v2_alpha/test_auth.py,
|
||||
tests/util/test_itertools.py,
|
||||
|
|
|
@ -20,12 +20,12 @@ from concurrent.futures import ThreadPoolExecutor
|
|||
from typing import Optional, Sequence
|
||||
|
||||
DISTS = (
|
||||
"debian:buster",
|
||||
"debian:buster", # oldstable: EOL 2022-08
|
||||
"debian:bullseye",
|
||||
"debian:bookworm",
|
||||
"debian:sid",
|
||||
"ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:groovy", # 20.10 (EOL 2021-07-07)
|
||||
"ubuntu:hirsute", # 21.04 (EOL 2022-01-05)
|
||||
)
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ set -e
|
|||
git remote set-branches --add origin develop
|
||||
git fetch -q origin develop
|
||||
|
||||
pr="$BUILDKITE_PULL_REQUEST"
|
||||
pr="$PULL_REQUEST_NUMBER"
|
||||
|
||||
# if there are changes in the debian directory, check that the debian changelog
|
||||
# has been updated
|
||||
|
|
|
@ -65,4 +65,4 @@ if [[ -n "$1" ]]; then
|
|||
fi
|
||||
|
||||
# Run the tests!
|
||||
go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
|
||||
go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403,msc2716 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
|
||||
|
|
|
@ -94,7 +94,7 @@ else
|
|||
"scripts-dev/build_debian_packages"
|
||||
"scripts-dev/sign_json"
|
||||
"scripts-dev/update_database"
|
||||
"contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite"
|
||||
"contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
|
||||
)
|
||||
fi
|
||||
fi
|
||||
|
|
|
@ -47,7 +47,7 @@ try:
|
|||
except ImportError:
|
||||
pass
|
||||
|
||||
__version__ = "1.40.0"
|
||||
__version__ = "1.41.0rc1"
|
||||
|
||||
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
|
||||
# We import here so that we don't have to install a bunch of deps when
|
||||
|
|
|
@ -76,6 +76,8 @@ class RoomVersion:
|
|||
# MSC2716: Adds m.room.power_levels -> content.historical field to control
|
||||
# whether "insertion", "chunk", "marker" events can be sent
|
||||
msc2716_historical = attr.ib(type=bool)
|
||||
# MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
|
||||
msc2716_redactions = attr.ib(type=bool)
|
||||
|
||||
|
||||
class RoomVersions:
|
||||
|
@ -92,6 +94,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V2 = RoomVersion(
|
||||
"2",
|
||||
|
@ -106,6 +109,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V3 = RoomVersion(
|
||||
"3",
|
||||
|
@ -120,6 +124,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V4 = RoomVersion(
|
||||
"4",
|
||||
|
@ -134,6 +139,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V5 = RoomVersion(
|
||||
"5",
|
||||
|
@ -148,6 +154,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V6 = RoomVersion(
|
||||
"6",
|
||||
|
@ -162,6 +169,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
MSC2176 = RoomVersion(
|
||||
"org.matrix.msc2176",
|
||||
|
@ -176,6 +184,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V7 = RoomVersion(
|
||||
"7",
|
||||
|
@ -190,20 +199,7 @@ class RoomVersions:
|
|||
msc3083_join_rules=False,
|
||||
msc2403_knocking=True,
|
||||
msc2716_historical=False,
|
||||
)
|
||||
MSC2716 = RoomVersion(
|
||||
"org.matrix.msc2716",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc2403_knocking=True,
|
||||
msc2716_historical=True,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
V8 = RoomVersion(
|
||||
"8",
|
||||
|
@ -218,6 +214,37 @@ class RoomVersions:
|
|||
msc3083_join_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc2716_historical=False,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
MSC2716 = RoomVersion(
|
||||
"org.matrix.msc2716",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc2403_knocking=True,
|
||||
msc2716_historical=True,
|
||||
msc2716_redactions=False,
|
||||
)
|
||||
MSC2716v2 = RoomVersion(
|
||||
"org.matrix.msc2716v2",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc2403_knocking=True,
|
||||
msc2716_historical=True,
|
||||
msc2716_redactions=True,
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -38,7 +38,6 @@ from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
|||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.server import HomeServer
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
@ -58,7 +57,6 @@ class AdminCmdSlavedStore(
|
|||
SlavedPushRuleStore,
|
||||
SlavedEventStore,
|
||||
SlavedClientIpStore,
|
||||
RoomStore,
|
||||
BaseSlavedStore,
|
||||
):
|
||||
pass
|
||||
|
|
|
@ -64,42 +64,41 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
|||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest.client.v1 import events, login, presence, room
|
||||
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
||||
from synapse.rest.client.v1.profile import (
|
||||
ProfileAvatarURLRestServlet,
|
||||
ProfileDisplaynameRestServlet,
|
||||
ProfileRestServlet,
|
||||
)
|
||||
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
|
||||
from synapse.rest.client.v1.voip import VoipRestServlet
|
||||
from synapse.rest.client.v2_alpha import (
|
||||
from synapse.rest.client import (
|
||||
account_data,
|
||||
events,
|
||||
groups,
|
||||
login,
|
||||
presence,
|
||||
read_marker,
|
||||
receipts,
|
||||
room,
|
||||
room_keys,
|
||||
sync,
|
||||
tags,
|
||||
user_directory,
|
||||
)
|
||||
from synapse.rest.client.v2_alpha._base import client_patterns
|
||||
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
|
||||
from synapse.rest.client.v2_alpha.account_data import (
|
||||
AccountDataServlet,
|
||||
RoomAccountDataServlet,
|
||||
)
|
||||
from synapse.rest.client.v2_alpha.devices import DevicesRestServlet
|
||||
from synapse.rest.client.v2_alpha.keys import (
|
||||
from synapse.rest.client._base import client_patterns
|
||||
from synapse.rest.client.account import ThreepidRestServlet
|
||||
from synapse.rest.client.account_data import AccountDataServlet, RoomAccountDataServlet
|
||||
from synapse.rest.client.devices import DevicesRestServlet
|
||||
from synapse.rest.client.initial_sync import InitialSyncRestServlet
|
||||
from synapse.rest.client.keys import (
|
||||
KeyChangesServlet,
|
||||
KeyQueryServlet,
|
||||
OneTimeKeyServlet,
|
||||
)
|
||||
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
|
||||
from synapse.rest.client.v2_alpha.sendtodevice import SendToDeviceRestServlet
|
||||
from synapse.rest.client.profile import (
|
||||
ProfileAvatarURLRestServlet,
|
||||
ProfileDisplaynameRestServlet,
|
||||
ProfileRestServlet,
|
||||
)
|
||||
from synapse.rest.client.push_rule import PushRuleRestServlet
|
||||
from synapse.rest.client.register import RegisterRestServlet
|
||||
from synapse.rest.client.sendtodevice import SendToDeviceRestServlet
|
||||
from synapse.rest.client.versions import VersionsRestServlet
|
||||
from synapse.rest.client.voip import VoipRestServlet
|
||||
from synapse.rest.health import HealthResource
|
||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||
from synapse.rest.synapse.client import build_synapse_client_resource_tree
|
||||
|
@ -114,6 +113,7 @@ from synapse.storage.databases.main.monthly_active_users import (
|
|||
MonthlyActiveUsersWorkerStore,
|
||||
)
|
||||
from synapse.storage.databases.main.presence import PresenceStore
|
||||
from synapse.storage.databases.main.room import RoomWorkerStore
|
||||
from synapse.storage.databases.main.search import SearchStore
|
||||
from synapse.storage.databases.main.stats import StatsStore
|
||||
from synapse.storage.databases.main.transactions import TransactionWorkerStore
|
||||
|
@ -237,7 +237,7 @@ class GenericWorkerSlavedStore(
|
|||
ClientIpWorkerStore,
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
RoomStore,
|
||||
RoomWorkerStore,
|
||||
DirectoryStore,
|
||||
SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
|
|
|
@ -237,13 +237,14 @@ class Config:
|
|||
def read_templates(
|
||||
self,
|
||||
filenames: List[str],
|
||||
custom_template_directory: Optional[str] = None,
|
||||
custom_template_directories: Optional[Iterable[str]] = None,
|
||||
) -> List[jinja2.Template]:
|
||||
"""Load a list of template files from disk using the given variables.
|
||||
|
||||
This function will attempt to load the given templates from the default Synapse
|
||||
template directory. If `custom_template_directory` is supplied, that directory
|
||||
is tried first.
|
||||
template directory. If `custom_template_directories` is supplied, any directory
|
||||
in this list is tried (in the order they appear in the list) before trying
|
||||
Synapse's default directory.
|
||||
|
||||
Files read are treated as Jinja templates. The templates are not rendered yet
|
||||
and have autoescape enabled.
|
||||
|
@ -251,8 +252,8 @@ class Config:
|
|||
Args:
|
||||
filenames: A list of template filenames to read.
|
||||
|
||||
custom_template_directory: A directory to try to look for the templates
|
||||
before using the default Synapse template directory instead.
|
||||
custom_template_directories: A list of directories in which to look for the
|
||||
templates before using the default Synapse template directory instead.
|
||||
|
||||
Raises:
|
||||
ConfigError: if the file's path is incorrect or otherwise cannot be read.
|
||||
|
@ -260,11 +261,13 @@ class Config:
|
|||
Returns:
|
||||
A list of jinja2 templates.
|
||||
"""
|
||||
search_directories = [self.default_template_dir]
|
||||
search_directories = []
|
||||
|
||||
# The loader will first look in the custom template directory (if specified) for the
|
||||
# given filename. If it doesn't find it, it will use the default template dir instead
|
||||
if custom_template_directory:
|
||||
# The loader will first look in the custom template directories (if specified)
|
||||
# for the given filename. If it doesn't find it, it will use the default
|
||||
# template dir instead.
|
||||
if custom_template_directories is not None:
|
||||
for custom_template_directory in custom_template_directories:
|
||||
# Check that the given template directory exists
|
||||
if not self.path_exists(custom_template_directory):
|
||||
raise ConfigError(
|
||||
|
@ -273,7 +276,11 @@ class Config:
|
|||
)
|
||||
|
||||
# Search the custom template directory as well
|
||||
search_directories.insert(0, custom_template_directory)
|
||||
search_directories.append(custom_template_directory)
|
||||
|
||||
# Append the default directory at the end of the list so Jinja can fallback on it
|
||||
# if a template is missing from any custom directory.
|
||||
search_directories.append(self.default_template_dir)
|
||||
|
||||
# TODO: switch to synapse.util.templates.build_jinja_env
|
||||
loader = jinja2.FileSystemLoader(search_directories)
|
||||
|
|
|
@ -78,6 +78,11 @@ class AccountValidityConfig(Config):
|
|||
)
|
||||
|
||||
# Read and store template content
|
||||
custom_template_directories = (
|
||||
self.root.server.custom_template_directory,
|
||||
account_validity_template_dir,
|
||||
)
|
||||
|
||||
(
|
||||
self.account_validity_account_renewed_template,
|
||||
self.account_validity_account_previously_renewed_template,
|
||||
|
@ -88,5 +93,5 @@ class AccountValidityConfig(Config):
|
|||
"account_previously_renewed.html",
|
||||
invalid_token_template_filename,
|
||||
],
|
||||
account_validity_template_dir,
|
||||
(td for td in custom_template_directories if td),
|
||||
)
|
||||
|
|
|
@ -151,6 +151,15 @@ class CacheConfig(Config):
|
|||
# entries are never evicted based on time.
|
||||
#
|
||||
#expiry_time: 30m
|
||||
|
||||
# Controls how long the results of a /sync request are cached for after
|
||||
# a successful response is returned. A higher duration can help clients with
|
||||
# intermittent connections, at the cost of higher memory usage.
|
||||
#
|
||||
# By default, this is zero, which means that sync responses are not cached
|
||||
# at all.
|
||||
#
|
||||
#sync_response_cache_duration: 2m
|
||||
"""
|
||||
|
||||
def read_config(self, config, **kwargs):
|
||||
|
@ -212,6 +221,10 @@ class CacheConfig(Config):
|
|||
else:
|
||||
self.expiry_time_msec = None
|
||||
|
||||
self.sync_response_cache_duration = self.parse_duration(
|
||||
cache_config.get("sync_response_cache_duration", 0)
|
||||
)
|
||||
|
||||
# Resize all caches (if necessary) with the new factors we've loaded
|
||||
self.resize_all_caches()
|
||||
|
||||
|
|
|
@@ -80,6 +80,12 @@ class EmailConfig(Config):
        self.require_transport_security = email_config.get(
            "require_transport_security", False
        )
+        self.enable_smtp_tls = email_config.get("enable_tls", True)
+        if self.require_transport_security and not self.enable_smtp_tls:
+            raise ConfigError(
+                "email.require_transport_security requires email.enable_tls to be true"
+            )
+
        if "app_name" in email_config:
            self.email_app_name = email_config["app_name"]
        else:
@@ -251,8 +257,15 @@ class EmailConfig(Config):
                registration_template_success_html,
                add_threepid_template_success_html,
            ],
-            template_dir,
+            (
+                td
+                for td in (
+                    self.root.server.custom_template_directory,
+                    template_dir,
+                )
+                if td
+            ),  # Filter out template_dir if not provided
        )

        # Render templates that do not contain any placeholders
        self.email_password_reset_template_success_html_content = (
@@ -291,8 +304,15 @@ class EmailConfig(Config):
                self.email_notif_template_text,
            ) = self.read_templates(
                [notif_template_html, notif_template_text],
-                template_dir,
+                (
+                    td
+                    for td in (
+                        self.root.server.custom_template_directory,
+                        template_dir,
+                    )
+                    if td
+                ),  # Filter out template_dir if not provided
            )

            self.email_notif_for_new_users = email_config.get(
                "notif_for_new_users", True
@@ -314,8 +334,15 @@ class EmailConfig(Config):
                self.account_validity_template_text,
            ) = self.read_templates(
                [expiry_template_html, expiry_template_text],
-                template_dir,
+                (
+                    td
+                    for td in (
+                        self.root.server.custom_template_directory,
+                        template_dir,
+                    )
+                    if td
+                ),  # Filter out template_dir if not provided
            )

        subjects_config = email_config.get("subjects", {})
        subjects = {}
@@ -346,6 +373,9 @@ class EmailConfig(Config):
        """\
        # Configuration for sending emails from Synapse.
        #
+        # Server admins can configure custom templates for email content. See
+        # https://matrix-org.github.io/synapse/latest/templates.html for more information.
+        #
        email:
          # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
          #
@@ -368,6 +398,14 @@ class EmailConfig(Config):
          #
          #require_transport_security: true

+          # Uncomment the following to disable TLS for SMTP.
+          #
+          # By default, if the server supports TLS, it will be used, and the server
+          # must present a certificate that is valid for 'smtp_host'. If this option
+          # is set to false, TLS will not be used.
+          #
+          #enable_tls: false
+
          # notif_from defines the "From" address to use when sending emails.
          # It must be set if email sending is enabled.
          #
@@ -414,49 +452,6 @@ class EmailConfig(Config):
          #
          #invite_client_location: https://app.element.io

-          # Directory in which Synapse will try to find the template files below.
-          # If not set, or the files named below are not found within the template
-          # directory, default templates from within the Synapse package will be used.
-          #
-          # Synapse will look for the following templates in this directory:
-          #
-          # * The contents of email notifications of missed events: 'notif_mail.html' and
-          #   'notif_mail.txt'.
-          #
-          # * The contents of account expiry notice emails: 'notice_expiry.html' and
-          #   'notice_expiry.txt'.
-          #
-          # * The contents of password reset emails sent by the homeserver:
-          #   'password_reset.html' and 'password_reset.txt'
-          #
-          # * An HTML page that a user will see when they follow the link in the password
-          #   reset email. The user will be asked to confirm the action before their
-          #   password is reset: 'password_reset_confirmation.html'
-          #
-          # * HTML pages for success and failure that a user will see when they confirm
-          #   the password reset flow using the page above: 'password_reset_success.html'
-          #   and 'password_reset_failure.html'
-          #
-          # * The contents of address verification emails sent during registration:
-          #   'registration.html' and 'registration.txt'
-          #
-          # * HTML pages for success and failure that a user will see when they follow
-          #   the link in an address verification email sent during registration:
-          #   'registration_success.html' and 'registration_failure.html'
-          #
-          # * The contents of address verification emails sent when an address is added
-          #   to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
-          #
-          # * HTML pages for success and failure that a user will see when they follow
-          #   the link in an address verification email sent when an address is added
-          #   to a Matrix account: 'add_threepid_success.html' and
-          #   'add_threepid_failure.html'
-          #
-          # You can see the default templates at:
-          # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
-          #
-          #template_dir: "res/templates"
-
          # Subjects to use when sending emails from Synapse.
          #
          # The placeholder '%%(app)s' will be replaced with the value of the 'app_name'
@@ -38,3 +38,6 @@ class ExperimentalConfig(Config):

        # MSC3244 (room version capabilities)
        self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False)
+
+        # MSC3266 (room summary api)
+        self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
@@ -67,18 +67,31 @@ handlers:
        backupCount: 3  # Does not include the current log file.
        encoding: utf8

-    # Default to buffering writes to log file for efficiency. This means that
-    # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
-    # logs will still be flushed immediately.
+    # Default to buffering writes to log file for efficiency.
+    # WARNING/ERROR logs will still be flushed immediately, but there will be a
+    # delay (of up to `period` seconds, or until the buffer is full with
+    # `capacity` messages) before INFO/DEBUG logs get written.
    buffer:
-        class: logging.handlers.MemoryHandler
+        class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
        target: file
-        # The capacity is the number of log lines that are buffered before
-        # being written to disk. Increasing this will lead to better
+
+        # The capacity is the maximum number of log lines that are buffered
+        # before being written to disk. Increasing this will lead to better
        # performance, at the expensive of it taking longer for log lines to
        # be written to disk.
+        # This parameter is required.
        capacity: 10
-        flushLevel: 30  # Flush for WARNING logs as well
+
+        # Logs with a level at or above the flush level will cause the buffer to
+        # be flushed immediately.
+        # Default value: 40 (ERROR)
+        # Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
+        flushLevel: 30  # Flush immediately for WARNING logs and higher
+
+        # The period of time, in seconds, between forced flushes.
+        # Messages will not be delayed for longer than this time.
+        # Default value: 5 seconds
+        period: 5

    # A handler that writes logs to stderr. Unused by default, but can be used
    # instead of "buffer" and "file" in the logger handlers.
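A handler with the semantics described in that sample config can be built on top of the stdlib MemoryHandler: buffer until `capacity` records or a record at `flushLevel` arrives, plus a background timer that forces a flush every `period` seconds. A minimal sketch of the idea (an illustration, not Synapse's actual PeriodicallyFlushingMemoryHandler):

import logging
import logging.handlers
import threading

class PeriodicallyFlushingMemoryHandler(logging.handlers.MemoryHandler):
    """MemoryHandler variant that additionally flushes every `period` seconds."""

    def __init__(self, capacity, flushLevel=logging.ERROR, target=None, period=5.0):
        super().__init__(capacity, flushLevel=flushLevel, target=target)
        self._period = period
        self._schedule_flush()

    def _schedule_flush(self):
        # Daemon timer so the handler never keeps the process alive on shutdown.
        self._timer = threading.Timer(self._period, self._flush_and_reschedule)
        self._timer.daemon = True
        self._timer.start()

    def _flush_and_reschedule(self):
        # Push any buffered records to the target handler, then re-arm the timer.
        self.flush()
        self._schedule_flush()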
@@ -12,9 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+import logging
import os
from collections import namedtuple
from typing import Dict, List
+from urllib.request import getproxies_environment  # type: ignore

from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set
from synapse.python_dependencies import DependencyException, check_requirements
@@ -22,6 +24,8 @@ from synapse.util.module_loader import load_module

from ._base import Config, ConfigError

+logger = logging.getLogger(__name__)
+
DEFAULT_THUMBNAIL_SIZES = [
    {"width": 32, "height": 32, "method": "crop"},
    {"width": 96, "height": 96, "method": "crop"},
@@ -36,6 +40,9 @@ THUMBNAIL_SIZE_YAML = """\
#        method: %(method)s
"""

+HTTP_PROXY_SET_WARNING = """\
+The Synapse config url_preview_ip_range_blacklist will be ignored as an HTTP(s) proxy is configured."""
+
ThumbnailRequirement = namedtuple(
    "ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@@ -181,12 +188,17 @@ class ContentRepositoryConfig(Config):
                    e.message  # noqa: B306, DependencyException.message is a property
                )

+            proxy_env = getproxies_environment()
            if "url_preview_ip_range_blacklist" not in config:
+                if "http" not in proxy_env or "https" not in proxy_env:
                    raise ConfigError(
                        "For security, you must specify an explicit target IP address "
                        "blacklist in url_preview_ip_range_blacklist for url previewing "
                        "to work"
                    )
+            else:
+                if "http" in proxy_env or "https" in proxy_env:
+                    logger.warning("".join(HTTP_PROXY_SET_WARNING))

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
@@ -293,6 +305,8 @@ class ContentRepositoryConfig(Config):
        # This must be specified if url_preview_enabled is set. It is recommended that
        # you uncomment the following list as a starting point.
        #
+        # Note: The value is ignored when an HTTP proxy is in use
+        #
        #url_preview_ip_range_blacklist:
%(ip_range_blacklist)s
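getproxies_environment() simply reflects the standard proxy environment variables, so the check above amounts to "is HTTP(S) egress already funnelled through a proxy". A quick illustration (the proxy URL is invented):

import os
from urllib.request import getproxies_environment

# Hypothetical values, purely for illustration.
os.environ["HTTP_PROXY"] = "http://proxy.example.com:3128"
os.environ["HTTPS_PROXY"] = "http://proxy.example.com:3128"

proxy_env = getproxies_environment()  # e.g. {"http": "...", "https": "..."}

# Mirrors the config logic: the blacklist requirement is only relaxed when a
# proxy will actually carry the URL-preview traffic.
if "http" in proxy_env or "https" in proxy_env:
    print("URL preview traffic goes via a proxy; the IP blacklist is ignored")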
@@ -710,6 +710,18 @@ class ServerConfig(Config):
        # Turn the list into a set to improve lookup speed.
        self.next_link_domain_whitelist = set(next_link_domain_whitelist)

+        templates_config = config.get("templates") or {}
+        if not isinstance(templates_config, dict):
+            raise ConfigError("The 'templates' section must be a dictionary")
+
+        self.custom_template_directory = templates_config.get(
+            "custom_template_directory"
+        )
+        if self.custom_template_directory is not None and not isinstance(
+            self.custom_template_directory, str
+        ):
+            raise ConfigError("'custom_template_directory' must be a string")
+
    def has_tls_listener(self) -> bool:
        return any(listener.tls for listener in self.listeners)

@@ -960,6 +972,8 @@ class ServerConfig(Config):
        #
        # This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
        #
+        # Note: The value is ignored when an HTTP proxy is in use
+        #
        #ip_range_blacklist:
%(ip_range_blacklist)s

@@ -1282,6 +1296,19 @@ class ServerConfig(Config):
        # all domains.
        #
        #next_link_domain_whitelist: ["matrix.org"]
+
+        # Templates to use when generating email or HTML page contents.
+        #
+        templates:
+          # Directory in which Synapse will try to find template files to use to generate
+          # email or HTML page contents.
+          # If not set, or a file is not found within the template directory, a default
+          # template from within the Synapse package will be used.
+          #
+          # See https://matrix-org.github.io/synapse/latest/templates.html for more
+          # information about using custom templates.
+          #
+          #custom_template_directory: /path/to/custom/templates/
        """
        % locals()
    )
@@ -45,6 +45,11 @@ class SSOConfig(Config):
        self.sso_template_dir = sso_config.get("template_dir")

        # Read templates from disk
+        custom_template_directories = (
+            self.root.server.custom_template_directory,
+            self.sso_template_dir,
+        )
+
        (
            self.sso_login_idp_picker_template,
            self.sso_redirect_confirm_template,
@@ -63,7 +68,7 @@ class SSOConfig(Config):
                "sso_auth_success.html",
                "sso_auth_bad_user.html",
            ],
-            self.sso_template_dir,
+            (td for td in custom_template_directories if td),
        )

        # These templates have no placeholders, so render them here
@@ -94,6 +99,9 @@ class SSOConfig(Config):
        # Additional settings to use with single-sign on systems such as OpenID Connect,
        # SAML2 and CAS.
        #
+        # Server admins can configure custom templates for pages related to SSO. See
+        # https://matrix-org.github.io/synapse/latest/templates.html for more information.
+        #
        sso:
          # A list of client URLs which are whitelisted so that the user does not
          # have to confirm giving access to their account to the URL. Any client
@@ -125,167 +133,4 @@ class SSOConfig(Config):
          # information when first signing in. Defaults to false.
          #
          #update_profile_information: true
-
-          # Directory in which Synapse will try to find the template files below.
-          # If not set, or the files named below are not found within the template
-          # directory, default templates from within the Synapse package will be used.
-          #
-          # Synapse will look for the following templates in this directory:
-          #
-          # * HTML page to prompt the user to choose an Identity Provider during
-          #   login: 'sso_login_idp_picker.html'.
-          #
-          #   This is only used if multiple SSO Identity Providers are configured.
-          #
-          #   When rendering, this template is given the following variables:
-          #     * redirect_url: the URL that the user will be redirected to after
-          #       login.
-          #
-          #     * server_name: the homeserver's name.
-          #
-          #     * providers: a list of available Identity Providers. Each element is
-          #       an object with the following attributes:
-          #
-          #         * idp_id: unique identifier for the IdP
-          #         * idp_name: user-facing name for the IdP
-          #         * idp_icon: if specified in the IdP config, an MXC URI for an icon
-          #              for the IdP
-          #         * idp_brand: if specified in the IdP config, a textual identifier
-          #              for the brand of the IdP
-          #
-          #   The rendered HTML page should contain a form which submits its results
-          #   back as a GET request, with the following query parameters:
-          #
-          #     * redirectUrl: the client redirect URI (ie, the `redirect_url` passed
-          #       to the template)
-          #
-          #     * idp: the 'idp_id' of the chosen IDP.
-          #
-          # * HTML page to prompt new users to enter a userid and confirm other
-          #   details: 'sso_auth_account_details.html'. This is only shown if the
-          #   SSO implementation (with any user_mapping_provider) does not return
-          #   a localpart.
-          #
-          #   When rendering, this template is given the following variables:
-          #
-          #     * server_name: the homeserver's name.
-          #
-          #     * idp: details of the SSO Identity Provider that the user logged in
-          #       with: an object with the following attributes:
-          #
-          #         * idp_id: unique identifier for the IdP
-          #         * idp_name: user-facing name for the IdP
-          #         * idp_icon: if specified in the IdP config, an MXC URI for an icon
-          #              for the IdP
-          #         * idp_brand: if specified in the IdP config, a textual identifier
-          #              for the brand of the IdP
-          #
-          #     * user_attributes: an object containing details about the user that
-          #       we received from the IdP. May have the following attributes:
-          #
-          #         * display_name: the user's display_name
-          #         * emails: a list of email addresses
-          #
-          #   The template should render a form which submits the following fields:
-          #
-          #     * username: the localpart of the user's chosen user id
-          #
-          # * HTML page allowing the user to consent to the server's terms and
-          #   conditions. This is only shown for new users, and only if
-          #   `user_consent.require_at_registration` is set.
-          #
-          #   When rendering, this template is given the following variables:
-          #
-          #     * server_name: the homeserver's name.
-          #
-          #     * user_id: the user's matrix proposed ID.
-          #
-          #     * user_profile.display_name: the user's proposed display name, if any.
-          #
-          #     * consent_version: the version of the terms that the user will be
-          #       shown
-          #
-          #     * terms_url: a link to the page showing the terms.
-          #
-          #   The template should render a form which submits the following fields:
-          #
-          #     * accepted_version: the version of the terms accepted by the user
-          #       (ie, 'consent_version' from the input variables).
-          #
-          # * HTML page for a confirmation step before redirecting back to the client
-          #   with the login token: 'sso_redirect_confirm.html'.
-          #
-          #   When rendering, this template is given the following variables:
-          #
-          #     * redirect_url: the URL the user is about to be redirected to.
-          #
-          #     * display_url: the same as `redirect_url`, but with the query
-          #                    parameters stripped. The intention is to have a
-          #                    human-readable URL to show to users, not to use it as
-          #                    the final address to redirect to.
-          #
-          #     * server_name: the homeserver's name.
-          #
-          #     * new_user: a boolean indicating whether this is the user's first time
-          #          logging in.
-          #
-          #     * user_id: the user's matrix ID.
-          #
-          #     * user_profile.avatar_url: an MXC URI for the user's avatar, if any.
-          #          None if the user has not set an avatar.
-          #
-          #     * user_profile.display_name: the user's display name. None if the user
-          #          has not set a display name.
-          #
-          # * HTML page which notifies the user that they are authenticating to confirm
-          #   an operation on their account during the user interactive authentication
-          #   process: 'sso_auth_confirm.html'.
-          #
-          #   When rendering, this template is given the following variables:
-          #     * redirect_url: the URL the user is about to be redirected to.
-          #
-          #     * description: the operation which the user is being asked to confirm
-          #
-          #     * idp: details of the Identity Provider that we will use to confirm
-          #       the user's identity: an object with the following attributes:
-          #
-          #         * idp_id: unique identifier for the IdP
-          #         * idp_name: user-facing name for the IdP
-          #         * idp_icon: if specified in the IdP config, an MXC URI for an icon
-          #              for the IdP
-          #         * idp_brand: if specified in the IdP config, a textual identifier
-          #              for the brand of the IdP
-          #
-          # * HTML page shown after a successful user interactive authentication session:
-          #   'sso_auth_success.html'.
-          #
-          #   Note that this page must include the JavaScript which notifies of a successful authentication
-          #   (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
-          #
-          #   This template has no additional variables.
-          #
-          # * HTML page shown after a user-interactive authentication session which
-          #   does not map correctly onto the expected user: 'sso_auth_bad_user.html'.
-          #
-          #   When rendering, this template is given the following variables:
-          #     * server_name: the homeserver's name.
-          #     * user_id_to_verify: the MXID of the user that we are trying to
-          #       validate.
-          #
-          # * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
-          #   attempts to login: 'sso_account_deactivated.html'.
-          #
-          #   This template has no additional variables.
-          #
-          # * HTML page to display to users if something goes wrong during the
-          #   OpenID Connect authentication process: 'sso_error.html'.
-          #
-          #   When rendering, this template is given two variables:
-          #     * error: the technical name of the error
-          #     * error_description: a human-readable message for the error
-          #
-          # You can see the default templates at:
-          # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
-          #
-          #template_dir: "res/templates"
        """
@@ -396,10 +396,11 @@ class FrozenEvent(EventBase):
        return self.__repr__()

    def __repr__(self):
-        return "<FrozenEvent event_id=%r, type=%r, state_key=%r>" % (
+        return "<FrozenEvent event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
            self.get("event_id", None),
            self.get("type", None),
            self.get("state_key", None),
+            self.internal_metadata.is_outlier(),
        )
@@ -17,7 +17,7 @@ from typing import Any, Mapping, Union

from frozendict import frozendict

-from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.util.async_helpers import yieldable_gather_results
@@ -135,6 +135,12 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
        add_fields("history_visibility")
    elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
        add_fields("redacts")
+    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
+        add_fields(EventContentFields.MSC2716_NEXT_CHUNK_ID)
+    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_CHUNK:
+        add_fields(EventContentFields.MSC2716_CHUNK_ID)
+    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
+        add_fields(EventContentFields.MSC2716_MARKER_INSERTION)

    allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
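The pruning model here is an allowlist: redaction keeps a fixed set of content keys per event type and drops everything else, and each MSC extends that per-type table. A toy version of the pattern (the event types and field names below are made up for illustration and are not the actual Matrix redaction rules):

# Allowlisted content fields per event type; hypothetical table.
ALLOWED_CONTENT_FIELDS = {
    "m.room.member": {"membership"},
    "m.room.history_visibility": {"history_visibility"},
}

def prune_event_dict(event_dict: dict) -> dict:
    """Return a copy of the event with non-allowlisted content removed."""
    event_type = event_dict.get("type")
    allowed = ALLOWED_CONTENT_FIELDS.get(event_type, set())
    content = event_dict.get("content", {})
    pruned = dict(event_dict)
    pruned["content"] = {k: v for k, v in content.items() if k in allowed}
    return pruned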
@@ -1108,7 +1108,8 @@ class FederationClient(FederationBase):
            The response from the remote server.

        Raises:
-            HttpResponseException: There was an exception returned from the remote server
+            HttpResponseException / RequestSendFailed: There was an exception
+                returned from the remote server
            SynapseException: M_FORBIDDEN when the remote server has disallowed publicRoom
                requests over federation

@@ -1289,8 +1290,136 @@ class FederationClient(FederationBase):
            failover_on_unknown_endpoint=True,
        )

+    async def get_room_hierarchy(
+        self,
+        destinations: Iterable[str],
+        room_id: str,
+        suggested_only: bool,
+    ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
+        """
+        Call other servers to get a hierarchy of the given room.
+
+        Performs simple data validation and parsing of the response.
+
+        Args:
+            destinations: The remote servers. We will try them in turn, omitting any
+                that have been blacklisted.
+            room_id: ID of the space to be queried
+            suggested_only: If true, ask the remote server to only return children
+                with the "suggested" flag set
+
+        Returns:
+            A tuple of:
+                The room as a JSON dictionary.
+                A list of children rooms, as JSON dictionaries.
+                A list of inaccessible children room IDs.
+
+        Raises:
+            SynapseError if we were unable to get a valid summary from any of the
+                remote servers
+        """
+
+        async def send_request(
+            destination: str,
+        ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
+            res = await self.transport_layer.get_room_hierarchy(
+                destination=destination,
+                room_id=room_id,
+                suggested_only=suggested_only,
+            )
+
+            room = res.get("room")
+            if not isinstance(room, dict):
+                raise InvalidResponseError("'room' must be a dict")
+
+            # Validate children_state of the room.
+            children_state = room.get("children_state", [])
+            if not isinstance(children_state, Sequence):
+                raise InvalidResponseError("'room.children_state' must be a list")
+            if any(not isinstance(e, dict) for e in children_state):
+                raise InvalidResponseError("Invalid event in 'children_state' list")
+            try:
+                [
+                    FederationSpaceSummaryEventResult.from_json_dict(e)
+                    for e in children_state
+                ]
+            except ValueError as e:
+                raise InvalidResponseError(str(e))
+
+            # Validate the children rooms.
+            children = res.get("children", [])
+            if not isinstance(children, Sequence):
+                raise InvalidResponseError("'children' must be a list")
+            if any(not isinstance(r, dict) for r in children):
+                raise InvalidResponseError("Invalid room in 'children' list")
+
+            # Validate the inaccessible children.
+            inaccessible_children = res.get("inaccessible_children", [])
+            if not isinstance(inaccessible_children, Sequence):
+                raise InvalidResponseError("'inaccessible_children' must be a list")
+            if any(not isinstance(r, str) for r in inaccessible_children):
+                raise InvalidResponseError(
+                    "Invalid room ID in 'inaccessible_children' list"
+                )
+
+            return room, children, inaccessible_children
+
+        try:
+            return await self._try_destination_list(
+                "fetch room hierarchy",
+                destinations,
+                send_request,
+                failover_on_unknown_endpoint=True,
+            )
+        except SynapseError as e:
+            # Fallback to the old federation API and translate the results if
+            # no servers implement the new API.
+            #
+            # The algorithm below is a bit inefficient as it only attempts to
+            # get information for the requested room, but the legacy API may
+            # return additional layers.
+            if e.code == 502:
+                legacy_result = await self.get_space_summary(
+                    destinations,
+                    room_id,
+                    suggested_only,
+                    max_rooms_per_space=None,
+                    exclude_rooms=[],
+                )
+
+                # Find the requested room in the response (and remove it).
+                for _i, room in enumerate(legacy_result.rooms):
+                    if room.get("room_id") == room_id:
+                        break
+                else:
+                    # The requested room was not returned, nothing we can do.
+                    raise
+                requested_room = legacy_result.rooms.pop(_i)
+
+                # Find any children events of the requested room.
+                children_events = []
+                children_room_ids = set()
+                for event in legacy_result.events:
+                    if event.room_id == room_id:
+                        children_events.append(event.data)
+                        children_room_ids.add(event.state_key)
+                # And add them under the requested room.
+                requested_room["children_state"] = children_events
+
+                # Find the children rooms.
+                children = []
+                for room in legacy_result.rooms:
+                    if room.get("room_id") in children_room_ids:
+                        children.append(room)
+
+                # It isn't clear from the response whether some of the rooms are
+                # not accessible.
+                return requested_room, children, ()
+
+            raise
+

-@attr.s(frozen=True, slots=True)
+@attr.s(frozen=True, slots=True, auto_attribs=True)
class FederationSpaceSummaryEventResult:
    """Represents a single event in the result of a successful get_space_summary call.

@@ -1299,12 +1428,13 @@ class FederationSpaceSummaryEventResult:
    object attributes.
    """

-    event_type = attr.ib(type=str)
-    state_key = attr.ib(type=str)
-    via = attr.ib(type=Sequence[str])
+    event_type: str
+    room_id: str
+    state_key: str
+    via: Sequence[str]

    # the raw data, including the above keys
-    data = attr.ib(type=JsonDict)
+    data: JsonDict

    @classmethod
    def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryEventResult":
@@ -1321,6 +1451,10 @@ class FederationSpaceSummaryEventResult:
        if not isinstance(event_type, str):
            raise ValueError("Invalid event: 'event_type' must be a str")

+        room_id = d.get("room_id")
+        if not isinstance(room_id, str):
+            raise ValueError("Invalid event: 'room_id' must be a str")
+
        state_key = d.get("state_key")
        if not isinstance(state_key, str):
            raise ValueError("Invalid event: 'state_key' must be a str")
@@ -1335,15 +1469,15 @@ class FederationSpaceSummaryEventResult:
        if any(not isinstance(v, str) for v in via):
            raise ValueError("Invalid event: 'via' must be a list of strings")

-        return cls(event_type, state_key, via, d)
+        return cls(event_type, room_id, state_key, via, d)


-@attr.s(frozen=True, slots=True)
+@attr.s(frozen=True, slots=True, auto_attribs=True)
class FederationSpaceSummaryResult:
    """Represents the data returned by a successful get_space_summary call."""

-    rooms = attr.ib(type=Sequence[JsonDict])
-    events = attr.ib(type=Sequence[FederationSpaceSummaryEventResult])
+    rooms: List[JsonDict]
+    events: Sequence[FederationSpaceSummaryEventResult]

    @classmethod
    def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryResult":
@@ -1356,7 +1490,7 @@ class FederationSpaceSummaryResult:
        ValueError if d is not a valid /spaces/ response
        """
        rooms = d.get("rooms")
-        if not isinstance(rooms, Sequence):
+        if not isinstance(rooms, List):
            raise ValueError("'rooms' must be a list")
        if any(not isinstance(r, dict) for r in rooms):
            raise ValueError("Invalid room in 'rooms' list")
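_try_destination_list encapsulates a common federation pattern: try each candidate server in turn and only give up once all of them have failed. A stripped-down sketch of that pattern (names and error handling simplified; this is not the real helper):

import logging
from typing import Awaitable, Callable, Iterable, TypeVar

logger = logging.getLogger(__name__)
T = TypeVar("T")

async def try_destination_list(
    description: str,
    destinations: Iterable[str],
    send_request: Callable[[str], Awaitable[T]],
) -> T:
    """Try each destination until one succeeds; raise once all have failed."""
    for destination in destinations:
        try:
            return await send_request(destination)
        except Exception as e:
            # A real implementation distinguishes retryable failures
            # (timeouts, unknown endpoints) from fatal ones (e.g. 403).
            logger.warning("%s: %s failed: %s", description, destination, e)
    raise RuntimeError(f"Unable to {description} from any server")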
@@ -195,13 +195,17 @@ class FederationServer(FederationBase):
            origin, room_id, versions, limit
        )

-        res = self._transaction_from_pdus(pdus).get_dict()
+        res = self._transaction_dict_from_pdus(pdus)

        return 200, res

    async def on_incoming_transaction(
-        self, origin: str, transaction_data: JsonDict
-    ) -> Tuple[int, Dict[str, Any]]:
+        self,
+        origin: str,
+        transaction_id: str,
+        destination: str,
+        transaction_data: JsonDict,
+    ) -> Tuple[int, JsonDict]:
        # If we receive a transaction we should make sure that kick off handling
        # any old events in the staging area.
        if not self._started_handling_of_staged_events:
@@ -212,8 +216,14 @@ class FederationServer(FederationBase):
        # accurate as possible.
        request_time = self._clock.time_msec()

-        transaction = Transaction(**transaction_data)
-        transaction_id = transaction.transaction_id  # type: ignore
+        transaction = Transaction(
+            transaction_id=transaction_id,
+            destination=destination,
+            origin=origin,
+            origin_server_ts=transaction_data.get("origin_server_ts"),  # type: ignore
+            pdus=transaction_data.get("pdus"),  # type: ignore
+            edus=transaction_data.get("edus"),
+        )

        if not transaction_id:
            raise Exception("Transaction missing transaction_id")
@@ -221,9 +231,7 @@ class FederationServer(FederationBase):
        logger.debug("[%s] Got transaction", transaction_id)

        # Reject malformed transactions early: reject if too many PDUs/EDUs
-        if len(transaction.pdus) > 50 or (  # type: ignore
-            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
-        ):
+        if len(transaction.pdus) > 50 or len(transaction.edus) > 100:
            logger.info("Transaction PDU or EDU count too large. Returning 400")
            return 400, {}

@@ -263,7 +271,7 @@ class FederationServer(FederationBase):
        # CRITICAL SECTION: the first thing we must do (before awaiting) is
        # add an entry to _active_transactions.
        assert origin not in self._active_transactions
-        self._active_transactions[origin] = transaction.transaction_id  # type: ignore
+        self._active_transactions[origin] = transaction.transaction_id

        try:
            result = await self._handle_incoming_transaction(
@@ -291,11 +299,11 @@ class FederationServer(FederationBase):
        if response:
            logger.debug(
                "[%s] We've already responded to this request",
-                transaction.transaction_id,  # type: ignore
+                transaction.transaction_id,
            )
            return response

-        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore
+        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
@@ -334,7 +342,7 @@ class FederationServer(FederationBase):
        report back to the sending server.
        """

-        received_pdus_counter.inc(len(transaction.pdus))  # type: ignore
+        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)

@@ -342,7 +350,7 @@ class FederationServer(FederationBase):

        newest_pdu_ts = 0

-        for p in transaction.pdus:  # type: ignore
+        for p in transaction.pdus:
            # FIXME (richardv): I don't think this works:
            # https://github.com/matrix-org/synapse/issues/8429
            if "unsigned" in p:
@@ -436,10 +444,10 @@ class FederationServer(FederationBase):

        return pdu_results

-    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
+    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction) -> None:
        """Process the EDUs in a received transaction."""

-        async def _process_edu(edu_dict):
+        async def _process_edu(edu_dict: JsonDict) -> None:
            received_edus_counter.inc()

            edu = Edu(
@@ -452,7 +460,7 @@ class FederationServer(FederationBase):

        await concurrently_execute(
            _process_edu,
-            getattr(transaction, "edus", []),
+            transaction.edus,
            TRANSACTION_CONCURRENCY_LIMIT,
        )

@@ -538,7 +546,7 @@ class FederationServer(FederationBase):
        pdu = await self.handler.get_persisted_pdu(origin, event_id)

        if pdu:
-            return 200, self._transaction_from_pdus([pdu]).get_dict()
+            return 200, self._transaction_dict_from_pdus([pdu])
        else:
            return 404, ""

@@ -879,18 +887,20 @@ class FederationServer(FederationBase):
        ts_now_ms = self._clock.time_msec()
        return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)

-    def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
+    def _transaction_dict_from_pdus(self, pdu_list: List[EventBase]) -> JsonDict:
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
+            # Just need a dummy transaction ID and destination since it won't be used.
+            transaction_id="",
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
-            destination=None,
-        )
+            destination="",
+        ).get_dict()

    async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
        """Process a PDU received in a federation /send/ transaction.

@@ -962,11 +972,16 @@ class FederationServer(FederationBase):
        # the room, so instead of pulling the event out of the DB and parsing
        # the event we just pull out the next event ID and check if that matches.
        if latest_event is not None and latest_origin is not None:
-            (
-                next_origin,
-                next_event_id,
-            ) = await self.store.get_next_staged_event_id_for_room(room_id)
-            if next_origin != latest_origin or next_event_id != latest_event.event_id:
+            result = await self.store.get_next_staged_event_id_for_room(room_id)
+            if result is None:
                latest_origin = None
                latest_event = None
+            else:
+                next_origin, next_event_id = result
+                if (
+                    next_origin != latest_origin
+                    or next_event_id != latest_event.event_id
+                ):
+                    latest_origin = None
+                    latest_event = None

@@ -988,6 +1003,7 @@ class FederationServer(FederationBase):
        # has started processing).
        while True:
            async with lock:
+                logger.info("handling received PDU: %s", event)
                try:
                    await self.handler.on_receive_pdu(
                        origin, event, sent_to_us_directly=True
@@ -45,7 +45,7 @@ class TransactionActions:
        `None` if we have not previously responded to this transaction or a
        2-tuple of `(int, dict)` representing the response code and response body.
        """
-        transaction_id = transaction.transaction_id  # type: ignore
+        transaction_id = transaction.transaction_id
        if not transaction_id:
            raise RuntimeError("Cannot persist a transaction with no transaction_id")

@@ -56,7 +56,7 @@ class TransactionActions:
        self, origin: str, transaction: Transaction, code: int, response: JsonDict
    ) -> None:
        """Persist how we responded to a transaction."""
-        transaction_id = transaction.transaction_id  # type: ignore
+        transaction_id = transaction.transaction_id
        if not transaction_id:
            raise RuntimeError("Cannot persist a transaction with no transaction_id")
@@ -27,6 +27,7 @@ from synapse.logging.opentracing import (
    tags,
    whitelisted_homeserver,
)
+from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.metrics import measure_func

@@ -104,13 +105,13 @@ class TransactionManager:
            len(edus),
        )

-        transaction = Transaction.create_new(
+        transaction = Transaction(
            origin_server_ts=int(self.clock.time_msec()),
            transaction_id=txn_id,
            origin=self._server_name,
            destination=destination,
-            pdus=pdus,
-            edus=edus,
+            pdus=[p.get_pdu_json() for p in pdus],
+            edus=[edu.get_dict() for edu in edus],
        )

        self._next_txn_id += 1

@@ -131,7 +132,7 @@ class TransactionManager:
        # FIXME (richardv): I also believe it no longer works. We (now?) store
        # "age_ts" in "unsigned" rather than at the top level. See
        # https://github.com/matrix-org/synapse/issues/8429.
-        def json_data_cb():
+        def json_data_cb() -> JsonDict:
            data = transaction.get_dict()
            now = int(self.clock.time_msec())
            if "pdus" in data:
@@ -143,7 +143,7 @@ class TransportLayerClient:
        """Sends the given Transaction to its destination

        Args:
-            transaction (Transaction)
+            transaction

        Returns:
            Succeeds when we get a 2xx HTTP response. The result

@@ -1177,6 +1177,28 @@ class TransportLayerClient:
            destination=destination, path=path, data=params
        )

+    async def get_room_hierarchy(
+        self,
+        destination: str,
+        room_id: str,
+        suggested_only: bool,
+    ) -> JsonDict:
+        """
+        Args:
+            destination: The remote server
+            room_id: The room ID to ask about.
+            suggested_only: if True, only suggested rooms will be returned
+        """
+        path = _create_path(
+            FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/hierarchy/%s", room_id
+        )
+
+        return await self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"suggested_only": "true" if suggested_only else "false"},
+        )
+

def _create_path(federation_prefix: str, path: str, *args: str) -> str:
    """
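_create_path interpolates its arguments into the endpoint template, and those arguments need percent-encoding since room IDs contain characters like "!" and ":". A small sketch of that kind of helper (hypothetical, standing in for the real one; the prefix value is an assumption):

from urllib.parse import quote

FEDERATION_UNSTABLE_PREFIX = "/_matrix/federation/unstable"  # assumed value

def create_path(federation_prefix: str, path: str, *args: str) -> str:
    """Build a federation URL path, percent-encoding each argument."""
    return federation_prefix + path % tuple(quote(arg, safe="") for arg in args)

# -> /_matrix/federation/unstable/org.matrix.msc2946/hierarchy/%21abc%3Aexample.org
print(create_path(FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/hierarchy/%s", "!abc:example.org"))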
(One file's diff was suppressed because it is too large.)

synapse/federation/transport/server/__init__.py (new file, 332 lines)
@@ -0,0 +1,332 @@
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, Iterable, List, Optional, Tuple, Type

from typing_extensions import Literal

from synapse.api.errors import FederationDeniedError, SynapseError
from synapse.federation.transport.server._base import (
    Authenticator,
    BaseFederationServlet,
)
from synapse.federation.transport.server.federation import FEDERATION_SERVLET_CLASSES
from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES
from synapse.federation.transport.server.groups_server import (
    GROUP_SERVER_SERVLET_CLASSES,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
    parse_boolean_from_args,
    parse_integer_from_args,
    parse_string_from_args,
)
from synapse.server import HomeServer
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.ratelimitutils import FederationRateLimiter

logger = logging.getLogger(__name__)


class TransportLayerServer(JsonResource):
    """Handles incoming federation HTTP requests"""

    def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None):
        """Initialize the TransportLayerServer

        Will by default register all servlets. For custom behaviour, pass in
        a list of servlet_groups to register.

        Args:
            hs: homeserver
            servlet_groups: List of servlet groups to register.
                Defaults to ``DEFAULT_SERVLET_GROUPS``.
        """
        self.hs = hs
        self.clock = hs.get_clock()
        self.servlet_groups = servlet_groups

        super().__init__(hs, canonical_json=False)

        self.authenticator = Authenticator(hs)
        self.ratelimiter = hs.get_federation_ratelimiter()

        self.register_servlets()

    def register_servlets(self) -> None:
        register_servlets(
            self.hs,
            resource=self,
            ratelimiter=self.ratelimiter,
            authenticator=self.authenticator,
            servlet_groups=self.servlet_groups,
        )


class PublicRoomList(BaseFederationServlet):
    """
    Fetch the public room list for this server.

    This API returns information in the same format as /publicRooms on the
    client API, but will only ever include local public rooms and hence is
    intended for consumption by other homeservers.

        GET /publicRooms HTTP/1.1

        HTTP/1.1 200 OK
        Content-Type: application/json

        {
            "chunk": [
                {
                    "aliases": [
                        "#test:localhost"
                    ],
                    "guest_can_join": false,
                    "name": "test room",
                    "num_joined_members": 3,
                    "room_id": "!whkydVegtvatLfXmPN:localhost",
                    "world_readable": false
                }
            ],
            "end": "END",
            "start": "START"
        }
    """

    PATH = "/publicRooms"

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_room_list_handler()
        self.allow_access = hs.config.allow_public_rooms_over_federation

    async def on_GET(
        self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]
    ) -> Tuple[int, JsonDict]:
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        include_all_networks = parse_boolean_from_args(
            query, "include_all_networks", default=False
        )
        third_party_instance_id = parse_string_from_args(
            query, "third_party_instance_id", None
        )

        if include_all_networks:
            network_tuple = None
        elif third_party_instance_id:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
        else:
            network_tuple = ThirdPartyInstanceID(None, None)

        if limit == 0:
            # zero is a special value which corresponds to no limit.
            limit = None

        data = await self.handler.get_local_public_room_list(
            limit, since_token, network_tuple=network_tuple, from_federation=True
        )
        return 200, data

    async def on_POST(
        self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
    ) -> Tuple[int, JsonDict]:
        # This implements MSC2197 (Search Filtering over Federation)
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit: Optional[int] = int(content.get("limit", 100))
        since_token = content.get("since", None)
        search_filter = content.get("filter", None)

        include_all_networks = content.get("include_all_networks", False)
        third_party_instance_id = content.get("third_party_instance_id", None)

        if include_all_networks:
            network_tuple = None
            if third_party_instance_id is not None:
                raise SynapseError(
                    400, "Can't use include_all_networks with an explicit network"
                )
        elif third_party_instance_id is None:
            network_tuple = ThirdPartyInstanceID(None, None)
        else:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)

        if search_filter is None:
            logger.warning("Nonefilter")

        if limit == 0:
            # zero is a special value which corresponds to no limit.
            limit = None

        data = await self.handler.get_local_public_room_list(
            limit=limit,
            since_token=since_token,
            search_filter=search_filter,
            network_tuple=network_tuple,
            from_federation=True,
        )

        return 200, data


class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
    """A group or user's server renews their attestation"""

    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_groups_attestation_renewer()

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        # We don't need to check auth here as we check the attestation signatures

        new_content = await self.handler.on_renew_attestation(
            group_id, user_id, content
        )

        return 200, new_content


class OpenIdUserInfo(BaseFederationServlet):
    """
    Exchange a bearer token for information about a user.

    The response format should be compatible with:
        http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse

        GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1

        HTTP/1.1 200 OK
        Content-Type: application/json

        {
            "sub": "@userpart:example.org",
        }
    """

    PATH = "/openid/userinfo"

    REQUIRE_AUTH = False

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_federation_server()

    async def on_GET(
        self,
        origin: Optional[str],
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
    ) -> Tuple[int, JsonDict]:
        token = parse_string_from_args(query, "access_token")
        if token is None:
            return (
                401,
                {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"},
            )

        user_id = await self.handler.on_openid_userinfo(token)

        if user_id is None:
            return (
                401,
                {
                    "errcode": "M_UNKNOWN_TOKEN",
                    "error": "Access Token unknown or expired",
                },
            )

        return 200, {"sub": user_id}


DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
    "federation": FEDERATION_SERVLET_CLASSES,
    "room_list": (PublicRoomList,),
    "group_server": GROUP_SERVER_SERVLET_CLASSES,
    "group_local": GROUP_LOCAL_SERVLET_CLASSES,
    "group_attestation": (FederationGroupsRenewAttestaionServlet,),
    "openid": (OpenIdUserInfo,),
}


def register_servlets(
    hs: HomeServer,
    resource: HttpServer,
    authenticator: Authenticator,
    ratelimiter: FederationRateLimiter,
    servlet_groups: Optional[Iterable[str]] = None,
):
    """Initialize and register servlet classes.

    Will by default register all servlets. For custom behaviour, pass in
    a list of servlet_groups to register.

    Args:
        hs: homeserver
        resource: resource class to register to
        authenticator: authenticator to use
        ratelimiter: ratelimiter to use
        servlet_groups: List of servlet groups to register.
            Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    if not servlet_groups:
        servlet_groups = DEFAULT_SERVLET_GROUPS.keys()

    for servlet_group in servlet_groups:
        # Skip unknown servlet groups.
        if servlet_group not in DEFAULT_SERVLET_GROUPS:
            raise RuntimeError(
                f"Attempting to register unknown federation servlet: '{servlet_group}'"
            )

        for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]:
            servletclass(
                hs=hs,
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
synapse/federation/transport/server/_base.py (new file, 328 lines)
@ -0,0 +1,328 @@
|
|||
# Copyright 2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import re
|
||||
|
||||
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
|
||||
from synapse.api.urls import FEDERATION_V1_PREFIX
|
||||
from synapse.http.servlet import parse_json_object_from_request
|
||||
from synapse.logging import opentracing
|
||||
from synapse.logging.context import run_in_background
|
||||
from synapse.logging.opentracing import (
|
||||
SynapseTags,
|
||||
start_active_span,
|
||||
start_active_span_from_request,
|
||||
tags,
|
||||
whitelisted_homeserver,
|
||||
)
|
||||
from synapse.server import HomeServer
|
||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||
from synapse.util.stringutils import parse_and_validate_server_name
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AuthenticationError(SynapseError):
|
||||
"""There was a problem authenticating the request"""
|
||||
|
||||
|
||||
class NoAuthenticationError(AuthenticationError):
|
||||
"""The request had no authentication information"""
|
||||
|
||||
|
||||
class Authenticator:
|
||||
def __init__(self, hs: HomeServer):
|
||||
self._clock = hs.get_clock()
|
||||
self.keyring = hs.get_keyring()
|
||||
self.server_name = hs.hostname
|
||||
self.store = hs.get_datastore()
|
||||
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
|
||||
self.notifier = hs.get_notifier()
|
||||
|
||||
self.replication_client = None
|
||||
if hs.config.worker.worker_app:
|
||||
self.replication_client = hs.get_tcp_replication()
|
||||
|
||||
# A method just so we can pass 'self' as the authenticator to the Servlets
|
||||
async def authenticate_request(self, request, content):
|
||||
now = self._clock.time_msec()
|
||||
json_request = {
|
||||
"method": request.method.decode("ascii"),
|
||||
"uri": request.uri.decode("ascii"),
|
||||
"destination": self.server_name,
|
||||
"signatures": {},
|
||||
}
|
||||
|
||||
if content is not None:
|
||||
json_request["content"] = content
|
||||
|
||||
origin = None
|
||||
|
||||
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
|
||||
|
||||
if not auth_headers:
|
||||
raise NoAuthenticationError(
|
||||
401, "Missing Authorization headers", Codes.UNAUTHORIZED
|
||||
)
|
||||
|
||||
for auth in auth_headers:
|
||||
if auth.startswith(b"X-Matrix"):
|
||||
(origin, key, sig) = _parse_auth_header(auth)
|
||||
json_request["origin"] = origin
|
||||
json_request["signatures"].setdefault(origin, {})[key] = sig
|
||||
|
||||
if (
|
||||
self.federation_domain_whitelist is not None
|
||||
and origin not in self.federation_domain_whitelist
|
||||
):
|
||||
raise FederationDeniedError(origin)
|
||||
|
||||
if origin is None or not json_request["signatures"]:
|
||||
raise NoAuthenticationError(
|
||||
401, "Missing Authorization headers", Codes.UNAUTHORIZED
|
||||
)
|
||||
|
||||
await self.keyring.verify_json_for_server(
|
||||
origin,
|
||||
json_request,
|
||||
now,
|
||||
)
|
||||
|
||||
logger.debug("Request from %s", origin)
|
||||
request.requester = origin
|
||||
|
||||
# If we get a valid signed request from the other side, its probably
|
||||
# alive
|
||||
retry_timings = await self.store.get_destination_retry_timings(origin)
|
||||
if retry_timings and retry_timings.retry_last_ts:
|
||||
run_in_background(self._reset_retry_timings, origin)
|
||||
|
||||
return origin
|
||||
|
||||
async def _reset_retry_timings(self, origin):
|
||||
try:
|
||||
logger.info("Marking origin %r as up", origin)
|
||||
await self.store.set_destination_retry_timings(origin, None, 0, 0)
|
||||
|
||||
# Inform the relevant places that the remote server is back up.
|
||||
self.notifier.notify_remote_server_up(origin)
|
||||
if self.replication_client:
|
||||
# If we're on a worker we try and inform master about this. The
|
||||
# replication client doesn't hook into the notifier to avoid
|
||||
# infinite loops where we send a `REMOTE_SERVER_UP` command to
|
||||
# master, which then echoes it back to us which in turn pokes
|
||||
# the notifier.
|
||||
self.replication_client.send_remote_server_up(origin)
|
||||
|
||||
except Exception:
|
||||
logger.exception("Error resetting retry timings on %s", origin)
|
||||
|
||||
|
||||
def _parse_auth_header(header_bytes):
    """Parse an X-Matrix auth header

    Args:
        header_bytes (bytes): header value

    Returns:
        Tuple[str, str, str]: origin, key id, signature.

    Raises:
        AuthenticationError if the header could not be parsed
    """
    try:
        header_str = header_bytes.decode("utf-8")
        params = header_str.split(" ")[1].split(",")
        param_dict = dict(kv.split("=") for kv in params)

        def strip_quotes(value):
            if value.startswith('"'):
                return value[1:-1]
            else:
                return value

        origin = strip_quotes(param_dict["origin"])

        # ensure that the origin is a valid server name
        parse_and_validate_server_name(origin)

        key = strip_quotes(param_dict["key"])
        sig = strip_quotes(param_dict["sig"])
        return origin, key, sig
    except Exception as e:
        logger.warning(
            "Error parsing auth header '%s': %s",
            header_bytes.decode("ascii", "replace"),
            e,
        )
        raise AuthenticationError(
            400, "Malformed Authorization header", Codes.UNAUTHORIZED
        )


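# Worked example of the parser above (illustrative values):
#
#     _parse_auth_header(
#         b'X-Matrix origin=origin.example.com,key="ed25519:key1",sig="ABCdef"'
#     )
#     # -> ("origin.example.com", "ed25519:key1", "ABCdef")
#
# Note that a value containing "=" (e.g. base64 padding in the signature)
# would make `kv.split("=")` yield more than two parts and fail the parse.

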
class BaseFederationServlet:
    """Abstract base class for federation servlet classes.

    The servlet object should have a PATH attribute which takes the form of a regexp to
    match against the request path (excluding the /federation/v1 prefix).

    The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
    the appropriate HTTP method. These methods must be *asynchronous* and have the
    signature:

        on_<METHOD>(self, origin, content, query, **kwargs)

        With arguments:

            origin (unicode|None): The authenticated server_name of the calling server,
                unless REQUIRE_AUTH is set to False and authentication failed.

            content (unicode|None): decoded json body of the request. None if the
                request was a GET.

            query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
                (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
                yet.

            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                components as specified in the path match regexp.

        Returns:
            Optional[Tuple[int, object]]: either (response code, response object) to
                return a JSON response, or None if the request has already been handled.

        Raises:
            SynapseError: to return an error code

            Exception: other exceptions will be caught, logged, and a 500 will be
                returned.
    """

    PATH = ""  # Overridden in subclasses, the regex to match against the path.

    REQUIRE_AUTH = True

    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version

    RATELIMIT = True  # Whether to rate limit requests or not

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        self.hs = hs
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter
        self.server_name = server_name

    def _wrap(self, func):
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter

        @functools.wraps(func)
        async def new_func(request, *args, **kwargs):
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request (twisted.web.http.Request):
                *args: unused?
                **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                    components as specified in the path match regexp.

            Returns:
                Tuple[int, object]|None: (response code, response object) as returned by
                    the callback method. None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                origin = await authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warning(
                        "authenticate_request failed: missing authentication"
                    )
                    raise
            except Exception as e:
                logger.warning("authenticate_request failed: %s", e)
                raise

            request_tags = {
                SynapseTags.REQUEST_ID: request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "authenticated_entity": origin,
                "servlet_name": request.request_metrics.name,
            }

            # Only accept the span context if the origin is authenticated
            # and whitelisted
            if origin and whitelisted_homeserver(origin):
                scope = start_active_span_from_request(
                    request, "incoming-federation-request", tags=request_tags
                )
            else:
                scope = start_active_span(
                    "incoming-federation-request", tags=request_tags
                )

            with scope:
                opentracing.inject_response_headers(request.responseHeaders)

                if origin and self.RATELIMIT:
                    with ratelimiter.ratelimit(origin) as d:
                        await d
                        if request._disconnected:
                            logger.warning(
                                "client disconnected before we started processing "
                                "request"
                            )
                            return -1, None
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )

            return response

        return new_func

    def register(self, server):
        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")

        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method), None)
            if code is None:
                continue

            server.register_paths(
                method,
                (pattern,),
                self._wrap(code),
                self.__class__.__name__,
            )
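
# A minimal sketch of a concrete subclass (hypothetical servlet, for
# illustration only; real subclasses live in the sibling modules of this
# package):
#
#     class PingServlet(BaseFederationServlet):
#         PATH = "/ping/(?P<room_id>[^/]*)"
#
#         async def on_GET(self, origin, content, query, room_id):
#             # `origin` is the authenticated remote server, `content` is None
#             # for a GET, and `room_id` is captured by the PATH regexp.
#             return 200, {"room_id": room_id}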

synapse/federation/transport/server/federation.py (new file, 706 lines)

@ -0,0 +1,706 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union

from typing_extensions import Literal

import synapse
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
from synapse.federation.transport.server._base import (
    Authenticator,
    BaseFederationServlet,
)
from synapse.http.servlet import (
    parse_boolean_from_args,
    parse_integer_from_args,
    parse_string_from_args,
    parse_strings_from_args,
)
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string

logger = logging.getLogger(__name__)


class BaseFederationServerServlet(BaseFederationServlet):
    """Abstract base class for federation servlet classes which provides a federation server handler.

    See BaseFederationServlet for more information.
    """

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_federation_server()


class FederationSendServlet(BaseFederationServerServlet):
    PATH = "/send/(?P<transaction_id>[^/]*)/?"

    # We ratelimit manually in the handler as we queue up the requests and we
    # don't want to fill up the ratelimiter with blocked requests.
    RATELIMIT = False

    # This is when someone is trying to send us a bunch of data.
    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        transaction_id: str,
    ) -> Tuple[int, JsonDict]:
        """Called on PUT /send/<transaction_id>/

        Args:
            transaction_id: The transaction_id associated with this request. This
                is *not* None.

        Returns:
            Tuple of `(code, response)`, where
            `response` is a python dict to be converted into JSON that is
            used as the response body.
        """
        # Parse the request
        try:
            transaction_data = content

            logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))

            logger.info(
                "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
                transaction_id,
                origin,
                len(transaction_data.get("pdus", [])),
                len(transaction_data.get("edus", [])),
            )

        except Exception as e:
            logger.exception(e)
            return 400, {"error": "Invalid transaction"}

        code, response = await self.handler.on_incoming_transaction(
            origin, transaction_id, self.server_name, transaction_data
        )

        return code, response

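# For reference, the transaction body accepted above is shaped roughly like
# the following (values illustrative; see the Matrix federation spec for the
# authoritative schema):
#
#     PUT /_matrix/federation/v1/send/916d630ea616342b42e98a3be0b74113
#     {
#         "origin": "origin.example.com",
#         "origin_server_ts": 1404835423000,
#         "pdus": [...room events...],
#         "edus": [{"edu_type": "m.typing", "content": {...}}]
#     }

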
class FederationEventServlet(BaseFederationServerServlet):
    PATH = "/event/(?P<event_id>[^/]*)/?"

    # This is when someone asks for a data item for a given server data_id pair.
    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        event_id: str,
    ) -> Tuple[int, Union[JsonDict, str]]:
        return await self.handler.on_pdu_request(origin, event_id)


class FederationStateV1Servlet(BaseFederationServerServlet):
    PATH = "/state/(?P<room_id>[^/]*)/?"

    # This is when someone asks for all data for a given room.
    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        return await self.handler.on_room_state_request(
            origin,
            room_id,
            parse_string_from_args(query, "event_id", None, required=False),
        )


class FederationStateIdsServlet(BaseFederationServerServlet):
    PATH = "/state_ids/(?P<room_id>[^/]*)/?"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        return await self.handler.on_state_ids_request(
            origin,
            room_id,
            parse_string_from_args(query, "event_id", None, required=True),
        )


class FederationBackfillServlet(BaseFederationServerServlet):
    PATH = "/backfill/(?P<room_id>[^/]*)/?"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        versions = [x.decode("ascii") for x in query[b"v"]]
        limit = parse_integer_from_args(query, "limit", None)

        if not limit:
            return 400, {"error": "Did not include limit param"}

        return await self.handler.on_backfill_request(origin, room_id, versions, limit)


class FederationQueryServlet(BaseFederationServerServlet):
    PATH = "/query/(?P<query_type>[^/]*)"

    # This is when we receive a server-server Query
    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        query_type: str,
    ) -> Tuple[int, JsonDict]:
        args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
        args["origin"] = origin
        return await self.handler.on_query_request(query_type, args)


class FederationMakeJoinServlet(BaseFederationServerServlet):
    PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        """
        Args:
            origin: The authenticated server_name of the calling server

            content: (GETs don't have bodies)

            query: Query params from the request.

            **kwargs: the dict mapping keys to path components as specified in
                the path match regexp.

        Returns:
            Tuple of (response code, response object)
        """
        supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
        if supported_versions is None:
            supported_versions = ["1"]

        result = await self.handler.on_make_join_request(
            origin, room_id, user_id, supported_versions=supported_versions
        )
        return 200, result

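# A joining server calls the endpoint above along these lines (values
# illustrative):
#
#     GET /_matrix/federation/v1/make_join/!room:example.com/@alice:other.example?ver=1&ver=6
#
# Each repeated `ver` parameter names one room version the caller supports;
# the ["1"] default above covers old servers that never send `ver` at all.

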
class FederationMakeLeaveServlet(BaseFederationServerServlet):
    PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        result = await self.handler.on_make_leave_request(origin, room_id, user_id)
        return 200, result


class FederationV1SendLeaveServlet(BaseFederationServerServlet):
    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, Tuple[int, JsonDict]]:
        result = await self.handler.on_send_leave_request(origin, content, room_id)
        return 200, (200, result)


class FederationV2SendLeaveServlet(BaseFederationServerServlet):
    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, JsonDict]:
        result = await self.handler.on_send_leave_request(origin, content, room_id)
        return 200, result


class FederationMakeKnockServlet(BaseFederationServerServlet):
    PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        # Retrieve the room versions the remote homeserver claims to support
        supported_versions = parse_strings_from_args(
            query, "ver", required=True, encoding="utf-8"
        )

        result = await self.handler.on_make_knock_request(
            origin, room_id, user_id, supported_versions=supported_versions
        )
        return 200, result


class FederationV1SendKnockServlet(BaseFederationServerServlet):
    PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, JsonDict]:
        result = await self.handler.on_send_knock_request(origin, content, room_id)
        return 200, result


class FederationEventAuthServlet(BaseFederationServerServlet):
    PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, JsonDict]:
        return await self.handler.on_event_auth(origin, room_id, event_id)


class FederationV1SendJoinServlet(BaseFederationServerServlet):
    PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, Tuple[int, JsonDict]]:
        # TODO(paul): assert that the event_id parsed from the path actually
        # matches the one given in content
        result = await self.handler.on_send_join_request(origin, content, room_id)
        return 200, (200, result)


class FederationV2SendJoinServlet(BaseFederationServerServlet):
    PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, JsonDict]:
        # TODO(paul): assert that the event_id parsed from the path actually
        # matches the one given in content
        result = await self.handler.on_send_join_request(origin, content, room_id)
        return 200, result


class FederationV1InviteServlet(BaseFederationServerServlet):
    PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, Tuple[int, JsonDict]]:
        # We don't get a room version, so we have to assume it's EITHER v1 or
        # v2. This is "fine" as the only difference between V1 and V2 is the
        # state resolution algorithm, and we don't use that for processing
        # invites
        result = await self.handler.on_invite_request(
            origin, content, room_version_id=RoomVersions.V1.identifier
        )

        # V1 federation API is defined to return a content of `[200, {...}]`
        # due to a historical bug.
        return 200, (200, result)

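# The historical quirk above means a V1 invite response body is a two-element
# JSON array rather than a bare object, e.g.:
#
#     [200, {"event": {...signed invite event...}}]
#
# whereas the V2 servlet below returns just the object.

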
class FederationV2InviteServlet(BaseFederationServerServlet):
    PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
        event_id: str,
    ) -> Tuple[int, JsonDict]:
        # TODO(paul): assert that room_id/event_id parsed from path actually
        # match those given in content

        room_version = content["room_version"]
        event = content["event"]
        invite_room_state = content["invite_room_state"]

        # Synapse expects invite_room_state to be in unsigned, as it is in v1
        # API

        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state

        result = await self.handler.on_invite_request(
            origin, event, room_version_id=room_version
        )
        return 200, result


class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
    PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        await self.handler.on_exchange_third_party_invite_request(content)
        return 200, {}


class FederationClientKeysQueryServlet(BaseFederationServerServlet):
    PATH = "/user/keys/query"

    async def on_POST(
        self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
    ) -> Tuple[int, JsonDict]:
        return await self.handler.on_query_client_keys(origin, content)


class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
    PATH = "/user/devices/(?P<user_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        return await self.handler.on_query_user_devices(origin, user_id)


class FederationClientKeysClaimServlet(BaseFederationServerServlet):
    PATH = "/user/keys/claim"

    async def on_POST(
        self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
    ) -> Tuple[int, JsonDict]:
        response = await self.handler.on_claim_client_keys(origin, content)
        return 200, response


class FederationGetMissingEventsServlet(BaseFederationServerServlet):
    # TODO(paul): Why does this path alone end with "/?" optional?
    PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        limit = int(content.get("limit", 10))
        earliest_events = content.get("earliest_events", [])
        latest_events = content.get("latest_events", [])

        result = await self.handler.on_get_missing_events(
            origin,
            room_id=room_id,
            earliest_events=earliest_events,
            latest_events=latest_events,
            limit=limit,
        )

        return 200, result


class On3pidBindServlet(BaseFederationServerServlet):
    PATH = "/3pid/onbind"

    REQUIRE_AUTH = False

    async def on_POST(
        self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
    ) -> Tuple[int, JsonDict]:
        if "invites" in content:
            last_exception = None
            for invite in content["invites"]:
                try:
                    if "signed" not in invite or "token" not in invite["signed"]:
                        message = (
                            "Rejecting received notification of third-"
                            "party invite without signed: %s" % (invite,)
                        )
                        logger.info(message)
                        raise SynapseError(400, message)
                    await self.handler.exchange_third_party_invite(
                        invite["sender"],
                        invite["mxid"],
                        invite["room_id"],
                        invite["signed"],
                    )
                except Exception as e:
                    last_exception = e
            if last_exception:
                raise last_exception
        return 200, {}


class FederationVersionServlet(BaseFederationServlet):
    PATH = "/version"

    REQUIRE_AUTH = False

    async def on_GET(
        self,
        origin: Optional[str],
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
    ) -> Tuple[int, JsonDict]:
        return (
            200,
            {"server": {"name": "Synapse", "version": get_version_string(synapse)}},
        )


class FederationSpaceSummaryServlet(BaseFederationServlet):
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
    PATH = "/spaces/(?P<room_id>[^/]*)"

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_room_summary_handler()

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Mapping[bytes, Sequence[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)

        max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
        if max_rooms_per_space is not None and max_rooms_per_space < 0:
            raise SynapseError(
                400,
                "Value for 'max_rooms_per_space' must be a non-negative integer",
                Codes.BAD_JSON,
            )

        exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[])

        return 200, await self.handler.federation_space_summary(
            origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )

    # TODO When switching to the stable endpoint, remove the POST handler.
    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Mapping[bytes, Sequence[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        suggested_only = content.get("suggested_only", False)
        if not isinstance(suggested_only, bool):
            raise SynapseError(
                400, "'suggested_only' must be a boolean", Codes.BAD_JSON
            )

        exclude_rooms = content.get("exclude_rooms", [])
        if not isinstance(exclude_rooms, list) or any(
            not isinstance(x, str) for x in exclude_rooms
        ):
            raise SynapseError(400, "bad value for 'exclude_rooms'", Codes.BAD_JSON)

        max_rooms_per_space = content.get("max_rooms_per_space")
        if max_rooms_per_space is not None:
            if not isinstance(max_rooms_per_space, int):
                raise SynapseError(
                    400, "bad value for 'max_rooms_per_space'", Codes.BAD_JSON
                )
            if max_rooms_per_space < 0:
                raise SynapseError(
                    400,
                    "Value for 'max_rooms_per_space' must be a non-negative integer",
                    Codes.BAD_JSON,
                )

        return 200, await self.handler.federation_space_summary(
            origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )


class FederationRoomHierarchyServlet(BaseFederationServlet):
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
    PATH = "/hierarchy/(?P<room_id>[^/]*)"

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_room_summary_handler()

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Mapping[bytes, Sequence[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
        return 200, await self.handler.get_federation_hierarchy(
            origin, room_id, suggested_only
        )


class RoomComplexityServlet(BaseFederationServlet):
    """
    Indicates to other servers how complex (and therefore likely
    resource-intensive) a public room this server knows about is.
    """

    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
    PREFIX = FEDERATION_UNSTABLE_PREFIX

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self._store = self.hs.get_datastore()

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        is_public = await self._store.is_room_world_readable_or_publicly_joinable(
            room_id
        )

        if not is_public:
            raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)

        complexity = await self._store.get_room_complexity(room_id)
        return 200, complexity

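# The complexity response is a JSON object keyed by scoring version; with the
# current "v1" scoring it looks like (value illustrative):
#
#     {"v1": 9.8}
#
# where larger values indicate more state, and hence more work, in the room.

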
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationSendServlet,
    FederationEventServlet,
    FederationStateV1Servlet,
    FederationStateIdsServlet,
    FederationBackfillServlet,
    FederationQueryServlet,
    FederationMakeJoinServlet,
    FederationMakeLeaveServlet,
    FederationEventServlet,
    FederationV1SendJoinServlet,
    FederationV2SendJoinServlet,
    FederationV1SendLeaveServlet,
    FederationV2SendLeaveServlet,
    FederationV1InviteServlet,
    FederationV2InviteServlet,
    FederationGetMissingEventsServlet,
    FederationEventAuthServlet,
    FederationClientKeysQueryServlet,
    FederationUserDevicesQueryServlet,
    FederationClientKeysClaimServlet,
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    FederationVersionServlet,
    RoomComplexityServlet,
    FederationSpaceSummaryServlet,
    FederationRoomHierarchyServlet,
    FederationV1SendKnockServlet,
    FederationMakeKnockServlet,
)
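
# A sketch of how this tuple is typically consumed, assuming a hypothetical
# `http_server` and the dependencies built elsewhere in this package:
#
#     for servlet_class in FEDERATION_SERVLET_CLASSES:
#         servlet_class(hs, authenticator, ratelimiter, server_name).register(
#             http_server
#         )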

synapse/federation/transport/server/groups_local.py (new file, 113 lines)

@ -0,0 +1,113 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Type

from synapse.api.errors import SynapseError
from synapse.federation.transport.server._base import (
    Authenticator,
    BaseFederationServlet,
)
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.server import HomeServer
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter


class BaseGroupsLocalServlet(BaseFederationServlet):
    """Abstract base class for federation servlet classes which provides a groups local handler.

    See BaseFederationServlet for more information.
    """

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_groups_local_handler()


class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
    """A group server has invited a local user"""

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        if get_domain_from_id(group_id) != origin:
            raise SynapseError(403, "group_id doesn't match origin")

        assert isinstance(
            self.handler, GroupsLocalHandler
        ), "Workers cannot handle group invites."

        new_content = await self.handler.on_invite(group_id, user_id, content)

        return 200, new_content


class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
    """A group server has removed a local user"""

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, None]:
        if get_domain_from_id(group_id) != origin:
            raise SynapseError(403, "group_id doesn't match origin")

        assert isinstance(
            self.handler, GroupsLocalHandler
        ), "Workers cannot handle group removals."

        await self.handler.user_removed_from_group(group_id, user_id, content)

        return 200, None


class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
    """Get the groups a list of users are publicising"""

    PATH = "/get_groups_publicised"

    async def on_POST(
        self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
    ) -> Tuple[int, JsonDict]:
        resp = await self.handler.bulk_get_publicised_groups(
            content["user_ids"], proxy=False
        )

        return 200, resp

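# The bulk-publicised lookup above expects a body like (values illustrative):
#
#     POST .../get_groups_publicised
#     {"user_ids": ["@alice:example.com", "@bob:example.com"]}
#
# and the handler responds with, per user ID, the groups they publicise.

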
GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationGroupsLocalInviteServlet,
    FederationGroupsRemoveLocalUserServlet,
    FederationGroupsBulkPublicisedServlet,
)

synapse/federation/transport/server/groups_server.py (new file, 753 lines)

@ -0,0 +1,753 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Type

from typing_extensions import Literal

from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
from synapse.api.errors import Codes, SynapseError
from synapse.federation.transport.server._base import (
    Authenticator,
    BaseFederationServlet,
)
from synapse.http.servlet import parse_string_from_args
from synapse.server import HomeServer
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter


class BaseGroupsServerServlet(BaseFederationServlet):
    """Abstract base class for federation servlet classes which provides a groups server handler.

    See BaseFederationServlet for more information.
    """

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_groups_server_handler()


class FederationGroupsProfileServlet(BaseGroupsServerServlet):
    """Get/set the basic profile of a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/profile"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.get_group_profile(group_id, requester_user_id)

        return 200, new_content

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.update_group_profile(
            group_id, requester_user_id, content
        )

        return 200, new_content

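# Every handler in this file repeats the guard used above: the acting user is
# named in the query string and must belong to the server that signed the
# request. For example (values illustrative), a request such as
#
#     GET .../groups/+group:example.com/profile?requester_user_id=@alice:other.example
#
# is rejected with a 403 unless it was authenticated as "other.example".

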
class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
    PATH = "/groups/(?P<group_id>[^/]*)/summary"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.get_group_summary(group_id, requester_user_id)

        return 200, new_content


class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
    """Get the rooms in a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/rooms"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)

        return 200, new_content


class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
    """Add/remove room from group"""

    PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.add_room_to_group(
            group_id, requester_user_id, room_id, content
        )

        return 200, new_content

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.remove_room_from_group(
            group_id, requester_user_id, room_id
        )

        return 200, new_content


class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
    """Update room config in group"""

    PATH = (
        "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
        "/config/(?P<config_key>[^/]*)"
    )

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        room_id: str,
        config_key: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        result = await self.handler.update_room_in_group(
            group_id, requester_user_id, room_id, config_key, content
        )

        return 200, result


class FederationGroupsUsersServlet(BaseGroupsServerServlet):
    """Get the users in a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/users"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.get_users_in_group(group_id, requester_user_id)

        return 200, new_content


class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
    """Get the users that have been invited to a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/invited_users"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.get_invited_users_in_group(
            group_id, requester_user_id
        )

        return 200, new_content


class FederationGroupsInviteServlet(BaseGroupsServerServlet):
    """Ask a group server to invite someone to the group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.invite_to_group(
            group_id, user_id, requester_user_id, content
        )

        return 200, new_content


class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
    """Accept an invitation from the group server"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        if get_domain_from_id(user_id) != origin:
            raise SynapseError(403, "user_id doesn't match origin")

        new_content = await self.handler.accept_invite(group_id, user_id, content)

        return 200, new_content


class FederationGroupsJoinServlet(BaseGroupsServerServlet):
    """Attempt to join a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        if get_domain_from_id(user_id) != origin:
            raise SynapseError(403, "user_id doesn't match origin")

        new_content = await self.handler.join_group(group_id, user_id, content)

        return 200, new_content


class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
    """Leave or kick a user from the group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.remove_user_from_group(
            group_id, user_id, requester_user_id, content
        )

        return 200, new_content


class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
    """Add/remove a room from the group summary, with optional category.

    Matches both:
        - /groups/:group/summary/rooms/:room_id
        - /groups/:group/summary/categories/:category/rooms/:room_id
    """

    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/categories/(?P<category_id>[^/]+))?"
        "/rooms/(?P<room_id>[^/]*)"
    )

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(
                400, "category_id cannot be empty string", Codes.INVALID_PARAM
            )

        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
            raise SynapseError(
                400,
                "category_id may not be longer than %s characters"
                % (MAX_GROUP_CATEGORYID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.update_group_summary_room(
            group_id,
            requester_user_id,
            room_id=room_id,
            category_id=category_id,
            content=content,
        )

        return 200, resp

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        resp = await self.handler.delete_group_summary_room(
            group_id, requester_user_id, room_id=room_id, category_id=category_id
        )

        return 200, resp


class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
    """Get all categories for a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/?"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        resp = await self.handler.get_group_categories(group_id, requester_user_id)

        return 200, resp


class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
    """Add/remove/get a category in a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        resp = await self.handler.get_group_category(
            group_id, requester_user_id, category_id
        )

        return 200, resp

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
            raise SynapseError(
                400,
                "category_id may not be longer than %s characters"
                % (MAX_GROUP_CATEGORYID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.upsert_group_category(
            group_id, requester_user_id, category_id, content
        )

        return 200, resp

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        resp = await self.handler.delete_group_category(
            group_id, requester_user_id, category_id
        )

        return 200, resp


class FederationGroupsRolesServlet(BaseGroupsServerServlet):
    """Get roles in a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/roles/?"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        resp = await self.handler.get_group_roles(group_id, requester_user_id)

        return 200, resp


class FederationGroupsRoleServlet(BaseGroupsServerServlet):
    """Add/remove/get a role in a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        role_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)

        return 200, resp

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        role_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(
                400, "role_id cannot be empty string", Codes.INVALID_PARAM
            )

        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
            raise SynapseError(
                400,
                "role_id may not be longer than %s characters"
                % (MAX_GROUP_ROLEID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.update_group_role(
            group_id, requester_user_id, role_id, content
        )

        return 200, resp

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        role_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        resp = await self.handler.delete_group_role(
            group_id, requester_user_id, role_id
        )

        return 200, resp


class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
    """Add/remove a user from the group summary, with optional role.

    Matches both:
        - /groups/:group/summary/users/:user_id
        - /groups/:group/summary/roles/:role/users/:user_id
    """

    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/roles/(?P<role_id>[^/]+))?"
        "/users/(?P<user_id>[^/]*)"
    )

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        role_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
            raise SynapseError(
                400,
                "role_id may not be longer than %s characters"
                % (MAX_GROUP_ROLEID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.update_group_summary_user(
            group_id,
            requester_user_id,
            user_id=user_id,
            role_id=role_id,
            content=content,
        )

        return 200, resp

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        role_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        resp = await self.handler.delete_group_summary_user(
            group_id, requester_user_id, user_id=user_id, role_id=role_id
        )

        return 200, resp


class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
    """Sets whether a group is joinable without an invite or knock"""

    PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"

    async def on_PUT(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester_user_id = parse_string_from_args(
            query, "requester_user_id", required=True
        )
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        new_content = await self.handler.set_group_join_policy(
            group_id, requester_user_id, content
        )

        return 200, new_content


GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationGroupsProfileServlet,
    FederationGroupsSummaryServlet,
    FederationGroupsRoomsServlet,
    FederationGroupsUsersServlet,
    FederationGroupsInvitedUsersServlet,
    FederationGroupsInviteServlet,
    FederationGroupsAcceptInviteServlet,
    FederationGroupsJoinServlet,
    FederationGroupsRemoveUserServlet,
    FederationGroupsSummaryRoomsServlet,
    FederationGroupsCategoriesServlet,
    FederationGroupsCategoryServlet,
    FederationGroupsRolesServlet,
    FederationGroupsRoleServlet,
    FederationGroupsSummaryUsersServlet,
    FederationGroupsAddRoomsServlet,
    FederationGroupsAddRoomsConfigServlet,
    FederationGroupsSettingJoinPolicyServlet,
)
|
|
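The optional "(/roles/...)?" group in PATH above is what lets a single servlet serve both URL shapes. A standalone illustration (not part of the diff) using Python's re module, with made-up group and user IDs:

import re

# Same pattern as PATH above, joined into one string for illustration.
PATTERN = re.compile(
    "/groups/(?P<group_id>[^/]*)/summary"
    "(/roles/(?P<role_id>[^/]+))?"
    "/users/(?P<user_id>[^/]*)"
)

# Without a role: the optional group simply doesn't participate (None).
m = PATTERN.fullmatch("/groups/+g:example.org/summary/users/@alice:example.org")
assert m is not None
assert m.group("group_id") == "+g:example.org"
assert m.group("role_id") is None

# With a role: the optional group captures it.
m = PATTERN.fullmatch(
    "/groups/+g:example.org/summary/roles/mods/users/@alice:example.org"
)
assert m is not None and m.group("role_id") == "mods"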
@@ -17,18 +17,17 @@ server protocol.
"""

import logging
from typing import Optional
from typing import List, Optional

import attr

from synapse.types import JsonDict
from synapse.util.jsonobject import JsonEncodedObject

logger = logging.getLogger(__name__)


@attr.s(slots=True)
class Edu(JsonEncodedObject):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Edu:
    """An Edu represents a piece of data sent from one homeserver to another.

    In comparison to Pdus, Edus are not persisted for a long time on disk, are

@@ -36,10 +35,10 @@ class Edu(JsonEncodedObject):
    internal ID or previous references graph.
    """

    edu_type = attr.ib(type=str)
    content = attr.ib(type=dict)
    origin = attr.ib(type=str)
    destination = attr.ib(type=str)
    edu_type: str
    content: dict
    origin: str
    destination: str

    def get_dict(self) -> JsonDict:
        return {

@@ -55,14 +54,21 @@ class Edu(JsonEncodedObject):
            "destination": self.destination,
        }

    def get_context(self):
    def get_context(self) -> str:
        return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")

    def strip_context(self):
    def strip_context(self) -> None:
        getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}"


class Transaction(JsonEncodedObject):
def _none_to_list(edus: Optional[List[JsonDict]]) -> List[JsonDict]:
    if edus is None:
        return []
    return edus


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Transaction:
    """A transaction is a list of Pdus and Edus to be sent to a remote home
    server with some extra metadata.

@@ -78,47 +84,21 @@ class Transaction(JsonEncodedObject):

    """

    valid_keys = [
        "transaction_id",
        "origin",
        "destination",
        "origin_server_ts",
        "previous_ids",
        "pdus",
        "edus",
    ]
    # Required keys.
    transaction_id: str
    origin: str
    destination: str
    origin_server_ts: int
    pdus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list)
    edus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list)

    internal_keys = ["transaction_id", "destination"]

    required_keys = [
        "transaction_id",
        "origin",
        "destination",
        "origin_server_ts",
        "pdus",
    ]

    def __init__(self, transaction_id=None, pdus: Optional[list] = None, **kwargs):
        """If we include a list of pdus then we decode them as PDUs
        automatically.
        """

        # If there's no EDUs then remove the arg
        if "edus" in kwargs and not kwargs["edus"]:
            del kwargs["edus"]

        super().__init__(transaction_id=transaction_id, pdus=pdus or [], **kwargs)

    @staticmethod
    def create_new(pdus, **kwargs):
        """Used to create a new transaction. Will auto fill out
        transaction_id and origin_server_ts keys.
        """
        if "origin_server_ts" not in kwargs:
            raise KeyError("Require 'origin_server_ts' to construct a Transaction")
        if "transaction_id" not in kwargs:
            raise KeyError("Require 'transaction_id' to construct a Transaction")

        kwargs["pdus"] = [p.get_pdu_json() for p in pdus]

        return Transaction(**kwargs)
    def get_dict(self) -> JsonDict:
        """A JSON-ready dictionary of valid keys which aren't internal."""
        result = {
            "origin": self.origin,
            "origin_server_ts": self.origin_server_ts,
            "pdus": self.pdus,
        }
        if self.edus:
            result["edus"] = self.edus
        return result
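The pattern the diff above moves to (frozen attrs classes with auto_attribs, plus a converter that normalises a missing EDU list) can be exercised on its own. A minimal sketch, not part of the diff, with made-up values:

from typing import List, Optional

import attr


def _none_to_list(edus: Optional[List[dict]]) -> List[dict]:
    # Normalise: callers may pass None to mean "no EDUs".
    return [] if edus is None else edus


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Txn:
    transaction_id: str
    origin: str
    destination: str
    origin_server_ts: int
    pdus: List[dict] = attr.ib(factory=list, converter=_none_to_list)
    edus: List[dict] = attr.ib(factory=list, converter=_none_to_list)


t = Txn("123", "hs1", "hs2", 1629000000000, pdus=None, edus=None)
assert t.pdus == [] and t.edus == []  # the converter ran on construction
# t.origin = "evil"  # frozen=True: would raise FrozenInstanceError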
@@ -392,9 +392,6 @@ class ApplicationServicesHandler:
                protocols[p].append(info)

        def _merge_instances(infos: List[JsonDict]) -> JsonDict:
            if not infos:
                return {}

            # Merge the 'instances' lists of multiple results, but just take
            # the other fields from the first as they ought to be identical
            # copy the result so as not to corrupt the cached one

@@ -406,7 +403,9 @@ class ApplicationServicesHandler:

            return combined

        return {p: _merge_instances(protocols[p]) for p in protocols.keys()}
        return {
            p: _merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]
        }

    async def _get_services_for_event(
        self, event: EventBase
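For context, protocols here maps a protocol name to the list of info dicts returned by each application service, and the new comprehension above skips protocols for which every lookup failed. A standalone sketch of the merge (illustrative names, not Synapse code):

from typing import Dict, List


def merge_instances(infos: List[dict]) -> dict:
    if not infos:
        return {}
    # Non-instance fields come from the first result; the "instances"
    # lists are concatenated. Copy so a cached dict is never mutated.
    combined = dict(infos[0])
    combined["instances"] = []
    for info in infos:
        combined["instances"].extend(info.get("instances", []))
    return combined


protocols: Dict[str, List[dict]] = {
    "irc": [
        {"user_fields": ["nick"], "instances": [{"network": "libera"}]},
        {"user_fields": ["nick"], "instances": [{"network": "oftc"}]},
    ],
    "gitter": [],  # every appservice failed, so it is filtered out below
}

merged = {p: merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]}
assert "gitter" not in merged
assert [i["network"] for i in merged["irc"]["instances"]] == ["libera", "oftc"]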
@@ -73,7 +73,7 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email

if TYPE_CHECKING:
    from synapse.rest.client.v1.login import LoginResponse
    from synapse.rest.client.login import LoginResponse
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -461,7 +461,7 @@ class AuthHandler(BaseHandler):

        If no auth flows have been completed successfully, raises an
        InteractiveAuthIncompleteError. To handle this, you can use
        synapse.rest.client.v2_alpha._base.interactive_auth_handler as a
        synapse.rest.client._base.interactive_auth_handler as a
        decorator.

        Args:

@@ -543,7 +543,7 @@ class AuthHandler(BaseHandler):
        # Note that the registration endpoint explicitly removes the
        # "initial_device_display_name" parameter if it is provided
        # without a "password" parameter. See the changes to
        # synapse.rest.client.v2_alpha.register.RegisterRestServlet.on_POST
        # synapse.rest.client.register.RegisterRestServlet.on_POST
        # in commit 544722bad23fc31056b9240189c3cbbbf0ffd3f9.
        if not clientdict:
            clientdict = session.clientdict
@@ -213,7 +213,7 @@ class EventAuthHandler:

            raise AuthError(
                403,
                "You do not belong to any of the required rooms to join this room.",
                "You do not belong to any of the required rooms/spaces to join this room.",
            )

    async def has_restricted_join_rules(
@@ -42,6 +42,7 @@ from twisted.internet import defer

from synapse import event_auth
from synapse.api.constants import (
    EventContentFields,
    EventTypes,
    Membership,
    RejectedReason,

@@ -108,21 +109,33 @@ soft_failed_event_counter = Counter(
)


@attr.s(slots=True)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _NewEventInfo:
    """Holds information about a received event, ready for passing to _auth_and_persist_events

    Attributes:
        event: the received event

        state: the state at that event
        state: the state at that event, according to /state_ids from a remote
            homeserver. Only populated for backfilled events which are going to be a
            new backwards extremity.

        claimed_auth_event_map: a map of (type, state_key) => event for the event's
            claimed auth_events.

            This can include events which have not yet been persisted, in the case that
            we are backfilling a batch of events.

            Note: May be incomplete: if we were unable to find all of the claimed auth
            events. Also, treat the contents with caution: the events might also have
            been rejected, might not yet have been authorized themselves, or they might
            be in the wrong room.

        auth_events: the auth_event map for that event
    """

    event = attr.ib(type=EventBase)
    state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
    auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None)
    event: EventBase
    state: Optional[Sequence[EventBase]]
    claimed_auth_event_map: StateMap[EventBase]


class FederationHandler(BaseHandler):
@@ -207,8 +220,6 @@ class FederationHandler(BaseHandler):
        room_id = pdu.room_id
        event_id = pdu.event_id

        logger.info("handling received PDU: %s", pdu)

        # We reprocess pdus when we have seen them only as outliers
        existing = await self.store.get_event(
            event_id, allow_none=True, allow_rejected=True

@@ -216,14 +227,19 @@

        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.

        already_seen = existing and (
            not existing.internal_metadata.is_outlier()
            or pdu.internal_metadata.is_outlier()
        if existing:
            if not existing.internal_metadata.is_outlier():
                logger.info(
                    "Ignoring received event %s which we have already seen", event_id
                )
        if already_seen:
            logger.debug("Already seen pdu")
            return
            if pdu.internal_metadata.is_outlier():
                logger.info(
                    "Ignoring received outlier %s which we already have as an outlier",
                    event_id,
                )
                return
            logger.info("De-outliering event %s", event_id)

        # do some initial sanity-checking of the event. In particular, make
        # sure it doesn't have hundreds of prev_events or auth_events, which

@@ -262,7 +278,12 @@

        state = None

        # Get missing pdus if necessary.
        # Check that the event passes auth based on the state at the event. This is
        # done for events that are to be added to the timeline (non-outliers).
        #
        # Get missing pdus if necessary:
        # - Fetching any missing prev events to fill in gaps in the graph
        # - Fetching state if we have a hole in the graph
        if not pdu.internal_metadata.is_outlier():
            # We only backfill backwards to the min depth.
            min_depth = await self.get_min_depth_for_context(pdu.room_id)

@@ -313,7 +334,8 @@
                    "Found all missing prev_events",
                )

        if prevs - seen:
        missing_prevs = prevs - seen
        if missing_prevs:
            # We've still not been able to get all of the prev_events for this event.
            #
            # In this case, we need to fall back to asking another server in the

@@ -341,8 +363,8 @@
            if sent_to_us_directly:
                logger.warning(
                    "Rejecting: failed to fetch %d prev events: %s",
                    len(prevs - seen),
                    shortstr(prevs - seen),
                    len(missing_prevs),
                    shortstr(missing_prevs),
                )
                raise FederationError(
                    "ERROR",

@@ -355,9 +377,10 @@
            )

            logger.info(
                "Event %s is missing prev_events: calculating state for a "
                "Event %s is missing prev_events %s: calculating state for a "
                "backwards extremity",
                event_id,
                shortstr(missing_prevs),
            )

            # Calculate the state after each of the previous events, and

@@ -375,7 +398,7 @@

                # Ask the remote server for the states we don't
                # know about
                for p in prevs - seen:
                for p in missing_prevs:
                    logger.info("Requesting state after missing prev_event %s", p)

                    with nested_logging_context(p):

@@ -432,6 +455,13 @@
                        affected=event_id,
                    )

        # A second round of checks for all events. Check that the event passes auth
        # based on `auth_events`, which allows us to assert that the event would
        # have been allowed at some point. If an event passes this check it's OK
        # for it to be used as part of a returned `/state` request, as either
        # a) we received the event as part of the original join and so trust it, or
        # b) we'll do a state resolution with existing state before it becomes
        # part of the "current state", which adds more protection.
        await self._process_received_pdu(origin, pdu, state=state)

    async def _get_missing_events_for_pdu(

@@ -531,21 +561,14 @@
            logger.warning("Failed to get prev_events: %s", e)
            return

        logger.info(
            "Got %d prev_events: %s",
            len(missing_events),
            shortstr(missing_events),
        )
        logger.info("Got %d prev_events", len(missing_events))

        # We want to sort these by depth so we process them and
        # tell clients about them in order.
        missing_events.sort(key=lambda x: x.depth)

        for ev in missing_events:
            logger.info(
                "Handling received prev_event %s",
                ev.event_id,
            )
            logger.info("Handling received prev_event %s", ev)
            with nested_logging_context(ev.event_id):
                try:
                    await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
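The rewritten branch above is easier to read as a decision rule. A standalone sketch (not Synapse code) of when a duplicate PDU should be reprocessed:

from typing import Optional


def should_reprocess(existing_is_outlier: Optional[bool], incoming_is_outlier: bool) -> bool:
    """Mirror of the branch above:

    - no stored copy: process the event
    - stored copy is a full (non-outlier) event: ignore the duplicate
    - both stored and incoming copies are outliers: ignore the duplicate
    - stored copy is an outlier but the incoming one is not: "de-outlier" it
    """
    if existing_is_outlier is None:  # never seen it before
        return True
    if not existing_is_outlier:  # we already have the real event
        return False
    return not incoming_is_outlier  # only upgrade outlier -> timeline event


assert should_reprocess(None, False)
assert not should_reprocess(False, False)
assert not should_reprocess(True, True)
assert should_reprocess(True, False)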
@@ -889,6 +912,79 @@
                "resync_device_due_to_pdu", self._resync_device, event.sender
            )

        await self._handle_marker_event(origin, event)

    async def _handle_marker_event(self, origin: str, marker_event: EventBase):
        """Handles backfilling the insertion event when we receive a marker
        event that points to one.

        Args:
            origin: Origin of the event. Will be called to get the insertion event
            marker_event: The event to process
        """

        if marker_event.type != EventTypes.MSC2716_MARKER:
            # Not a marker event
            return

        if marker_event.rejected_reason is not None:
            # Rejected event
            return

        # Skip processing a marker event if the room version doesn't
        # support it.
        room_version = await self.store.get_room_version(marker_event.room_id)
        if not room_version.msc2716_historical:
            return

        logger.debug("_handle_marker_event: received %s", marker_event)

        insertion_event_id = marker_event.content.get(
            EventContentFields.MSC2716_MARKER_INSERTION
        )

        if insertion_event_id is None:
            # Nothing to retrieve then (invalid marker)
            return

        logger.debug(
            "_handle_marker_event: backfilling insertion event %s", insertion_event_id
        )

        await self._get_events_and_persist(
            origin,
            marker_event.room_id,
            [insertion_event_id],
        )

        insertion_event = await self.store.get_event(
            insertion_event_id, allow_none=True
        )
        if insertion_event is None:
            logger.warning(
                "_handle_marker_event: server %s didn't return insertion event %s for marker %s",
                origin,
                insertion_event_id,
                marker_event.event_id,
            )
            return

        logger.debug(
            "_handle_marker_event: successfully backfilled insertion event %s from marker event %s",
            insertion_event,
            marker_event,
        )

        await self.store.insert_insertion_extremity(
            insertion_event_id, marker_event.room_id
        )

        logger.debug(
            "_handle_marker_event: insertion extremity added for %s from marker event %s",
            insertion_event,
            marker_event,
        )

    async def _resync_device(self, sender: str) -> None:
        """We have detected that the device list for the given user may be out
        of sync, so we try and resync them.
@@ -1000,7 +1096,7 @@
                _NewEventInfo(
                    event=ev,
                    state=events_to_state[e_id],
                    auth_events={
                    claimed_auth_event_map={
                        (
                            auth_events[a_id].type,
                            auth_events[a_id].state_key,

@@ -1057,9 +1153,19 @@
    async def _maybe_backfill_inner(
        self, room_id: str, current_depth: int, limit: int
    ) -> bool:
        extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
        oldest_events_with_depth = (
            await self.store.get_oldest_event_ids_with_depth_in_room(room_id)
        )
        insertion_events_to_be_backfilled = (
            await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
        )
        logger.debug(
            "_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
            oldest_events_with_depth,
            insertion_events_to_be_backfilled,
        )

        if not extremities:
        if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
            logger.debug("Not backfilling as no extremities found.")
            return False

@@ -1089,10 +1195,12 @@
        # state *before* the event, ignoring the special casing certain event
        # types have.

        forward_events = await self.store.get_successor_events(list(extremities))
        forward_event_ids = await self.store.get_successor_events(
            list(oldest_events_with_depth)
        )

        extremities_events = await self.store.get_events(
            forward_events,
            forward_event_ids,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            get_prev_content=False,
        )

@@ -1106,10 +1214,19 @@
            redact=False,
            check_history_visibility_only=True,
        )
        logger.debug(
            "_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
        )

        if not filtered_extremities:
        if not filtered_extremities and not insertion_events_to_be_backfilled:
            return False

        extremities = {
            **oldest_events_with_depth,
            # TODO: insertion_events_to_be_backfilled is currently skipping the filtered_extremities checks
            **insertion_events_to_be_backfilled,
        }

        # Check if we reached a point where we should start backfilling.
        sorted_extremities_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
        max_depth = sorted_extremities_tuple[0][1]
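The merge-and-sort at the end of the hunk above is easy to check in isolation. A standalone sketch with made-up event IDs and depths (both real maps go from event ID to depth):

# Two sources of backwards extremities, keyed by event ID, valued by depth.
oldest_events_with_depth = {"$a": 5, "$b": 9}
insertion_events_to_be_backfilled = {"$c": 7}

# Merge both maps, then sort deepest-first, as the code above does.
extremities = {**oldest_events_with_depth, **insertion_events_to_be_backfilled}
sorted_extremities = sorted(extremities.items(), key=lambda e: -int(e[1]))

assert sorted_extremities == [("$b", 9), ("$c", 7), ("$a", 5)]
max_depth = sorted_extremities[0][1]  # 9: the deepest point to backfill from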
@@ -1643,10 +1760,8 @@
        for p, origin in room_queue:
            try:
                logger.info(
                    "Processing queued PDU %s which was received "
                    "while we were joining %s",
                    p.event_id,
                    p.room_id,
                    "Processing queued PDU %s which was received while we were joining",
                    p,
                )
                with nested_logging_context(p.event_id):
                    await self.on_receive_pdu(origin, p, sent_to_us_directly=True)

@@ -2208,7 +2323,7 @@
        event: EventBase,
        context: EventContext,
        state: Optional[Iterable[EventBase]] = None,
        auth_events: Optional[MutableStateMap[EventBase]] = None,
        claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
        backfilled: bool = False,
    ) -> None:
        """

@@ -2220,17 +2335,18 @@
            context:
                The event context.

                NB that this function potentially modifies it.
            state:
                The state events used to check the event for soft-fail. If this is
                not provided the current state events will be used.
            auth_events:
                Map from (event_type, state_key) to event

                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.
            claimed_auth_event_map:
                A map of (type, state_key) => event for the event's claimed auth_events.
                Possibly incomplete, and possibly including events that are not yet
                persisted, or authed, or in the right room.

                Only populated where we may not already have persisted these events -
                for example, when populating outliers.

            backfilled: True if the event was backfilled.
        """
        context = await self._check_event_auth(

@@ -2238,7 +2354,7 @@
            event,
            context,
            state=state,
            auth_events=auth_events,
            claimed_auth_event_map=claimed_auth_event_map,
            backfilled=backfilled,
        )
|
@ -2302,7 +2418,7 @@ class FederationHandler(BaseHandler):
|
|||
event,
|
||||
res,
|
||||
state=ev_info.state,
|
||||
auth_events=ev_info.auth_events,
|
||||
claimed_auth_event_map=ev_info.claimed_auth_event_map,
|
||||
backfilled=backfilled,
|
||||
)
|
||||
return res
|
||||
|
@ -2568,7 +2684,7 @@ class FederationHandler(BaseHandler):
|
|||
event: EventBase,
|
||||
context: EventContext,
|
||||
state: Optional[Iterable[EventBase]] = None,
|
||||
auth_events: Optional[MutableStateMap[EventBase]] = None,
|
||||
claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
|
||||
backfilled: bool = False,
|
||||
) -> EventContext:
|
||||
"""
|
||||
|
@ -2580,21 +2696,19 @@ class FederationHandler(BaseHandler):
|
|||
context:
|
||||
The event context.
|
||||
|
||||
NB that this function potentially modifies it.
|
||||
state:
|
||||
The state events used to check the event for soft-fail. If this is
|
||||
not provided the current state events will be used.
|
||||
auth_events:
|
||||
Map from (event_type, state_key) to event
|
||||
|
||||
Normally, our calculated auth_events based on the state of the room
|
||||
at the event's position in the DAG, though occasionally (eg if the
|
||||
event is an outlier), may be the auth events claimed by the remote
|
||||
server.
|
||||
claimed_auth_event_map:
|
||||
A map of (type, state_key) => event for the event's claimed auth_events.
|
||||
Possibly incomplete, and possibly including events that are not yet
|
||||
persisted, or authed, or in the right room.
|
||||
|
||||
Also NB that this function adds entries to it.
|
||||
Only populated where we may not already have persisted these events -
|
||||
for example, when populating outliers, or the state for a backwards
|
||||
extremity.
|
||||
|
||||
If this is not provided, it is calculated from the previous state IDs.
|
||||
backfilled: True if the event was backfilled.
|
||||
|
||||
Returns:
|
||||
|
@ -2603,7 +2717,12 @@ class FederationHandler(BaseHandler):
|
|||
room_version = await self.store.get_room_version_id(event.room_id)
|
||||
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
|
||||
|
||||
if not auth_events:
|
||||
if claimed_auth_event_map:
|
||||
# if we have a copy of the auth events from the event, use that as the
|
||||
# basis for auth.
|
||||
auth_events = claimed_auth_event_map
|
||||
else:
|
||||
# otherwise, we calculate what the auth events *should* be, and use that
|
||||
prev_state_ids = await context.get_prev_state_ids()
|
||||
auth_events_ids = self._event_auth_handler.compute_auth_events(
|
||||
event, prev_state_ids, for_verification=True
|
||||
|
@ -2611,18 +2730,11 @@ class FederationHandler(BaseHandler):
|
|||
auth_events_x = await self.store.get_events(auth_events_ids)
|
||||
auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
|
||||
|
||||
# This is a hack to fix some old rooms where the initial join event
|
||||
# didn't reference the create event in its auth events.
|
||||
if event.type == EventTypes.Member and not event.auth_event_ids():
|
||||
if len(event.prev_event_ids()) == 1 and event.depth < 5:
|
||||
c = await self.store.get_event(
|
||||
event.prev_event_ids()[0], allow_none=True
|
||||
)
|
||||
if c and c.type == EventTypes.Create:
|
||||
auth_events[(c.type, c.state_key)] = c
|
||||
|
||||
try:
|
||||
context = await self._update_auth_events_and_context_for_auth(
|
||||
(
|
||||
context,
|
||||
auth_events_for_auth,
|
||||
) = await self._update_auth_events_and_context_for_auth(
|
||||
origin, event, context, auth_events
|
||||
)
|
||||
except Exception:
|
||||
|
@ -2635,9 +2747,10 @@ class FederationHandler(BaseHandler):
|
|||
"Ignoring failure and continuing processing of event.",
|
||||
event.event_id,
|
||||
)
|
||||
auth_events_for_auth = auth_events
|
||||
|
||||
try:
|
||||
event_auth.check(room_version_obj, event, auth_events=auth_events)
|
||||
event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
|
||||
except AuthError as e:
|
||||
logger.warning("Failed auth resolution for %r because %s", event, e)
|
||||
context.rejected = RejectedReason.AUTH_ERROR
|
||||
|
@ -2662,8 +2775,8 @@ class FederationHandler(BaseHandler):
|
|||
origin: str,
|
||||
event: EventBase,
|
||||
context: EventContext,
|
||||
auth_events: MutableStateMap[EventBase],
|
||||
) -> EventContext:
|
||||
input_auth_events: StateMap[EventBase],
|
||||
) -> Tuple[EventContext, StateMap[EventBase]]:
|
||||
"""Helper for _check_event_auth. See there for docs.
|
||||
|
||||
Checks whether a given event has the expected auth events. If it
|
||||
|
@ -2680,7 +2793,7 @@ class FederationHandler(BaseHandler):
|
|||
event:
|
||||
context:
|
||||
|
||||
auth_events:
|
||||
input_auth_events:
|
||||
Map from (event_type, state_key) to event
|
||||
|
||||
Normally, our calculated auth_events based on the state of the room
|
||||
|
@ -2688,11 +2801,12 @@ class FederationHandler(BaseHandler):
|
|||
event is an outlier), may be the auth events claimed by the remote
|
||||
server.
|
||||
|
||||
Also NB that this function adds entries to it.
|
||||
|
||||
Returns:
|
||||
updated context
|
||||
updated context, updated auth event map
|
||||
"""
|
||||
# take a copy of input_auth_events before we modify it.
|
||||
auth_events: MutableStateMap[EventBase] = dict(input_auth_events)
|
||||
|
||||
event_auth_events = set(event.auth_event_ids())
|
||||
|
||||
# missing_auth is the set of the event's auth_events which we don't yet have
|
||||
|
@ -2721,7 +2835,7 @@ class FederationHandler(BaseHandler):
|
|||
# The other side isn't around or doesn't implement the
|
||||
# endpoint, so lets just bail out.
|
||||
logger.info("Failed to get event auth from remote: %s", e1)
|
||||
return context
|
||||
return context, auth_events
|
||||
|
||||
seen_remotes = await self.store.have_seen_events(
|
||||
event.room_id, [e.event_id for e in remote_auth_chain]
|
||||
|
@ -2752,7 +2866,10 @@ class FederationHandler(BaseHandler):
|
|||
await self.state_handler.compute_event_context(e)
|
||||
)
|
||||
await self._auth_and_persist_event(
|
||||
origin, e, missing_auth_event_context, auth_events=auth
|
||||
origin,
|
||||
e,
|
||||
missing_auth_event_context,
|
||||
claimed_auth_event_map=auth,
|
||||
)
|
||||
|
||||
if e.event_id in event_auth_events:
|
||||
|
@ -2770,14 +2887,14 @@ class FederationHandler(BaseHandler):
|
|||
# obviously be empty
|
||||
# (b) alternatively, why don't we do it earlier?
|
||||
logger.info("Skipping auth_event fetch for outlier")
|
||||
return context
|
||||
return context, auth_events
|
||||
|
||||
different_auth = event_auth_events.difference(
|
||||
e.event_id for e in auth_events.values()
|
||||
)
|
||||
|
||||
if not different_auth:
|
||||
return context
|
||||
return context, auth_events
|
||||
|
||||
logger.info(
|
||||
"auth_events refers to events which are not in our calculated auth "
|
||||
|
@ -2803,7 +2920,7 @@ class FederationHandler(BaseHandler):
|
|||
# XXX: should we reject the event in this case? It feels like we should,
|
||||
# but then shouldn't we also do so if we've failed to fetch any of the
|
||||
# auth events?
|
||||
return context
|
||||
return context, auth_events
|
||||
|
||||
# now we state-resolve between our own idea of the auth events, and the remote's
|
||||
# idea of them.
|
||||
|
@ -2833,7 +2950,7 @@ class FederationHandler(BaseHandler):
|
|||
event, context, auth_events
|
||||
)
|
||||
|
||||
return context
|
||||
return context, auth_events
|
||||
|
||||
async def _update_context_for_auth_events(
|
||||
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
|
||||
|
|
|
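The thread running through the hunks above is a refactor from mutating the caller's auth-event map in place to copy-then-return semantics. A minimal sketch of that pattern (names and values are illustrative, not Synapse's):

from typing import Dict, Tuple

StateKey = Tuple[str, str]  # (event_type, state_key)


def update_auth_map(
    input_auth_events: Dict[StateKey, str]
) -> Tuple[str, Dict[StateKey, str]]:
    # Copy before modifying, so the caller's map is never changed behind
    # its back; hand the updated map back alongside the context.
    auth_events = dict(input_auth_events)
    auth_events[("m.room.create", "")] = "$create-event"
    return "updated-context", auth_events


original = {("m.room.member", "@alice:example.org"): "$member-event"}
context, updated = update_auth_map(original)
assert ("m.room.create", "") not in original  # caller's map untouched
assert ("m.room.create", "") in updated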
@@ -824,6 +824,7 @@ class IdentityHandler(BaseHandler):
        room_avatar_url: str,
        room_join_rules: str,
        room_name: str,
        room_type: Optional[str],
        inviter_display_name: str,
        inviter_avatar_url: str,
        id_access_token: Optional[str] = None,

@@ -843,6 +844,7 @@
                notifications.
            room_join_rules: The join rules of the room (e.g. "public").
            room_name: The m.room.name of the room.
            room_type: The type of the room from its m.room.create event (e.g. "m.space").
            inviter_display_name: The current display name of the
                inviter.
            inviter_avatar_url: The URL of the inviter's avatar.

@@ -869,6 +871,10 @@
            "sender_display_name": inviter_display_name,
            "sender_avatar_url": inviter_avatar_url,
        }

        if room_type is not None:
            invite_config["org.matrix.msc3288.room_type"] = room_type

        # If a custom web client location is available, include it in the request.
        if self._web_client_location:
            invite_config["org.matrix.web_client_location"] = self._web_client_location
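Taken together, the hunks above mean the payload sent to the identity server only carries the experimental room-type key when the room declares one. A standalone illustration with made-up values:

from typing import Optional

invite_config = {
    "sender_display_name": "Alice",  # made-up example values
    "sender_avatar_url": "mxc://example.org/abc123",
}

room_type: Optional[str] = "m.space"  # None for an ordinary room

# Mirror of the guard above: ordinary rooms send no room-type key at all.
if room_type is not None:
    invite_config["org.matrix.msc3288.room_type"] = room_type

assert invite_config["org.matrix.msc3288.room_type"] == "m.space"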
@@ -1184,8 +1184,7 @@ class PresenceHandler(BasePresenceHandler):
        new_fields = {"state": presence}

        if not ignore_status_msg:
            msg = status_msg if presence != PresenceState.OFFLINE else None
            new_fields["status_msg"] = msg
            new_fields["status_msg"] = status_msg

        if presence == PresenceState.ONLINE or (
            presence == PresenceState.BUSY and self._busy_presence_enabled

@@ -1478,7 +1477,7 @@ def format_user_presence_state(
    content["user_id"] = state.user_id
    if state.last_active_ts:
        content["last_active_ago"] = now - state.last_active_ts
    if state.status_msg and state.state != PresenceState.OFFLINE:
    if state.status_msg:
        content["status_msg"] = state.status_msg
    if state.state == PresenceState.ONLINE:
        content["currently_active"] = state.currently_active

@@ -1840,9 +1839,7 @@ def handle_timeout(
            # don't set them as offline.
            sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
            if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
                state = state.copy_and_replace(
                    state=PresenceState.OFFLINE, status_msg=None
                )
                state = state.copy_and_replace(state=PresenceState.OFFLINE)
                changed = True
    else:
        # We expect to be poked occasionally by the other side.

@@ -1850,7 +1847,7 @@
        # no one gets stuck online forever.
        if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
            # The other side seems to have disappeared.
            state = state.copy_and_replace(state=PresenceState.OFFLINE, status_msg=None)
            state = state.copy_and_replace(state=PresenceState.OFFLINE)
            changed = True

    return state if changed else None
@@ -70,7 +70,8 @@ class ReceiptsHandler(BaseHandler):
            )
            if not is_in_room:
                logger.info(
                    "Ignoring receipt from %s as we're not in the room",
                    "Ignoring receipt for room %r from server %s as we're not in the room",
                    room_id,
                    origin,
                )
                continue

@@ -188,7 +189,14 @@ class ReceiptEventSource:

            new_users = {}
            for rr_user_id, user_rr in m_read.items():
                hidden = user_rr.get("hidden", None)
                try:
                    hidden = user_rr.get("hidden")
                except AttributeError:
                    # Due to https://github.com/matrix-org/synapse/issues/10376
                    # there are cases where user_rr is a string, in those cases
                    # we just ignore the read receipt
                    continue

                if hidden is not True or rr_user_id == user_id:
                    new_users[rr_user_id] = user_rr.copy()
                    # If hidden has a value replace hidden with the correct prefixed key
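The try/except above is the EAFP idiom: rather than type-checking each receipt payload, it attempts the dict lookup and treats an AttributeError (the payload was a plain string) as "skip this receipt". A standalone sketch, not Synapse code:

from typing import Any, Dict

m_read: Dict[str, Any] = {
    "@alice:example.org": {"ts": 1629000000000},  # well-formed receipt
    "@mallory:example.org": "not-a-dict",  # malformed, as in issue 10376
}

new_users = {}
for rr_user_id, user_rr in m_read.items():
    try:
        hidden = user_rr.get("hidden")
    except AttributeError:
        # Malformed payload: strings have no .get(), so drop the receipt.
        continue
    if hidden is not True:
        new_users[rr_user_id] = dict(user_rr)

assert list(new_users) == ["@alice:example.org"]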
@@ -356,6 +356,12 @@ class RoomListHandler(BaseHandler):
        include_all_networks: bool = False,
        third_party_instance_id: Optional[str] = None,
    ) -> JsonDict:
        """Get the public room list from remote server

        Raises:
            SynapseError
        """

        if not self.enable_room_list_search:
            return {"chunk": [], "total_room_count_estimate": 0}

@@ -395,6 +401,7 @@ class RoomListHandler(BaseHandler):
            limit = None
            since_token = None

        try:
            res = await self._get_remote_list_cached(
                server_name,
                limit=limit,

@@ -402,6 +409,8 @@ class RoomListHandler(BaseHandler):
                include_all_networks=include_all_networks,
                third_party_instance_id=third_party_instance_id,
            )
        except (RequestSendFailed, HttpResponseException):
            raise SynapseError(502, "Failed to fetch room list")

        if search_filter:
            res = {

@@ -423,10 +432,13 @@ class RoomListHandler(BaseHandler):
        include_all_networks: bool = False,
        third_party_instance_id: Optional[str] = None,
    ) -> JsonDict:
        """Wrapper around FederationClient.get_public_rooms that caches the
        result.
        """

        repl_layer = self.hs.get_federation_client()
        if search_filter:
            # We can't cache when asking for search
            try:
                return await repl_layer.get_public_rooms(
                    server_name,
                    limit=limit,

@@ -435,8 +447,6 @@ class RoomListHandler(BaseHandler):
                    include_all_networks=include_all_networks,
                    third_party_instance_id=third_party_instance_id,
                )
            except (RequestSendFailed, HttpResponseException):
                raise SynapseError(502, "Failed to fetch room list")

        key = (
            server_name,
@@ -19,7 +19,12 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple

from synapse import types
from synapse.api.constants import AccountDataTypes, EventTypes, Membership
from synapse.api.constants import (
    AccountDataTypes,
    EventContentFields,
    EventTypes,
    Membership,
)
from synapse.api.errors import (
    AuthError,
    Codes,

@@ -1237,6 +1242,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
        if room_name_event:
            room_name = room_name_event.content.get("name", "")

        room_type = None
        room_create_event = room_state.get((EventTypes.Create, ""))
        if room_create_event:
            room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)

        room_join_rules = ""
        join_rules_event = room_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:

@@ -1263,6 +1273,7 @@
            room_avatar_url=room_avatar_url,
            room_join_rules=room_join_rules,
            room_name=room_name,
            room_type=room_type,
            inviter_display_name=inviter_display_name,
            inviter_avatar_url=inviter_avatar_url,
            id_access_token=id_access_token,
synapse/handlers/room_summary.py (new file, 1171 lines)
File diff suppressed because it is too large.
@@ -16,7 +16,12 @@ import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING
from io import BytesIO
from typing import TYPE_CHECKING, Optional

from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReactorTCP
from twisted.mail.smtp import ESMTPSenderFactory

from synapse.logging.context import make_deferred_yieldable

@@ -26,19 +31,75 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


async def _sendmail(
    reactor: IReactorTCP,
    smtphost: str,
    smtpport: int,
    from_addr: str,
    to_addr: str,
    msg_bytes: bytes,
    username: Optional[bytes] = None,
    password: Optional[bytes] = None,
    require_auth: bool = False,
    require_tls: bool = False,
    tls_hostname: Optional[str] = None,
) -> None:
    """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests

    Params:
        reactor: reactor to use to make the outbound connection
        smtphost: hostname to connect to
        smtpport: port to connect to
        from_addr: "From" address for email
        to_addr: "To" address for email
        msg_bytes: Message content
        username: username to authenticate with, if auth is enabled
        password: password to give when authenticating
        require_auth: if auth is not offered, fail the request
        require_tls: if TLS is not offered, fail the request
        tls_hostname: TLS hostname to check for. None to disable TLS.
    """
    msg = BytesIO(msg_bytes)

    d: "Deferred[object]" = Deferred()

    factory = ESMTPSenderFactory(
        username,
        password,
        from_addr,
        to_addr,
        msg,
        d,
        heloFallback=True,
        requireAuthentication=require_auth,
        requireTransportSecurity=require_tls,
        hostname=tls_hostname,
    )

    # the IReactorTCP interface claims host has to be a bytes, which seems to be wrong
    reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None)  # type: ignore[arg-type]

    await make_deferred_yieldable(d)


class SendEmailHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs

        self._sendmail = hs.get_sendmail()
        self._reactor = hs.get_reactor()

        self._from = hs.config.email.email_notif_from
        self._smtp_host = hs.config.email.email_smtp_host
        self._smtp_port = hs.config.email.email_smtp_port
        self._smtp_user = hs.config.email.email_smtp_user
        self._smtp_pass = hs.config.email.email_smtp_pass

        user = hs.config.email.email_smtp_user
        self._smtp_user = user.encode("utf-8") if user is not None else None
        passwd = hs.config.email.email_smtp_pass
        self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None
        self._require_transport_security = hs.config.email.require_transport_security
        self._enable_tls = hs.config.email.enable_smtp_tls

        self._sendmail = _sendmail

    async def send_email(
        self,

@@ -82,17 +143,16 @@ class SendEmailHandler:

        logger.info("Sending email to %s" % email_address)

        await make_deferred_yieldable(
            self._sendmail(
        await self._sendmail(
            self._reactor,
            self._smtp_host,
            self._smtp_port,
            raw_from,
            raw_to,
            multipart_msg.as_string().encode("utf8"),
            reactor=self._reactor,
            port=self._smtp_port,
            requireAuthentication=self._smtp_user is not None,
            username=self._smtp_user,
            password=self._smtp_pass,
            requireTransportSecurity=self._require_transport_security,
        )
            require_auth=self._smtp_user is not None,
            require_tls=self._require_transport_security,
            tls_hostname=self._smtp_host if self._enable_tls else None,
        )
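A rough usage sketch for the _sendmail wrapper introduced above (hostnames, ports, credentials and addresses are made up; inside Synapse the reactor and settings come from the homeserver config):

from email.mime.text import MIMEText

from twisted.internet import reactor  # the global Twisted reactor


async def send_test_mail() -> None:
    msg = MIMEText("Hello from Synapse", "plain")
    msg["Subject"] = "Test"
    msg["From"] = "noreply@example.org"  # made-up addresses
    msg["To"] = "alice@example.org"

    await _sendmail(
        reactor,
        "smtp.example.org",  # made-up SMTP host
        587,
        "noreply@example.org",
        "alice@example.org",
        msg.as_string().encode("utf8"),
        username=b"smtp-user",
        password=b"smtp-secret",
        require_auth=True,
        require_tls=True,
        tls_hostname="smtp.example.org",  # None would disable TLS
    )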
@ -1,667 +0,0 @@
|
|||
# Copyright 2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
import re
|
||||
from collections import deque
|
||||
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple
|
||||
|
||||
import attr
|
||||
|
||||
from synapse.api.constants import (
|
||||
EventContentFields,
|
||||
EventTypes,
|
||||
HistoryVisibility,
|
||||
JoinRules,
|
||||
Membership,
|
||||
RoomTypes,
|
||||
)
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.utils import format_event_for_client_v2
|
||||
from synapse.types import JsonDict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# number of rooms to return. We'll stop once we hit this limit.
|
||||
MAX_ROOMS = 50
|
||||
|
||||
# max number of events to return per room.
|
||||
MAX_ROOMS_PER_SPACE = 50
|
||||
|
||||
# max number of federation servers to hit per room
|
||||
MAX_SERVERS_PER_SPACE = 3
|
||||
|
||||
|
||||
class SpaceSummaryHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self._clock = hs.get_clock()
|
||||
self._auth = hs.get_auth()
|
||||
self._event_auth_handler = hs.get_event_auth_handler()
|
||||
self._store = hs.get_datastore()
|
||||
self._event_serializer = hs.get_event_client_serializer()
|
||||
self._server_name = hs.hostname
|
||||
self._federation_client = hs.get_federation_client()
|
||||
|
||||
async def get_space_summary(
|
||||
self,
|
||||
requester: str,
|
||||
room_id: str,
|
||||
suggested_only: bool = False,
|
||||
max_rooms_per_space: Optional[int] = None,
|
||||
) -> JsonDict:
|
||||
"""
|
||||
Implementation of the space summary C-S API
|
||||
|
||||
Args:
|
||||
requester: user id of the user making this request
|
||||
|
||||
room_id: room id to start the summary at
|
||||
|
||||
suggested_only: whether we should only return children with the "suggested"
|
||||
flag set.
|
||||
|
||||
max_rooms_per_space: an optional limit on the number of child rooms we will
|
||||
return. This does not apply to the root room (ie, room_id), and
|
||||
is overridden by MAX_ROOMS_PER_SPACE.
|
||||
|
||||
Returns:
|
||||
summary dict to return
|
||||
"""
|
||||
# first of all, check that the user is in the room in question (or it's
|
||||
# world-readable)
|
||||
await self._auth.check_user_in_room_or_world_readable(room_id, requester)
|
||||
|
||||
# the queue of rooms to process
|
||||
room_queue = deque((_RoomQueueEntry(room_id, ()),))
|
||||
|
||||
# rooms we have already processed
|
||||
processed_rooms: Set[str] = set()
|
||||
|
||||
# events we have already processed. We don't necessarily have their event ids,
|
||||
# so instead we key on (room id, state key)
|
||||
processed_events: Set[Tuple[str, str]] = set()
|
||||
|
||||
rooms_result: List[JsonDict] = []
|
||||
events_result: List[JsonDict] = []
|
||||
|
||||
while room_queue and len(rooms_result) < MAX_ROOMS:
|
||||
queue_entry = room_queue.popleft()
|
||||
room_id = queue_entry.room_id
|
||||
if room_id in processed_rooms:
|
||||
# already done this room
|
||||
continue
|
||||
|
||||
logger.debug("Processing room %s", room_id)
|
||||
|
||||
is_in_room = await self._store.is_host_joined(room_id, self._server_name)
|
||||
|
||||
# The client-specified max_rooms_per_space limit doesn't apply to the
|
||||
# room_id specified in the request, so we ignore it if this is the
|
||||
# first room we are processing.
|
||||
max_children = max_rooms_per_space if processed_rooms else None
|
||||
|
||||
if is_in_room:
|
||||
room, events = await self._summarize_local_room(
|
||||
requester, None, room_id, suggested_only, max_children
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"Query of local room %s returned events %s",
|
||||
room_id,
|
||||
["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
|
||||
)
|
||||
|
||||
if room:
|
||||
rooms_result.append(room)
|
||||
else:
|
||||
fed_rooms, fed_events = await self._summarize_remote_room(
|
||||
queue_entry,
|
||||
suggested_only,
|
||||
max_children,
|
||||
exclude_rooms=processed_rooms,
|
||||
)
|
||||
|
||||
# The results over federation might include rooms that the we,
|
||||
# as the requesting server, are allowed to see, but the requesting
|
||||
# user is not permitted see.
|
||||
#
|
||||
# Filter the returned results to only what is accessible to the user.
|
||||
room_ids = set()
|
||||
events = []
|
||||
for room in fed_rooms:
|
||||
fed_room_id = room.get("room_id")
|
||||
if not fed_room_id or not isinstance(fed_room_id, str):
|
||||
continue
|
||||
|
||||
# The room should only be included in the summary if:
|
||||
# a. the user is in the room;
|
||||
# b. the room is world readable; or
|
||||
# c. the user could join the room, e.g. the join rules
|
||||
# are set to public or the user is in a space that
|
||||
# has been granted access to the room.
|
||||
#
|
||||
# Note that we know the user is not in the root room (which is
|
||||
# why the remote call was made in the first place), but the user
|
||||
# could be in one of the children rooms and we just didn't know
|
||||
# about the link.
|
||||
|
||||
# The API doesn't return the room version so assume that a
|
||||
# join rule of knock is valid.
|
||||
include_room = (
|
||||
room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
|
||||
or room.get("world_readable") is True
|
||||
)
|
||||
|
||||
# Check if the user is a member of any of the allowed spaces
|
||||
# from the response.
|
||||
allowed_rooms = room.get("allowed_spaces")
|
||||
if (
|
||||
not include_room
|
||||
and allowed_rooms
|
||||
and isinstance(allowed_rooms, list)
|
||||
):
|
||||
include_room = await self._event_auth_handler.is_user_in_rooms(
|
||||
allowed_rooms, requester
|
||||
)
|
||||
|
||||
# Finally, if this isn't the requested room, check ourselves
|
||||
# if we can access the room.
|
||||
if not include_room and fed_room_id != queue_entry.room_id:
|
||||
include_room = await self._is_room_accessible(
|
||||
fed_room_id, requester, None
|
||||
)
|
||||
|
||||
# The user can see the room, include it!
|
||||
if include_room:
|
||||
rooms_result.append(room)
|
||||
room_ids.add(fed_room_id)
|
||||
|
||||
# All rooms returned don't need visiting again (even if the user
|
||||
# didn't have access to them).
|
||||
processed_rooms.add(fed_room_id)
|
||||
|
||||
for event in fed_events:
|
||||
if event.get("room_id") in room_ids:
|
||||
events.append(event)
|
||||
|
||||
logger.debug(
|
||||
"Query of %s returned rooms %s, events %s",
|
||||
room_id,
|
||||
[room.get("room_id") for room in fed_rooms],
|
||||
["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in fed_events],
|
||||
)
|
||||
|
||||
# the room we queried may or may not have been returned, but don't process
|
||||
# it again, anyway.
|
||||
processed_rooms.add(room_id)
|
||||
|
||||
# XXX: is it ok that we blindly iterate through any events returned by
|
||||
# a remote server, whether or not they actually link to any rooms in our
|
||||
# tree?
|
||||
for ev in events:
|
||||
# remote servers might return events we have already processed
|
||||
# (eg, Dendrite returns inward pointers as well as outward ones), so
|
||||
# we need to filter them out, to avoid returning duplicate links to the
|
||||
# client.
|
||||
ev_key = (ev["room_id"], ev["state_key"])
|
||||
if ev_key in processed_events:
|
||||
continue
|
||||
events_result.append(ev)
|
||||
|
||||
# add the child to the queue. we have already validated
|
||||
# that the vias are a list of server names.
|
||||
room_queue.append(
|
||||
_RoomQueueEntry(ev["state_key"], ev["content"]["via"])
|
||||
)
|
||||
processed_events.add(ev_key)
|
||||
|
||||
# Before returning to the client, remove the allowed_spaces key for any
|
||||
# rooms.
|
||||
for room in rooms_result:
|
||||
room.pop("allowed_spaces", None)
|
||||
|
||||
return {"rooms": rooms_result, "events": events_result}
|
||||
|
||||
async def federation_space_summary(
|
||||
self,
|
||||
origin: str,
|
||||
room_id: str,
|
||||
suggested_only: bool,
|
||||
max_rooms_per_space: Optional[int],
|
||||
exclude_rooms: Iterable[str],
|
||||
) -> JsonDict:
|
||||
"""
|
||||
Implementation of the space summary Federation API
|
||||
|
||||
Args:
|
||||
origin: The server requesting the spaces summary.
|
||||
|
||||
room_id: room id to start the summary at
|
||||
|
||||
suggested_only: whether we should only return children with the "suggested"
|
||||
flag set.
|
||||
|
||||
max_rooms_per_space: an optional limit on the number of child rooms we will
|
||||
return. Unlike the C-S API, this applies to the root room (room_id).
|
||||
It is clipped to MAX_ROOMS_PER_SPACE.
|
||||
|
||||
exclude_rooms: a list of rooms to skip over (presumably because the
|
||||
calling server has already seen them).
|
||||
|
||||
Returns:
|
||||
summary dict to return
|
||||
"""
|
||||
# the queue of rooms to process
|
||||
room_queue = deque((room_id,))
|
||||
|
||||
# the set of rooms that we should not walk further. Initialise it with the
|
||||
# excluded-rooms list; we will add other rooms as we process them so that
|
||||
# we do not loop.
|
||||
processed_rooms: Set[str] = set(exclude_rooms)
|
||||
|
||||
rooms_result: List[JsonDict] = []
|
||||
events_result: List[JsonDict] = []
|
||||
|
||||
while room_queue and len(rooms_result) < MAX_ROOMS:
|
||||
room_id = room_queue.popleft()
|
||||
if room_id in processed_rooms:
|
||||
# already done this room
|
||||
continue
|
||||
|
||||
logger.debug("Processing room %s", room_id)
|
||||
|
||||
room, events = await self._summarize_local_room(
|
||||
None, origin, room_id, suggested_only, max_rooms_per_space
|
||||
)
|
||||
|
||||
processed_rooms.add(room_id)
|
||||
|
||||
if room:
|
||||
rooms_result.append(room)
|
||||
events_result.extend(events)
|
||||
|
||||
# add any children to the queue
|
||||
room_queue.extend(edge_event["state_key"] for edge_event in events)
|
||||
|
||||
return {"rooms": rooms_result, "events": events_result}
|
||||
|
||||
async def _summarize_local_room(
|
||||
self,
|
||||
requester: Optional[str],
|
||||
origin: Optional[str],
|
||||
room_id: str,
|
||||
suggested_only: bool,
|
||||
max_children: Optional[int],
|
||||
) -> Tuple[Optional[JsonDict], Sequence[JsonDict]]:
|
||||
"""
|
||||
Generate a room entry and a list of event entries for a given room.
|
||||
|
||||
Args:
|
||||
requester:
|
||||
The user requesting the summary, if it is a local request. None
|
||||
if this is a federation request.
|
||||
origin:
|
||||
The server requesting the summary, if it is a federation request.
|
||||
None if this is a local request.
|
||||
room_id: The room ID to summarize.
|
||||
suggested_only: True if only suggested children should be returned.
|
||||
Otherwise, all children are returned.
|
||||
max_children:
|
||||
The maximum number of children rooms to include. This is capped
|
||||
to a server-set limit.
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
The room information, if the room should be returned to the
|
||||
user. None, otherwise.
|
||||
|
||||
An iterable of the sorted children events. This may be limited
|
||||
to a maximum size or may include all children.
|
||||
"""
|
||||
if not await self._is_room_accessible(room_id, requester, origin):
|
||||
return None, ()
|
||||
|
||||
room_entry = await self._build_room_entry(room_id)
|
||||
|
||||
# If the room is not a space, return just the room information.
|
||||
if room_entry.get("room_type") != RoomTypes.SPACE:
|
||||
return room_entry, ()
|
||||
|
||||
# Otherwise, look for child rooms/spaces.
|
||||
child_events = await self._get_child_events(room_id)
|
||||
|
||||
if suggested_only:
|
||||
# we only care about suggested children
|
||||
child_events = filter(_is_suggested_child_event, child_events)
|
||||
|
||||
if max_children is None or max_children > MAX_ROOMS_PER_SPACE:
|
||||
max_children = MAX_ROOMS_PER_SPACE
|
||||
|
||||
now = self._clock.time_msec()
|
||||
events_result: List[JsonDict] = []
|
||||
for edge_event in itertools.islice(child_events, max_children):
|
||||
events_result.append(
|
||||
await self._event_serializer.serialize_event(
|
||||
edge_event,
|
||||
time_now=now,
|
||||
event_format=format_event_for_client_v2,
|
||||
)
|
||||
)
|
||||
|
||||
return room_entry, events_result
|
||||
|
||||
async def _summarize_remote_room(
|
||||
self,
|
||||
room: "_RoomQueueEntry",
|
||||
suggested_only: bool,
|
||||
max_children: Optional[int],
|
||||
exclude_rooms: Iterable[str],
|
||||
) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
|
||||
"""
|
||||
Request room entries and a list of event entries for a given room by querying a remote server.
|
||||
|
||||
Args:
|
||||
room: The room to summarize.
|
||||
suggested_only: True if only suggested children should be returned.
|
||||
Otherwise, all children are returned.
|
||||
max_children:
|
||||
The maximum number of children rooms to include. This is capped
|
||||
to a server-set limit.
|
||||
exclude_rooms:
|
||||
Rooms IDs which do not need to be summarized.
|
||||
|
||||
        Returns:
            A tuple of:
                An iterable of rooms.

                An iterable of the sorted children events. This may be limited
                to a maximum size or may include all children.
        """
        room_id = room.room_id
        logger.info("Requesting summary for %s via %s", room_id, room.via)

        # we need to make the exclusion list json-serialisable
        exclude_rooms = list(exclude_rooms)

        via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE)
        try:
            res = await self._federation_client.get_space_summary(
                via,
                room_id,
                suggested_only=suggested_only,
                max_rooms_per_space=max_children,
                exclude_rooms=exclude_rooms,
            )
        except Exception as e:
            logger.warning(
                "Unable to get summary of %s via federation: %s",
                room_id,
                e,
                exc_info=logger.isEnabledFor(logging.DEBUG),
            )
            return (), ()

        return res.rooms, tuple(
            ev.data for ev in res.events if ev.event_type == EventTypes.SpaceChild
        )

    async def _is_room_accessible(
        self, room_id: str, requester: Optional[str], origin: Optional[str]
    ) -> bool:
        """
        Calculate whether the room should be shown in the spaces summary.

        It should be included if:

        * The requester is joined or can join the room (per MSC3173).
        * The origin server has any user that is joined or can join the room.
        * The history visibility is set to world readable.

        Args:
            room_id: The room ID to summarize.
            requester:
                The user requesting the summary, if it is a local request. None
                if this is a federation request.
            origin:
                The server requesting the summary, if it is a federation request.
                None if this is a local request.

        Returns:
            True if the room should be included in the spaces summary.
        """
        state_ids = await self._store.get_current_state_ids(room_id)

        # If there's no state for the room, it isn't known.
        if not state_ids:
            # The user might have a pending invite for the room.
            if requester and await self._store.get_invite_for_local_user_in_room(
                requester, room_id
            ):
                return True

            logger.info("room %s is unknown, omitting from summary", room_id)
            return False

        room_version = await self._store.get_room_version(room_id)

        # Include the room if it has join rules of public or knock.
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
        if join_rules_event_id:
            join_rules_event = await self._store.get_event(join_rules_event_id)
            join_rule = join_rules_event.content.get("join_rule")
            if join_rule == JoinRules.PUBLIC or (
                room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
            ):
                return True

        # Include the room if it is peekable.
        hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""))
        if hist_vis_event_id:
            hist_vis_ev = await self._store.get_event(hist_vis_event_id)
            hist_vis = hist_vis_ev.content.get("history_visibility")
            if hist_vis == HistoryVisibility.WORLD_READABLE:
                return True

        # Otherwise we need to check information specific to the user or server.

        # If we have an authenticated requesting user, check if they are a member
        # of the room (or can join the room).
        if requester:
            member_event_id = state_ids.get((EventTypes.Member, requester), None)

            # If they're in the room they can see info on it.
            if member_event_id:
                member_event = await self._store.get_event(member_event_id)
                if member_event.membership in (Membership.JOIN, Membership.INVITE):
                    return True

            # Otherwise, check if they should be allowed access via membership in a space.
            if await self._event_auth_handler.has_restricted_join_rules(
                state_ids, room_version
            ):
                allowed_rooms = (
                    await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
                )
                if await self._event_auth_handler.is_user_in_rooms(
                    allowed_rooms, requester
                ):
                    return True

        # If this is a request over federation, check if the host is in the room or
        # has a user who could join the room.
        elif origin:
            if await self._event_auth_handler.check_host_in_room(
                room_id, origin
            ) or await self._store.is_host_invited(room_id, origin):
                return True

            # Alternately, if the host has a user in any of the spaces specified
            # for access, then the host can see this room (and should do filtering
            # if the requester cannot see it).
            if await self._event_auth_handler.has_restricted_join_rules(
                state_ids, room_version
            ):
                allowed_rooms = (
                    await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
                )
                for space_id in allowed_rooms:
                    if await self._event_auth_handler.check_host_in_room(
                        space_id, origin
                    ):
                        return True

        logger.info(
            "room %s is unpeekable and requester %s is not a member / not allowed to join, omitting from summary",
            room_id,
            requester or origin,
        )
        return False

    async def _build_room_entry(self, room_id: str) -> JsonDict:
        """Generate an entry suitable for the 'rooms' list in the summary response"""
        stats = await self._store.get_room_with_stats(room_id)

        # currently this should be impossible because we call
        # check_user_in_room_or_world_readable on the room before we get here, so
        # there should always be an entry
        assert stats is not None, "unable to retrieve stats for %s" % (room_id,)

        current_state_ids = await self._store.get_current_state_ids(room_id)
        create_event = await self._store.get_event(
            current_state_ids[(EventTypes.Create, "")]
        )

        room_version = await self._store.get_room_version(room_id)
        allowed_rooms = None
        if await self._event_auth_handler.has_restricted_join_rules(
            current_state_ids, room_version
        ):
            allowed_rooms = await self._event_auth_handler.get_rooms_that_allow_join(
                current_state_ids
            )

        entry = {
            "room_id": stats["room_id"],
            "name": stats["name"],
            "topic": stats["topic"],
            "canonical_alias": stats["canonical_alias"],
            "num_joined_members": stats["joined_members"],
            "avatar_url": stats["avatar"],
            "join_rules": stats["join_rules"],
            "world_readable": (
                stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
            ),
            "guest_can_join": stats["guest_access"] == "can_join",
            "creation_ts": create_event.origin_server_ts,
            "room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
            "allowed_spaces": allowed_rooms,
        }

        # Filter out Nones: omit the field altogether rather than including a null.
        room_entry = {k: v for k, v in entry.items() if v is not None}

        return room_entry

    async def _get_child_events(self, room_id: str) -> Iterable[EventBase]:
        """
        Get the child events for a given room.

        The returned results are sorted for stability.

        Args:
            room_id: The room id to get the children of.

        Returns:
            An iterable of sorted child events.
        """

        # look for child rooms/spaces.
        current_state_ids = await self._store.get_current_state_ids(room_id)

        events = await self._store.get_events_as_list(
            [
                event_id
                for key, event_id in current_state_ids.items()
                if key[0] == EventTypes.SpaceChild
            ]
        )

        # filter out any events without a "via" (which implies it has been redacted),
        # and order to ensure we return stable results.
        return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key)


@attr.s(frozen=True, slots=True)
class _RoomQueueEntry:
    room_id = attr.ib(type=str)
    via = attr.ib(type=Sequence[str])


def _has_valid_via(e: EventBase) -> bool:
    via = e.content.get("via")
    if not via or not isinstance(via, Sequence):
        return False
    for v in via:
        if not isinstance(v, str):
            logger.debug("Ignoring edge event %s with invalid via entry", e.event_id)
            return False
    return True


def _is_suggested_child_event(edge_event: EventBase) -> bool:
    suggested = edge_event.content.get("suggested")
    if isinstance(suggested, bool) and suggested:
        return True
logger.debug("Ignorning not-suggested child %s", edge_event.state_key)
|
||||
    return False


# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive.
_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")


def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
    """
    Generate a value for comparing two child events for ordering.

    The rules for ordering are supposed to be:

    1. The 'order' key, if it is valid.
    2. The 'origin_server_ts' of the 'm.room.create' event.
    3. The 'room_id'.

    But we skip step 2 since we may not have any state from the room.

    Args:
        child: The event for generating a comparison key.

    Returns:
        The comparison key as a tuple of:
            False if the ordering is valid.
            The ordering field.
            The room ID.
    """
    order = child.content.get("order")
    # If order is not a string or doesn't meet the requirements, ignore it.
    if not isinstance(order, str):
        order = None
    elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
        order = None

    # Items without an order come last.
    return (order is None, order, child.room_id)
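The comparison key is easiest to see with concrete values. A minimal sketch (not part of the diff; the SimpleNamespace objects are hypothetical stand-ins for real EventBase instances):

from types import SimpleNamespace

children = [
    SimpleNamespace(content={}, room_id="!c:example.org"),                   # no order
    SimpleNamespace(content={"order": "b"}, room_id="!a:example.org"),       # valid order
    SimpleNamespace(content={"order": "\u00e9"}, room_id="!b:example.org"),  # invalid: non-ASCII
    SimpleNamespace(content={"order": "a"}, room_id="!d:example.org"),       # valid order
]

# Valid orders sort first (lexicographically), then the rest by room ID:
# !d:example.org, !a:example.org, !b:example.org, !c:example.org
print([c.room_id for c in sorted(children, key=_child_events_comparison_key)])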
@ -269,14 +269,22 @@ class SyncHandler:
        self.presence_handler = hs.get_presence_handler()
        self.event_sources = hs.get_event_sources()
        self.clock = hs.get_clock()
        self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
            hs.get_clock(), "sync"
        )
        self.state = hs.get_state_handler()
        self.auth = hs.get_auth()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state

        # TODO: flush cache entries on subsequent sync request.
        # Once we get the next /sync request (ie, one with the same access token
        # that sets 'since' to 'next_batch'), we know that device won't need a
        # cached result any more, and we could flush the entry from the cache to save
        # memory.
        self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
            hs.get_clock(),
            "sync",
            timeout_ms=hs.config.caches.sync_response_cache_duration,
        )

        # ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
        self.lazy_loaded_members_cache: ExpiringCache[
            Tuple[str, Optional[str]], LruCache[str, str]
@ -335,7 +335,8 @@ class TypingWriterHandler(FollowerTypingHandler):
        )
        if not is_in_room:
            logger.info(
                "Ignoring typing update from %s as we're not in the room",
                "Ignoring typing update for room %r from server %s as we're not in the room",
                room_id,
                origin,
            )
            return
@ -12,8 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import logging
from typing import Optional

import attr
from zope.interface import implementer

from twisted.internet import defer, protocol
@ -21,7 +24,6 @@ from twisted.internet.error import ConnectError
from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
from twisted.internet.protocol import ClientFactory, Protocol, connectionDone
from twisted.web import http
from twisted.web.http_headers import Headers

logger = logging.getLogger(__name__)
@ -30,6 +32,22 @@ class ProxyConnectError(ConnectError):
    pass


@attr.s
class ProxyCredentials:
    username_password = attr.ib(type=bytes)

    def as_proxy_authorization_value(self) -> bytes:
        """
        Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').

        Returns:
            The encoded value for a Proxy-Authorization header, derived from
            the authentication string.
"""
|
||||
# Encode as base64 and prepend the authorization type
|
||||
return b"Basic " + base64.encodebytes(self.username_password)
|
||||
|
||||
|
||||
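A hedged illustration of what the class above produces for a "user:pass" credential pair. Note that base64.encodebytes() appends a trailing newline, which becomes part of the header value as sent:

creds = ProxyCredentials(b"user:pass")
assert creds.as_proxy_authorization_value() == b"Basic dXNlcjpwYXNz\n"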
@implementer(IStreamClientEndpoint)
class HTTPConnectProxyEndpoint:
    """An Endpoint implementation which will send a CONNECT request to an http proxy
@ -46,7 +64,7 @@ class HTTPConnectProxyEndpoint:
        proxy_endpoint: the endpoint to use to connect to the proxy
        host: hostname that we want to CONNECT to
        port: port that we want to connect to
        headers: Extra HTTP headers to include in the CONNECT request
        proxy_creds: credentials to authenticate at proxy
    """

    def __init__(
@ -55,20 +73,20 @@ class HTTPConnectProxyEndpoint:
        proxy_endpoint: IStreamClientEndpoint,
        host: bytes,
        port: int,
        headers: Headers,
        proxy_creds: Optional[ProxyCredentials],
    ):
        self._reactor = reactor
        self._proxy_endpoint = proxy_endpoint
        self._host = host
        self._port = port
        self._headers = headers
        self._proxy_creds = proxy_creds

    def __repr__(self):
        return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)

    def connect(self, protocolFactory: ClientFactory):
        f = HTTPProxiedClientFactory(
            self._host, self._port, protocolFactory, self._headers
            self._host, self._port, protocolFactory, self._proxy_creds
        )
        d = self._proxy_endpoint.connect(f)
        # once the tcp socket connects successfully, we need to wait for the
@ -87,7 +105,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
        dst_host: hostname that we want to CONNECT to
        dst_port: port that we want to connect to
        wrapped_factory: The original Factory
        headers: Extra HTTP headers to include in the CONNECT request
        proxy_creds: credentials to authenticate at proxy
    """

    def __init__(
@ -95,12 +113,12 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
        dst_host: bytes,
        dst_port: int,
        wrapped_factory: ClientFactory,
        headers: Headers,
        proxy_creds: Optional[ProxyCredentials],
    ):
        self.dst_host = dst_host
        self.dst_port = dst_port
        self.wrapped_factory = wrapped_factory
        self.headers = headers
        self.proxy_creds = proxy_creds
        self.on_connection = defer.Deferred()

    def startedConnecting(self, connector):
@ -114,7 +132,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
            self.dst_port,
            wrapped_protocol,
            self.on_connection,
            self.headers,
            self.proxy_creds,
        )

    def clientConnectionFailed(self, connector, reason):
@ -145,7 +163,7 @@ class HTTPConnectProtocol(protocol.Protocol):
        connected_deferred: a Deferred which will be callbacked with
            wrapped_protocol when the CONNECT completes

        headers: Extra HTTP headers to include in the CONNECT request
        proxy_creds: credentials to authenticate at proxy
    """

    def __init__(
@ -154,16 +172,16 @@ class HTTPConnectProtocol(protocol.Protocol):
        port: int,
        wrapped_protocol: Protocol,
        connected_deferred: defer.Deferred,
        headers: Headers,
        proxy_creds: Optional[ProxyCredentials],
    ):
        self.host = host
        self.port = port
        self.wrapped_protocol = wrapped_protocol
        self.connected_deferred = connected_deferred
        self.headers = headers
        self.proxy_creds = proxy_creds

        self.http_setup_client = HTTPConnectSetupClient(
            self.host, self.port, self.headers
            self.host, self.port, self.proxy_creds
        )
        self.http_setup_client.on_connected.addCallback(self.proxyConnected)
@ -205,30 +223,38 @@ class HTTPConnectSetupClient(http.HTTPClient):
    Args:
        host: The hostname to send in the CONNECT message
        port: The port to send in the CONNECT message
        headers: Extra headers to send with the CONNECT message
        proxy_creds: credentials to authenticate at proxy
    """

    def __init__(self, host: bytes, port: int, headers: Headers):
    def __init__(
        self,
        host: bytes,
        port: int,
        proxy_creds: Optional[ProxyCredentials],
    ):
        self.host = host
        self.port = port
        self.headers = headers
        self.proxy_creds = proxy_creds
        self.on_connected = defer.Deferred()

    def connectionMade(self):
        logger.debug("Connected to proxy, sending CONNECT")
        self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))

        # Send any additional specified headers
        for name, values in self.headers.getAllRawHeaders():
            for value in values:
                self.sendHeader(name, value)
        # Determine whether we need to set Proxy-Authorization headers
        if self.proxy_creds:
            # Set a Proxy-Authorization header
            self.sendHeader(
                b"Proxy-Authorization",
                self.proxy_creds.as_proxy_authorization_value(),
            )

        self.endHeaders()

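For orientation, roughly what connectionMade() now emits on the wire when credentials are configured (a sketch; Twisted's HTTPClient performs the actual formatting, and the hostnames here are made up):

# Request sent to the proxy:
#
#     CONNECT matrix.example.com:443 HTTP/1.0
#     Proxy-Authorization: Basic dXNlcjpwYXNz
#
# The proxy is expected to answer with a 200 status before the TLS handshake
# with the target host begins; anything else raises ProxyConnectError in
# handleStatus() below.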
    def handleStatus(self, version: bytes, status: bytes, message: bytes):
        logger.debug("Got Status: %s %s %s", status, message, version)
        if status != b"200":
            raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)
            raise ProxyConnectError(f"Unexpected status on CONNECT: {status!s}")

    def handleEndHeaders(self):
        logger.debug("End Headers")
@ -14,6 +14,10 @@
import logging
import urllib.parse
from typing import Any, Generator, List, Optional
from urllib.request import (  # type: ignore[attr-defined]
    getproxies_environment,
    proxy_bypass_environment,
)

from netaddr import AddrFormatError, IPAddress, IPSet
from zope.interface import implementer
@ -30,9 +34,12 @@ from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResponse

from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http.client import BlacklistingAgentWrapper
from synapse.http import proxyagent
from synapse.http.client import BlacklistingAgentWrapper, BlacklistingReactorWrapper
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import ISynapseReactor
from synapse.util import Clock
@ -57,6 +64,14 @@ class MatrixFederationAgent:
        user_agent:
            The user agent header to use for federation requests.

        ip_whitelist: Allowed IP addresses.

        ip_blacklist: Disallowed IP addresses.

        proxy_reactor: twisted reactor to use for connections to the proxy server
            'reactor' might have some blacklisting applied (i.e. for DNS queries),
            but we need unblocked access to the proxy.

        _srv_resolver:
            SrvResolver implementation to use for looking up SRV records. None
            to use a default implementation.
@ -71,11 +86,18 @@ class MatrixFederationAgent:
        reactor: ISynapseReactor,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        user_agent: bytes,
        ip_whitelist: IPSet,
        ip_blacklist: IPSet,
        _srv_resolver: Optional[SrvResolver] = None,
        _well_known_resolver: Optional[WellKnownResolver] = None,
    ):
        self._reactor = reactor
        # proxy_reactor is not blacklisted
        proxy_reactor = reactor

        # We need to use a DNS resolver which filters out blacklisted IP
        # addresses, to prevent DNS rebinding.
        reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist)

        self._clock = Clock(reactor)
        self._pool = HTTPConnectionPool(reactor)
        self._pool.retryAutomatically = False
@ -83,24 +105,27 @@ class MatrixFederationAgent:
        self._pool.cachedConnectionTimeout = 2 * 60

        self._agent = Agent.usingEndpointFactory(
            self._reactor,
            reactor,
            MatrixHostnameEndpointFactory(
                reactor, tls_client_options_factory, _srv_resolver
                reactor,
                proxy_reactor,
                tls_client_options_factory,
                _srv_resolver,
            ),
            pool=self._pool,
        )
        self.user_agent = user_agent

        if _well_known_resolver is None:
            # Note that the name resolver has already been wrapped in a
            # IPBlacklistingResolver by MatrixFederationHttpClient.
            _well_known_resolver = WellKnownResolver(
                self._reactor,
                reactor,
                agent=BlacklistingAgentWrapper(
                    Agent(
                        self._reactor,
                    ProxyAgent(
                        reactor,
                        proxy_reactor,
                        pool=self._pool,
                        contextFactory=tls_client_options_factory,
                        use_proxy=True,
                    ),
                    ip_blacklist=ip_blacklist,
                ),
@ -200,10 +225,12 @@ class MatrixHostnameEndpointFactory:
    def __init__(
        self,
        reactor: IReactorCore,
        proxy_reactor: IReactorCore,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        srv_resolver: Optional[SrvResolver],
    ):
        self._reactor = reactor
        self._proxy_reactor = proxy_reactor
        self._tls_client_options_factory = tls_client_options_factory

        if srv_resolver is None:
@ -211,9 +238,10 @@ class MatrixHostnameEndpointFactory:
        self._srv_resolver = srv_resolver

    def endpointForURI(self, parsed_uri):
    def endpointForURI(self, parsed_uri: URI):
        return MatrixHostnameEndpoint(
            self._reactor,
            self._proxy_reactor,
            self._tls_client_options_factory,
            self._srv_resolver,
            parsed_uri,
@ -227,23 +255,45 @@ class MatrixHostnameEndpoint:
    Args:
        reactor: twisted reactor to use for underlying requests
        proxy_reactor: twisted reactor to use for connections to the proxy server.
            'reactor' might have some blacklisting applied (i.e. for DNS queries),
            but we need unblocked access to the proxy.
        tls_client_options_factory:
            factory to use for fetching client tls options, or none to disable TLS.
        srv_resolver: The SRV resolver to use
        parsed_uri: The parsed URI that we're wanting to connect to.

    Raises:
        ValueError if the environment variables contain an invalid proxy specification.
        RuntimeError if no tls_options_factory is given for a https connection
    """

    def __init__(
        self,
        reactor: IReactorCore,
        proxy_reactor: IReactorCore,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        srv_resolver: SrvResolver,
        parsed_uri: URI,
    ):
        self._reactor = reactor

        self._parsed_uri = parsed_uri

        # http_proxy is not needed because federation is always over TLS
        proxies = getproxies_environment()
        https_proxy = proxies["https"].encode() if "https" in proxies else None
        self.no_proxy = proxies["no"] if "no" in proxies else None

        # endpoint and credentials to use to connect to the outbound https proxy, if any.
        (
            self._https_proxy_endpoint,
            self._https_proxy_creds,
        ) = proxyagent.http_proxy_endpoint(
            https_proxy,
            proxy_reactor,
            tls_client_options_factory,
        )

        # set up the TLS connection params
        #
        # XXX disabling TLS is really only supported here for the benefit of the
@ -273,8 +323,32 @@ class MatrixHostnameEndpoint:
        host = server.host
        port = server.port

        should_skip_proxy = False
        if self.no_proxy is not None:
            should_skip_proxy = proxy_bypass_environment(
                host.decode(),
                proxies={"no": self.no_proxy},
            )

        endpoint: IStreamClientEndpoint
        try:
            if self._https_proxy_endpoint and not should_skip_proxy:
                logger.debug(
                    "Connecting to %s:%i via %s",
                    host.decode("ascii"),
                    port,
                    self._https_proxy_endpoint,
                )
                endpoint = HTTPConnectProxyEndpoint(
                    self._reactor,
                    self._https_proxy_endpoint,
                    host,
                    port,
                    proxy_creds=self._https_proxy_creds,
                )
            else:
                logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
                # not using a proxy
                endpoint = HostnameEndpoint(self._reactor, host, port)
            if self._tls_options:
                endpoint = wrapClientTLS(self._tls_options, endpoint)
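The NO_PROXY handling above leans on an undocumented stdlib helper; a small illustration under assumed environment values (the hostnames are made up):

from urllib.request import proxy_bypass_environment

no_proxy = {"no": "localhost,.internal.example.com"}
print(proxy_bypass_environment("matrix.internal.example.com", proxies=no_proxy))  # True
print(proxy_bypass_environment("matrix.org", proxies=no_proxy))  # False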
@ -59,7 +59,6 @@ from synapse.api.errors import (
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
    BlacklistingAgentWrapper,
    BlacklistingReactorWrapper,
    BodyExceededMaxSize,
    ByteWriteable,
    encode_query_args,
@ -69,7 +68,7 @@ from synapse.http.federation.matrix_federation_agent import MatrixFederationAgen
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.types import ISynapseReactor, JsonDict
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
@ -325,13 +324,7 @@ class MatrixFederationHttpClient:
        self.signing_key = hs.signing_key
        self.server_name = hs.hostname

        # We need to use a DNS resolver which filters out blacklisted IP
        # addresses, to prevent DNS rebinding.
        self.reactor: ISynapseReactor = BlacklistingReactorWrapper(
            hs.get_reactor(),
            hs.config.federation_ip_range_whitelist,
            hs.config.federation_ip_range_blacklist,
        )
        self.reactor = hs.get_reactor()

        user_agent = hs.version_string
        if hs.config.user_agent_suffix:
@ -342,6 +335,7 @@ class MatrixFederationHttpClient:
            self.reactor,
            tls_client_options_factory,
            user_agent,
            hs.config.federation_ip_range_whitelist,
            hs.config.federation_ip_range_blacklist,
        )
@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import re
from typing import Any, Dict, Optional, Tuple
@ -21,7 +20,6 @@ from urllib.request import ( # type: ignore[attr-defined]
    proxy_bypass_environment,
)

import attr
from zope.interface import implementer

from twisted.internet import defer
@ -38,7 +36,7 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS

from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.types import ISynapseReactor

logger = logging.getLogger(__name__)
@ -46,22 +44,6 @@ logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")


@attr.s
class ProxyCredentials:
    username_password = attr.ib(type=bytes)

    def as_proxy_authorization_value(self) -> bytes:
        """
        Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').

        Returns:
            The encoded value for a Proxy-Authorization header, derived from
            the authentication string.
        """
        # Encode as base64 and prepend the authorization type
        return b"Basic " + base64.encodebytes(self.username_password)


@implementer(IAgent)
class ProxyAgent(_AgentBase):
    """An Agent implementation which will use an HTTP proxy if one was requested
@ -95,6 +77,7 @@ class ProxyAgent(_AgentBase):
    Raises:
        ValueError if use_proxy is set and the environment variables
            contain an invalid proxy specification.
        RuntimeError if no tls_options_factory is given for a https connection
    """

    def __init__(
@ -131,11 +114,11 @@ class ProxyAgent(_AgentBase):
        https_proxy = proxies["https"].encode() if "https" in proxies else None
        no_proxy = proxies["no"] if "no" in proxies else None

        self.http_proxy_endpoint, self.http_proxy_creds = _http_proxy_endpoint(
        self.http_proxy_endpoint, self.http_proxy_creds = http_proxy_endpoint(
            http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
        )

        self.https_proxy_endpoint, self.https_proxy_creds = _http_proxy_endpoint(
        self.https_proxy_endpoint, self.https_proxy_creds = http_proxy_endpoint(
            https_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
        )
@ -224,22 +207,12 @@ class ProxyAgent(_AgentBase):
            and self.https_proxy_endpoint
            and not should_skip_proxy
        ):
            connect_headers = Headers()

            # Determine whether we need to set Proxy-Authorization headers
            if self.https_proxy_creds:
                # Set a Proxy-Authorization header
                connect_headers.addRawHeader(
                    b"Proxy-Authorization",
                    self.https_proxy_creds.as_proxy_authorization_value(),
                )

            endpoint = HTTPConnectProxyEndpoint(
                self.proxy_reactor,
                self.https_proxy_endpoint,
                parsed_uri.host,
                parsed_uri.port,
                headers=connect_headers,
                self.https_proxy_creds,
            )
        else:
            # not using a proxy
@ -268,10 +241,10 @@ class ProxyAgent(_AgentBase):
    )


def _http_proxy_endpoint(
def http_proxy_endpoint(
    proxy: Optional[bytes],
    reactor: IReactorCore,
    tls_options_factory: IPolicyForHTTPS,
    tls_options_factory: Optional[IPolicyForHTTPS],
    **kwargs,
) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]:
    """Parses an http proxy setting and returns an endpoint for the proxy
@ -294,6 +267,7 @@ def _http_proxy_endpoint(
    Raise:
        ValueError if proxy has no hostname or unsupported scheme.
        RuntimeError if no tls_options_factory is given for a https connection
    """
    if proxy is None:
        return None, None
@ -305,8 +279,13 @@ def _http_proxy_endpoint(
    proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs)

    if scheme == b"https":
        if tls_options_factory:
            tls_options = tls_options_factory.creatorForNetloc(host, port)
            proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
        else:
            raise RuntimeError(
                f"No TLS options for a https connection via proxy {proxy!s}"
            )

    return proxy_endpoint, credentials
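A hedged usage sketch for the now-public helper (the reactor is a stand-in; the proxy URL and credentials are made up). Credentials embedded in the proxy specification come back as a ProxyCredentials alongside the endpoint:

endpoint, creds = http_proxy_endpoint(
    b"http://user:pass@proxy.example.com:8888",
    reactor,                   # a twisted reactor
    tls_options_factory=None,  # an http:// proxy needs no TLS wrapping
)
# endpoint connects to proxy.example.com:8888; creds (if not None) wraps
# b"user:pass" for use in the Proxy-Authorization header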
@ -14,16 +14,28 @@
""" This module contains base REST classes for constructing REST servlets. """
import logging
from typing import Iterable, List, Mapping, Optional, Sequence, overload
from typing import (
    TYPE_CHECKING,
    Iterable,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    overload,
)

from typing_extensions import Literal

from twisted.web.server import Request

from synapse.api.errors import Codes, SynapseError
from synapse.types import JsonDict
from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@ -663,3 +675,45 @@ class RestServlet:
        else:
            raise NotImplementedError("RestServlet must register something.")


class ResolveRoomIdMixin:
    def __init__(self, hs: "HomeServer"):
        self.room_member_handler = hs.get_room_member_handler()

    async def resolve_room_id(
        self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None
    ) -> Tuple[str, Optional[List[str]]]:
        """
        Resolve a room identifier to a room ID, if necessary.

        This also performs checks to ensure the room ID is of the proper form.

        Args:
            room_identifier: The room ID or alias.
            remote_room_hosts: The potential remote room hosts to use.

        Returns:
            The resolved room ID.

        Raises:
            SynapseError if the room ID is of the wrong form.
        """
        if RoomID.is_valid(room_identifier):
            resolved_room_id = room_identifier
        elif RoomAlias.is_valid(room_identifier):
            room_alias = RoomAlias.from_string(room_identifier)
            (
                room_id,
                remote_room_hosts,
            ) = await self.room_member_handler.lookup_room_alias(room_alias)
            resolved_room_id = room_id.to_string()
        else:
            raise SynapseError(
                400, "%s was not legal room ID or room alias" % (room_identifier,)
            )
        if not resolved_room_id:
            raise SynapseError(
                400, "Unknown room ID or room alias %s" % room_identifier
            )
        return resolved_room_id, remote_room_hosts
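A hedged sketch of how a servlet might use the relocated mixin (the servlet and its route are illustrative, not from the diff):

class MyRoomInfoServlet(ResolveRoomIdMixin, RestServlet):
    async def on_GET(self, request, room_identifier: str):
        # Accepts "!abc:example.org" or "#alias:example.org" alike.
        room_id, remote_room_hosts = await self.resolve_room_id(room_identifier)
        ...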
@ -45,7 +45,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.types import JsonDict, Requester, UserID, UserInfo, create_requester
from synapse.util import Clock
from synapse.util.caches.descriptors import cached
@ -91,6 +91,7 @@ class ModuleApi:
        self._state = hs.get_state_handler()
        self._clock: Clock = hs.get_clock()
        self._send_email_handler = hs.get_send_email_handler()
        self.custom_template_dir = hs.config.server.custom_template_directory

        try:
            app_name = self._hs.config.email_app_name
@ -174,6 +175,16 @@ class ModuleApi:
        """The application name configured in the homeserver's configuration."""
        return self._hs.config.email.email_app_name

    async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
        """Get user info by user_id

        Args:
            user_id: Fully qualified user id.
        Returns:
            UserInfo object if a user was found, otherwise None
        """
        return await self._store.get_userinfo_by_id(user_id)
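A hedged module sketch showing a call site for the new method (the module class and its wiring are illustrative):

class ExampleModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api

    async def user_exists_and_is_active(self, user_id: str) -> bool:
        # None means the user is unknown to this homeserver.
        userinfo = await self._api.get_userinfo_by_id(user_id)
        return userinfo is not None and not userinfo.is_deactivated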
    async def get_user_by_req(
        self,
        req: SynapseRequest,
@ -593,10 +604,15 @@ class ModuleApi:
        msec: float,
        *args,
        desc: Optional[str] = None,
        run_on_all_instances: bool = False,
        **kwargs,
    ):
        """Wraps a function as a background process and calls it repeatedly.

        NOTE: Will only run on the instance that is configured to run
        background processes (which is the main process by default), unless
        `run_on_all_instances` is set.

        Waits `msec` initially before calling `f` for the first time.

        Args:
@ -607,12 +623,14 @@ class ModuleApi:
            msec: How long to wait between calls in milliseconds.
            *args: Positional arguments to pass to function.
            desc: The background task's description. Defaults to the function's name.
            run_on_all_instances: Whether to run this on all instances, rather
                than just the instance configured to run background tasks.
            **kwargs: Keyword arguments to pass to function.
        """
        if desc is None:
            desc = f.__name__

        if self._hs.config.run_background_tasks:
        if self._hs.config.run_background_tasks or run_on_all_instances:
            self._clock.looping_call(
                run_as_background_process,
                msec,
@ -667,7 +685,10 @@ class ModuleApi:
            A list containing the loaded templates, in the same order as the
            filenames parameter.
        """
        return self._hs.config.read_templates(filenames, custom_template_directory)
        return self._hs.config.read_templates(
            filenames,
            (td for td in (self.custom_template_dir, custom_template_directory) if td),
        )

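The generator passed above just filters out unset directories; spelled out as a hedged equivalent:

# The server-wide custom template directory is searched first, then the
# caller's custom_template_directory; None entries are dropped entirely:
#
#     dirs = [td for td in (self.custom_template_dir, custom_template_directory) if td]
#     return self._hs.config.read_templates(filenames, dirs)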
class PublicRoomListManager:
@ -1,37 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.replication.tcp.streams import PublicRoomsStream
from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.room import RoomWorkerStore

from ._base import BaseSlavedStore
from ._slaved_id_tracker import SlavedIdTracker


class RoomStore(RoomWorkerStore, BaseSlavedStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)
        self._public_room_id_gen = SlavedIdTracker(
            db_conn, "public_room_list_stream", "stream_id"
        )

    def get_current_public_room_stream_id(self):
        return self._public_room_id_gen.get_current_token()

    def process_replication_rows(self, stream_name, instance_name, token, rows):
        if stream_name == PublicRoomsStream.NAME:
            self._public_room_id_gen.advance(instance_name, token)

        return super().process_replication_rows(stream_name, instance_name, token, rows)
@ -32,7 +32,6 @@ from synapse.replication.tcp.streams._base import (
    GroupServerStream,
    PresenceFederationStream,
    PresenceStream,
    PublicRoomsStream,
    PushersStream,
    PushRulesStream,
    ReceiptsStream,
@ -57,7 +56,6 @@ STREAMS_MAP = {
    PushRulesStream,
    PushersStream,
    CachesStream,
    PublicRoomsStream,
    DeviceListsStream,
    ToDeviceStream,
    FederationStream,
@ -79,7 +77,6 @@ __all__ = [
    "PushRulesStream",
    "PushersStream",
    "CachesStream",
    "PublicRoomsStream",
    "DeviceListsStream",
    "ToDeviceStream",
    "TagAccountDataStream",
@ -447,31 +447,6 @@ class CachesStream(Stream):
    )


class PublicRoomsStream(Stream):
    """The public rooms list changed"""

    PublicRoomsStreamRow = namedtuple(
        "PublicRoomsStreamRow",
        (
            "room_id",  # str
            "visibility",  # str
            "appservice_id",  # str, optional
            "network_id",  # str, optional
        ),
    )

    NAME = "public_rooms"
    ROW_TYPE = PublicRoomsStreamRow

    def __init__(self, hs):
        store = hs.get_datastore()
        super().__init__(
            hs.get_instance_name(),
            current_token_without_instance(store.get_current_public_room_stream_id),
            store.get_all_new_public_rooms,
        )


class DeviceListsStream(Stream):
    """Either a user has updated their devices or a remote server needs to be
    told about a device update.
@ -14,39 +14,36 @@
# limitations under the License.
from synapse.http.server import JsonResource
from synapse.rest import admin
from synapse.rest.client import versions
from synapse.rest.client.v1 import (
    directory,
    events,
    initial_sync,
    login as v1_login,
    logout,
    presence,
    profile,
    push_rule,
    pusher,
    room,
    voip,
)
from synapse.rest.client.v2_alpha import (
from synapse.rest.client import (
    account,
    account_data,
    account_validity,
    auth,
    capabilities,
    devices,
    directory,
    events,
    filter,
    groups,
    initial_sync,
    keys,
    knock,
    login as v1_login,
    logout,
    notifications,
    openid,
    password_policy,
    presence,
    profile,
    push_rule,
    pusher,
    read_marker,
    receipts,
    register,
    relations,
    report_event,
    room,
    room_batch,
    room_keys,
    room_upgrade_rest_servlet,
    sendtodevice,
@ -56,6 +53,8 @@ from synapse.rest.client.v2_alpha import (
    thirdparty,
    tokenrefresh,
    user_directory,
    versions,
    voip,
)
@ -84,7 +83,6 @@ class ClientRestResource(JsonResource):
        # Partially deprecated in r0
        events.register_servlets(hs, client_resource)

        # "v1" + "r0"
        room.register_servlets(hs, client_resource)
        v1_login.register_servlets(hs, client_resource)
        profile.register_servlets(hs, client_resource)
@ -94,8 +92,6 @@ class ClientRestResource(JsonResource):
        pusher.register_servlets(hs, client_resource)
        push_rule.register_servlets(hs, client_resource)
        logout.register_servlets(hs, client_resource)

        # "v2"
        sync.register_servlets(hs, client_resource)
        filter.register_servlets(hs, client_resource)
        account.register_servlets(hs, client_resource)
@ -117,6 +113,7 @@ class ClientRestResource(JsonResource):
        user_directory.register_servlets(hs, client_resource)
        groups.register_servlets(hs, client_resource)
        room_upgrade_rest_servlet.register_servlets(hs, client_resource)
        room_batch.register_servlets(hs, client_resource)
        capabilities.register_servlets(hs, client_resource)
        account_validity.register_servlets(hs, client_resource)
        relations.register_servlets(hs, client_resource)
@ -51,6 +51,7 @@ from synapse.rest.admin.rooms import (
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
from synapse.rest.admin.username_available import UsernameAvailableRestServlet
from synapse.rest.admin.users import (
    AccountValidityRenewServlet,
    DeactivateAccountRestServlet,
@ -60,7 +61,6 @@ from synapse.rest.admin.users import (
    SearchUsersRestServlet,
    ShadowBanRestServlet,
    UserAdminServlet,
    UserMediaRestServlet,
    UserMembershipRestServlet,
    UserRegisterServlet,
    UserRestServletV2,
@ -224,7 +224,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    SendServerNoticeServlet(hs).register(http_server)
    VersionServlet(hs).register(http_server)
    UserAdminServlet(hs).register(http_server)
    UserMediaRestServlet(hs).register(http_server)
    UserMembershipRestServlet(hs).register(http_server)
    UserTokenRestServlet(hs).register(http_server)
    UserRestServletV2(hs).register(http_server)
@ -241,6 +240,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    ForwardExtremitiesRestServlet(hs).register(http_server)
    RoomEventContextServlet(hs).register(http_server)
    RateLimitRestServlet(hs).register(http_server)
    UsernameAvailableRestServlet(hs).register(http_server)


def register_servlets_for_client_rest_resource(
@ -18,14 +18,15 @@ from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
    admin_patterns,
    assert_requester_is_admin,
    assert_user_is_admin,
)
from synapse.types import JsonDict
from synapse.storage.databases.main.media_repository import MediaSortOrder
from synapse.types import JsonDict, UserID

if TYPE_CHECKING:
    from synapse.server import HomeServer
@ -259,7 +260,9 @@ class DeleteMediaByID(RestServlet):

        logging.info("Deleting local media by ID: %s", media_id)

        deleted_media, total = await self.media_repository.delete_local_media(media_id)
        deleted_media, total = await self.media_repository.delete_local_media_ids(
            [media_id]
        )
        return 200, {"deleted_media": deleted_media, "total": total}
@ -312,6 +315,165 @@ class DeleteMediaByDateSize(RestServlet):
        return 200, {"deleted_media": deleted_media, "total": total}


class UserMediaRestServlet(RestServlet):
    """
    Gets information about all uploaded local media for a specific `user_id`.
    A DELETE request deletes all of this media.

    Example:
        http://localhost:8008/_synapse/admin/v1/users/@user:server/media

    Args:
        The parameters `from` and `limit` are required for pagination.
        By default, a `limit` of 100 is used.
    Returns:
        A list of media and an integer representing the total number of
        media items that exist for this user.
    """

    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")

    def __init__(self, hs: "HomeServer"):
        self.is_mine = hs.is_mine
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.media_repository = hs.get_media_repository()

    async def on_GET(
        self, request: SynapseRequest, user_id: str
    ) -> Tuple[int, JsonDict]:
        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        await assert_requester_is_admin(self.auth, request)

        if not self.is_mine(UserID.from_string(user_id)):
            raise SynapseError(400, "Can only look up local users")

        user = await self.store.get_user_by_id(user_id)
        if user is None:
            raise NotFoundError("Unknown user")

        start = parse_integer(request, "from", default=0)
        limit = parse_integer(request, "limit", default=100)

        if start < 0:
            raise SynapseError(
                400,
                "Query parameter from must be a string representing a positive integer.",
                errcode=Codes.INVALID_PARAM,
            )

        if limit < 0:
            raise SynapseError(
                400,
                "Query parameter limit must be a string representing a positive integer.",
                errcode=Codes.INVALID_PARAM,
            )

        # If neither `order_by` nor `dir` is set, default to newest media on
        # top, for backward compatibility.
if b"order_by" not in request.args and b"dir" not in request.args:
|
||||
order_by = MediaSortOrder.CREATED_TS.value
|
||||
direction = "b"
|
||||
else:
|
||||
order_by = parse_string(
|
||||
request,
|
||||
"order_by",
|
||||
default=MediaSortOrder.CREATED_TS.value,
|
||||
allowed_values=(
|
||||
MediaSortOrder.MEDIA_ID.value,
|
||||
MediaSortOrder.UPLOAD_NAME.value,
|
||||
MediaSortOrder.CREATED_TS.value,
|
||||
MediaSortOrder.LAST_ACCESS_TS.value,
|
||||
MediaSortOrder.MEDIA_LENGTH.value,
|
||||
MediaSortOrder.MEDIA_TYPE.value,
|
||||
MediaSortOrder.QUARANTINED_BY.value,
|
||||
MediaSortOrder.SAFE_FROM_QUARANTINE.value,
|
||||
),
|
||||
)
|
||||
direction = parse_string(
|
||||
request, "dir", default="f", allowed_values=("f", "b")
|
||||
)
|
||||
|
||||
media, total = await self.store.get_local_media_by_user_paginate(
|
||||
start, limit, user_id, order_by, direction
|
||||
)
|
||||
|
||||
ret = {"media": media, "total": total}
|
||||
if (start + limit) < total:
|
||||
ret["next_token"] = start + len(media)
|
||||
|
||||
return 200, ret
|
||||
|
||||
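A hedged client-side sketch of paging with next_token (the base URL and token are illustrative; the `requests` library is assumed to be available):

import requests

def fetch_all_media(base_url: str, user_id: str, access_token: str) -> list:
    # Walk the paginated endpoint until next_token is no longer returned.
    media, start = [], 0
    while True:
        resp = requests.get(
            f"{base_url}/_synapse/admin/v1/users/{user_id}/media",
            params={"from": start, "limit": 100},
            headers={"Authorization": f"Bearer {access_token}"},
        ).json()
        media.extend(resp["media"])
        if "next_token" not in resp:
            return media
        start = resp["next_token"]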
    async def on_DELETE(
        self, request: SynapseRequest, user_id: str
    ) -> Tuple[int, JsonDict]:
        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        await assert_requester_is_admin(self.auth, request)

        if not self.is_mine(UserID.from_string(user_id)):
            raise SynapseError(400, "Can only look up local users")

        user = await self.store.get_user_by_id(user_id)
        if user is None:
            raise NotFoundError("Unknown user")

        start = parse_integer(request, "from", default=0)
        limit = parse_integer(request, "limit", default=100)

        if start < 0:
            raise SynapseError(
                400,
                "Query parameter from must be a string representing a positive integer.",
                errcode=Codes.INVALID_PARAM,
            )

        if limit < 0:
            raise SynapseError(
                400,
                "Query parameter limit must be a string representing a positive integer.",
                errcode=Codes.INVALID_PARAM,
            )

        # If neither `order_by` nor `dir` is set, default to newest media on
        # top, for backward compatibility.
if b"order_by" not in request.args and b"dir" not in request.args:
|
||||
order_by = MediaSortOrder.CREATED_TS.value
|
||||
direction = "b"
|
||||
else:
|
||||
order_by = parse_string(
|
||||
request,
|
||||
"order_by",
|
||||
default=MediaSortOrder.CREATED_TS.value,
|
||||
allowed_values=(
|
||||
MediaSortOrder.MEDIA_ID.value,
|
||||
MediaSortOrder.UPLOAD_NAME.value,
|
||||
MediaSortOrder.CREATED_TS.value,
|
||||
MediaSortOrder.LAST_ACCESS_TS.value,
|
||||
MediaSortOrder.MEDIA_LENGTH.value,
|
||||
MediaSortOrder.MEDIA_TYPE.value,
|
||||
MediaSortOrder.QUARANTINED_BY.value,
|
||||
MediaSortOrder.SAFE_FROM_QUARANTINE.value,
|
||||
),
|
||||
)
|
||||
direction = parse_string(
|
||||
request, "dir", default="f", allowed_values=("f", "b")
|
||||
)
|
||||
|
||||
media, _ = await self.store.get_local_media_by_user_paginate(
|
||||
start, limit, user_id, order_by, direction
|
||||
)
|
||||
|
||||
deleted_media, total = await self.media_repository.delete_local_media_ids(
|
||||
([row["media_id"] for row in media])
|
||||
)
|
||||
|
||||
return 200, {"deleted_media": deleted_media, "total": total}
|
||||
|
||||
|
||||
def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
"""
|
||||
Media repo specific APIs.
|
||||
|
@ -326,3 +488,4 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
    ListMediaInRoom(hs).register(http_server)
    DeleteMediaByID(hs).register(http_server)
    DeleteMediaByDateSize(hs).register(http_server)
    UserMediaRestServlet(hs).register(http_server)
@ -20,6 +20,7 @@ from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.http.servlet import (
    ResolveRoomIdMixin,
    RestServlet,
    assert_params_in_dict,
    parse_integer,
@ -33,7 +34,7 @@ from synapse.rest.admin._base import (
    assert_user_is_admin,
)
from synapse.storage.databases.main.room import RoomSortOrder
from synapse.types import JsonDict, RoomAlias, RoomID, UserID, create_requester
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import json_decoder

if TYPE_CHECKING:
@ -45,48 +46,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


class ResolveRoomIdMixin:
    def __init__(self, hs: "HomeServer"):
        self.room_member_handler = hs.get_room_member_handler()

    async def resolve_room_id(
        self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None
    ) -> Tuple[str, Optional[List[str]]]:
        """
        Resolve a room identifier to a room ID, if necessary.

        This also performs checks to ensure the room ID is of the proper form.

        Args:
            room_identifier: The room ID or alias.
            remote_room_hosts: The potential remote room hosts to use.

        Returns:
            The resolved room ID.

        Raises:
            SynapseError if the room ID is of the wrong form.
        """
        if RoomID.is_valid(room_identifier):
            resolved_room_id = room_identifier
        elif RoomAlias.is_valid(room_identifier):
            room_alias = RoomAlias.from_string(room_identifier)
            (
                room_id,
                remote_room_hosts,
            ) = await self.room_member_handler.lookup_room_alias(room_alias)
            resolved_room_id = room_id.to_string()
        else:
            raise SynapseError(
                400, "%s was not legal room ID or room alias" % (room_identifier,)
            )
        if not resolved_room_id:
            raise SynapseError(
                400, "Unknown room ID or room alias %s" % room_identifier
            )
        return resolved_room_id, remote_room_hosts


class ShutdownRoomRestServlet(RestServlet):
    """Shuts down a room by removing all local users from the room and blocking
    all future invites and joins to the room. Any local aliases will be repointed
synapse/rest/admin/username_available.py (new file, 51 lines)
@ -0,0 +1,51 @@
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple

from synapse.http.servlet import RestServlet, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class UsernameAvailableRestServlet(RestServlet):
    """An admin API to check if a given username is available, regardless of whether registration is enabled.

    Example:
        GET /_synapse/admin/v1/username_available?username=foo
        200 OK
        {
            "available": true
        }
    """

    PATTERNS = admin_patterns("/username_available")

    def __init__(self, hs: "HomeServer"):
        self.auth = hs.get_auth()
        self.registration_handler = hs.get_registration_handler()

    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        await assert_requester_is_admin(self.auth, request)

        username = parse_string(request, "username", required=True)
        await self.registration_handler.check_username(username)
        return HTTPStatus.OK, {"available": True}
@ -34,8 +34,7 @@ from synapse.rest.admin._base import (
    assert_requester_is_admin,
    assert_user_is_admin,
)
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.storage.databases.main.media_repository import MediaSortOrder
from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.types import JsonDict, UserID
@@ -196,20 +195,57 @@ class UserRestServletV2(RestServlet):
         user = await self.admin_handler.get_user(target_user)
         user_id = target_user.to_string()
 
+        # check for required parameters for each threepid
+        threepids = body.get("threepids")
+        if threepids is not None:
+            for threepid in threepids:
+                assert_params_in_dict(threepid, ["medium", "address"])
+
+        # check for required parameters for each external_id
+        external_ids = body.get("external_ids")
+        if external_ids is not None:
+            for external_id in external_ids:
+                assert_params_in_dict(external_id, ["auth_provider", "external_id"])
+
+        user_type = body.get("user_type", None)
+        if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
+            raise SynapseError(400, "Invalid user type")
+
+        set_admin_to = body.get("admin", False)
+        if not isinstance(set_admin_to, bool):
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Param 'admin' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
+
+        password = body.get("password", None)
+        if password is not None:
+            if not isinstance(password, str) or len(password) > 512:
+                raise SynapseError(400, "Invalid password")
+
+        deactivate = body.get("deactivated", False)
+        if not isinstance(deactivate, bool):
+            raise SynapseError(400, "'deactivated' parameter is not of type boolean")
+
+        # convert into List[Tuple[str, str]]
+        if external_ids is not None:
+            new_external_ids = []
+            for external_id in external_ids:
+                new_external_ids.append(
+                    (external_id["auth_provider"], external_id["external_id"])
+                )
+
         if user:  # modify user
             if "displayname" in body:
                 await self.profile_handler.set_displayname(
                     target_user, requester, body["displayname"], True
                 )
 
-            if "threepids" in body:
-                # check for required parameters for each threepid
-                for threepid in body["threepids"]:
-                    assert_params_in_dict(threepid, ["medium", "address"])
-
+            if threepids is not None:
                 # remove old threepids from user
-                threepids = await self.store.user_get_threepids(user_id)
-                for threepid in threepids:
+                old_threepids = await self.store.user_get_threepids(user_id)
+                for threepid in old_threepids:
                     try:
                         await self.auth_handler.delete_threepid(
                             user_id, threepid["medium"], threepid["address"], None
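A side note on the `# convert into List[Tuple[str, str]]` step above, sketched with invented sample data: the dicts from the request body must become tuples because the next hunk compares them with set operations, and dicts are not hashable.

# Sketch only; the sample list is invented.
external_ids = [{"auth_provider": "oidc", "external_id": "alice"}]

new_external_ids = [(e["auth_provider"], e["external_id"]) for e in external_ids]
set(new_external_ids)  # fine: tuples are hashable
# set(external_ids)    # TypeError: unhashable type: 'dict'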
@@ -220,18 +256,39 @@ class UserRestServletV2(RestServlet):
 
                 # add new threepids to user
                 current_time = self.hs.get_clock().time_msec()
-                for threepid in body["threepids"]:
+                for threepid in threepids:
                     await self.auth_handler.add_threepid(
                         user_id, threepid["medium"], threepid["address"], current_time
                     )
 
-            if "avatar_url" in body and type(body["avatar_url"]) == str:
+            if external_ids is not None:
+                # get changed external_ids (added and removed)
+                cur_external_ids = await self.store.get_external_ids_by_user(user_id)
+                add_external_ids = set(new_external_ids) - set(cur_external_ids)
+                del_external_ids = set(cur_external_ids) - set(new_external_ids)
+
+                # remove old external_ids
+                for auth_provider, external_id in del_external_ids:
+                    await self.store.remove_user_external_id(
+                        auth_provider,
+                        external_id,
+                        user_id,
+                    )
+
+                # add new external_ids
+                for auth_provider, external_id in add_external_ids:
+                    await self.store.record_user_external_id(
+                        auth_provider,
+                        external_id,
+                        user_id,
+                    )
+
+            if "avatar_url" in body and isinstance(body["avatar_url"], str):
                 await self.profile_handler.set_avatar_url(
                     target_user, requester, body["avatar_url"], True
                 )
 
             if "admin" in body:
-                set_admin_to = bool(body["admin"])
                 if set_admin_to != user["admin"]:
                     auth_user = requester.user
                     if target_user == auth_user and not set_admin_to:
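The external-ID handling added above reconciles the stored assignments against the submitted list by set difference. The same technique in isolation, with invented sample data:

# Sketch only: diff two lists of (auth_provider, external_id) tuples.
cur_external_ids = [("oidc", "alice-old"), ("saml", "alice")]
new_external_ids = [("oidc", "alice-new"), ("saml", "alice")]

add_external_ids = set(new_external_ids) - set(cur_external_ids)
del_external_ids = set(cur_external_ids) - set(new_external_ids)

print(add_external_ids)  # {('oidc', 'alice-new')} -> record_user_external_id
print(del_external_ids)  # {('oidc', 'alice-old')} -> remove_user_external_id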
@@ -239,14 +296,9 @@ class UserRestServletV2(RestServlet):
 
                     await self.store.set_server_admin(target_user, set_admin_to)
 
-            if "password" in body:
-                if not isinstance(body["password"], str) or len(body["password"]) > 512:
-                    raise SynapseError(400, "Invalid password")
-                else:
-                    new_password = body["password"]
+            if password is not None:
                 logout_devices = True
 
-                new_password_hash = await self.auth_handler.hash(new_password)
+                new_password_hash = await self.auth_handler.hash(password)
 
                 await self.set_password_handler.set_password(
                     target_user.to_string(),
@@ -256,12 +308,6 @@ class UserRestServletV2(RestServlet):
                 )
 
             if "deactivated" in body:
-                deactivate = body["deactivated"]
-                if not isinstance(deactivate, bool):
-                    raise SynapseError(
-                        400, "'deactivated' parameter is not of type boolean"
-                    )
-
                 if deactivate and not user["deactivated"]:
                     await self.deactivate_account_handler.deactivate_account(
                         target_user.to_string(), False, requester, by_admin=True
@@ -285,36 +331,24 @@ class UserRestServletV2(RestServlet):
             return 200, user
 
         else:  # create user
-            password = body.get("password")
-            password_hash = None
-            if password is not None:
-                if not isinstance(password, str) or len(password) > 512:
-                    raise SynapseError(400, "Invalid password")
-                password_hash = await self.auth_handler.hash(password)
-
-            admin = body.get("admin", None)
-            user_type = body.get("user_type", None)
             displayname = body.get("displayname", None)
 
-            if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
-                raise SynapseError(400, "Invalid user type")
+            password_hash = None
+            if password is not None:
+                password_hash = await self.auth_handler.hash(password)
 
             user_id = await self.registration_handler.register_user(
                 localpart=target_user.localpart,
                 password_hash=password_hash,
-                admin=bool(admin),
+                admin=set_admin_to,
                 default_display_name=displayname,
                 user_type=user_type,
                 by_admin=True,
             )
 
-            if "threepids" in body:
-                # check for required parameters for each threepid
-                for threepid in body["threepids"]:
-                    assert_params_in_dict(threepid, ["medium", "address"])
-
+            if threepids is not None:
                 current_time = self.hs.get_clock().time_msec()
-                for threepid in body["threepids"]:
+                for threepid in threepids:
                     await self.auth_handler.add_threepid(
                         user_id, threepid["medium"], threepid["address"], current_time
                     )
@@ -334,6 +368,14 @@ class UserRestServletV2(RestServlet):
                     data={},
                 )
 
+            if external_ids is not None:
+                for auth_provider, external_id in new_external_ids:
+                    await self.store.record_user_external_id(
+                        auth_provider,
+                        external_id,
+                        user_id,
+                    )
+
             if "avatar_url" in body and isinstance(body["avatar_url"], str):
                 await self.profile_handler.set_avatar_url(
                     target_user, requester, body["avatar_url"], True
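Taken together, the modify and create paths above accept a single JSON body. An illustrative request against this endpoint (PUT /_synapse/admin/v2/users/<user_id>); every value below is invented sample data and the token is a placeholder:

# Sketch only: create or modify an account via the admin API.
import requests

body = {
    "password": "s3cret",
    "displayname": "Alice",
    "threepids": [{"medium": "email", "address": "alice@example.com"}],
    "external_ids": [{"auth_provider": "oidc", "external_id": "alice"}],
    "admin": False,
    "deactivated": False,
}
resp = requests.put(
    "http://localhost:8008/_synapse/admin/v2/users/@alice:example.com",
    json=body,
    headers={"Authorization": "Bearer ADMIN_ACCESS_TOKEN"},
)
print(resp.status_code, resp.json())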
@@ -461,7 +503,7 @@ class UserRegisterServlet(RestServlet):
             raise SynapseError(403, "HMAC incorrect")
 
         # Reuse the parts of RegisterRestServlet to reduce code duplication
-        from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+        from synapse.rest.client.register import RegisterRestServlet
 
         register = RegisterRestServlet(self.hs)
 
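For context, the "HMAC incorrect" check above guards the shared-secret registration flow. A sketch of the MAC a client would compute, following the documented scheme; the nonce, credentials, and secret below are invented, so treat the exact recipe as an assumption rather than part of this diff:

# Sketch only: client-side MAC for shared-secret registration.
import hashlib
import hmac

def generate_mac(nonce: str, user: str, password: str, admin: bool, shared_secret: str) -> str:
    mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(user.encode("utf8"))
    mac.update(b"\x00")
    mac.update(password.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()

print(generate_mac("a1b2c3", "alice", "s3cret", False, "REGISTRATION_SHARED_SECRET"))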
@@ -808,97 +850,6 @@ class PushersRestServlet(RestServlet):
         return 200, {"pushers": filtered_pushers, "total": len(filtered_pushers)}
 
 
-class UserMediaRestServlet(RestServlet):
-    """
-    Gets information about all uploaded local media for a specific `user_id`.
-
-    Example:
-        http://localhost:8008/_synapse/admin/v1/users/
-        @user:server/media
-
-    Args:
-        The parameters `from` and `limit` are required for pagination.
-        By default, a `limit` of 100 is used.
-    Returns:
-        A list of media and an integer representing the total number of
-        media that exist given for this user
-    """
-
-    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
-
-    def __init__(self, hs: "HomeServer"):
-        self.is_mine = hs.is_mine
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastore()
-
-    async def on_GET(
-        self, request: SynapseRequest, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        # This will always be set by the time Twisted calls us.
-        assert request.args is not None
-
-        await assert_requester_is_admin(self.auth, request)
-
-        if not self.is_mine(UserID.from_string(user_id)):
-            raise SynapseError(400, "Can only lookup local users")
-
-        user = await self.store.get_user_by_id(user_id)
-        if user is None:
-            raise NotFoundError("Unknown user")
-
-        start = parse_integer(request, "from", default=0)
-        limit = parse_integer(request, "limit", default=100)
-
-        if start < 0:
-            raise SynapseError(
-                400,
-                "Query parameter from must be a string representing a positive integer.",
-                errcode=Codes.INVALID_PARAM,
-            )
-
-        if limit < 0:
-            raise SynapseError(
-                400,
-                "Query parameter limit must be a string representing a positive integer.",
-                errcode=Codes.INVALID_PARAM,
-            )
-
-        # If neither `order_by` nor `dir` is set, set the default order
-        # to newest media is on top for backward compatibility.
-        if b"order_by" not in request.args and b"dir" not in request.args:
-            order_by = MediaSortOrder.CREATED_TS.value
-            direction = "b"
-        else:
-            order_by = parse_string(
-                request,
-                "order_by",
-                default=MediaSortOrder.CREATED_TS.value,
-                allowed_values=(
-                    MediaSortOrder.MEDIA_ID.value,
-                    MediaSortOrder.UPLOAD_NAME.value,
-                    MediaSortOrder.CREATED_TS.value,
-                    MediaSortOrder.LAST_ACCESS_TS.value,
-                    MediaSortOrder.MEDIA_LENGTH.value,
-                    MediaSortOrder.MEDIA_TYPE.value,
-                    MediaSortOrder.QUARANTINED_BY.value,
-                    MediaSortOrder.SAFE_FROM_QUARANTINE.value,
-                ),
-            )
-            direction = parse_string(
-                request, "dir", default="f", allowed_values=("f", "b")
-            )
-
-        media, total = await self.store.get_local_media_by_user_paginate(
-            start, limit, user_id, order_by, direction
-        )
-
-        ret = {"media": media, "total": total}
-        if (start + limit) < total:
-            ret["next_token"] = start + len(media)
-
-        return 200, ret
-
-
 class UserTokenRestServlet(RestServlet):
     """An admin API for logging in as a user.
 
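The removed UserMediaRestServlet (presumably relocated, though this truncated diff does not show where) pages through results via `next_token`. A consumer-side sketch of that pagination contract; the URL, user ID, and token are placeholders:

# Sketch only: walk GET /_synapse/admin/v1/users/<user_id>/media page by page.
import requests

BASE = "http://localhost:8008/_synapse/admin/v1/users/@alice:example.com/media"
HEADERS = {"Authorization": "Bearer ADMIN_ACCESS_TOKEN"}

media = []
start = 0
while True:
    resp = requests.get(BASE, params={"from": start, "limit": 100}, headers=HEADERS)
    resp.raise_for_status()
    page = resp.json()
    media.extend(page["media"])
    if "next_token" not in page:
        break  # the server omits next_token on the last page
    start = page["next_token"]

print(f"fetched {len(media)} of {page['total']} media items")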
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2014-2016 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.rest.client._base import client_patterns
 from synapse.types import RoomAlias
 
 logger = logging.getLogger(__name__)