Merge remote-tracking branch 'upstream/release-v1.36'

commit 8d96b324dc
116 changed files with 4194 additions and 2503 deletions
@@ -41,7 +41,7 @@ workflows:
   - dockerhubuploadlatest:
       filters:
         branches:
-          only: master
+          only: [ master, main ]
 
 commands:
   docker_prepare:
.github/workflows/docs.yaml (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
name: Deploy the documentation

on:
  push:
    branches:
      - develop

  workflow_dispatch:

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
        with:
          mdbook-version: '0.4.9'

      - name: Build the documentation
        run: mdbook build

      - name: Deploy latest documentation
        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          keep_files: true
          publish_dir: ./book
          destination_dir: ./develop
.github/workflows/tests.yml (vendored, 10 lines changed)
@@ -34,7 +34,13 @@ jobs:
     if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
     runs-on: ubuntu-latest
     steps:
+      # Note: This and the script can be simplified once we drop Buildkite. See:
+      # https://github.com/actions/checkout/issues/266#issuecomment-638346893
+      # https://github.com/actions/checkout/issues/416
       - uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: pip install tox
       - name: Patch Buildkite-specific test script
@@ -226,9 +232,9 @@ jobs:
       - name: Run SyTest
         run: /bootstrap.sh synapse
         working-directory: /src
-      - name: Dump results.tap
+      - name: Summarise results.tap
         if: ${{ always() }}
-        run: cat /logs/results.tap
+        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
         uses: actions/upload-artifact@v2
         if: ${{ always() }}
.gitignore (vendored, 3 lines changed)
@@ -46,3 +46,6 @@ __pycache__/
 /docs/build/
 /htmlcov
 /pip-wheel-metadata/
+
+# docs
+book/
CHANGES.md (68 lines changed)
@@ -1,3 +1,71 @@
+Synapse 1.36.0rc1 (2021-06-08)
+==============================
+
+Features
+--------
+
+- Add new endpoint `/_matrix/client/r0/rooms/{roomId}/aliases` from Client-Server API r0.6.1 (previously [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)). ([\#9224](https://github.com/matrix-org/synapse/issues/9224))
+- Improve performance of incoming federation transactions in large rooms. ([\#9953](https://github.com/matrix-org/synapse/issues/9953), [\#9973](https://github.com/matrix-org/synapse/issues/9973))
+- Rewrite logic around verifying JSON object and fetching server keys to be more performant and use less memory. ([\#10035](https://github.com/matrix-org/synapse/issues/10035))
+- Add new admin APIs for unprotecting local media from quarantine. Contributed by @dklimpel. ([\#10040](https://github.com/matrix-org/synapse/issues/10040))
+- Add new admin APIs to remove media by media ID from quarantine. Contributed by @dklimpel. ([\#10044](https://github.com/matrix-org/synapse/issues/10044))
+- Make reason and score parameters optional for reporting content. Implements [MSC2414](https://github.com/matrix-org/matrix-doc/pull/2414). Contributed by Callum Brown. ([\#10077](https://github.com/matrix-org/synapse/issues/10077))
+- Add support for routing more requests to workers. ([\#10084](https://github.com/matrix-org/synapse/issues/10084))
+- Report OpenTracing spans for database activity. ([\#10113](https://github.com/matrix-org/synapse/issues/10113), [\#10136](https://github.com/matrix-org/synapse/issues/10136), [\#10141](https://github.com/matrix-org/synapse/issues/10141))
+- Significantly reduce memory usage of joining large remote rooms. ([\#10117](https://github.com/matrix-org/synapse/issues/10117))
+
+
+Bugfixes
+--------
+
+- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. ([\#10082](https://github.com/matrix-org/synapse/issues/10082))
+- Fix a bug in the `force_tracing_for_users` option introduced in Synapse v1.35 which meant that the OpenTracing spans produced were missing most tags. ([\#10092](https://github.com/matrix-org/synapse/issues/10092))
+- Fixed a bug that could cause Synapse to stop notifying application services. Contributed by Willem Mulder. ([\#10107](https://github.com/matrix-org/synapse/issues/10107))
+- Fix bug where the server would attempt to fetch the same history in the room from a remote server multiple times in parallel. ([\#10116](https://github.com/matrix-org/synapse/issues/10116))
+- Fix a bug introduced in Synapse 1.33.0 which caused replication requests to fail when receiving a lot of very large events via federation. ([\#10118](https://github.com/matrix-org/synapse/issues/10118))
+- Fix bug when using workers where pagination requests failed if a remote server returned zero events from `/backfill`. Introduced in 1.35.0. ([\#10133](https://github.com/matrix-org/synapse/issues/10133))
+
+
+Improved Documentation
+----------------------
+
+- Clarify security note regarding hosting Synapse on the same domain as other web applications. ([\#9221](https://github.com/matrix-org/synapse/issues/9221))
+- Update CAPTCHA documentation to mention turning off the verify origin feature. Contributed by @aaronraimist. ([\#10046](https://github.com/matrix-org/synapse/issues/10046))
+- Tweak wording of database recommendation in `INSTALL.md`. Contributed by @aaronraimist. ([\#10057](https://github.com/matrix-org/synapse/issues/10057))
+- Add initial infrastructure for rendering Synapse documentation with mdbook. ([\#10086](https://github.com/matrix-org/synapse/issues/10086))
+- Convert the remaining Admin API documentation files to markdown. ([\#10089](https://github.com/matrix-org/synapse/issues/10089))
+- Make a link in docs use HTTPS. Contributed by @RhnSharma. ([\#10130](https://github.com/matrix-org/synapse/issues/10130))
+- Fix broken link in Docker docs. ([\#10132](https://github.com/matrix-org/synapse/issues/10132))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental `spaces_enabled` flag. The spaces features are always available now. ([\#10063](https://github.com/matrix-org/synapse/issues/10063))
+
+
+Internal Changes
+----------------
+
+- Tell CircleCI to build Docker images from `main` branch. ([\#9906](https://github.com/matrix-org/synapse/issues/9906))
+- Simplify naming convention for release branches to only include the major and minor version numbers. ([\#10013](https://github.com/matrix-org/synapse/issues/10013))
+- Add `parse_strings_from_args` for parsing an array from query parameters. ([\#10048](https://github.com/matrix-org/synapse/issues/10048), [\#10137](https://github.com/matrix-org/synapse/issues/10137))
+- Remove some dead code regarding TLS certificate handling. ([\#10054](https://github.com/matrix-org/synapse/issues/10054))
+- Remove redundant, unmaintained `convert_server_keys` script. ([\#10055](https://github.com/matrix-org/synapse/issues/10055))
+- Improve the error message printed by synctl when synapse fails to start. ([\#10059](https://github.com/matrix-org/synapse/issues/10059))
+- Fix GitHub Actions lint for newsfragments. ([\#10069](https://github.com/matrix-org/synapse/issues/10069))
+- Update opentracing to inject the right context into the carrier. ([\#10074](https://github.com/matrix-org/synapse/issues/10074))
+- Fix up `BatchingQueue` implementation. ([\#10078](https://github.com/matrix-org/synapse/issues/10078))
+- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091))
+- In Github Actions workflows, summarise the Sytest results in an easy-to-read format. ([\#10094](https://github.com/matrix-org/synapse/issues/10094))
+- Make `/sync` do fewer state resolutions. ([\#10102](https://github.com/matrix-org/synapse/issues/10102))
+- Add missing type hints to the admin API servlets. ([\#10105](https://github.com/matrix-org/synapse/issues/10105))
+- Improve opentracing annotations for `Notifier`. ([\#10111](https://github.com/matrix-org/synapse/issues/10111))
+- Enable Prometheus metrics for the jaeger client library. ([\#10112](https://github.com/matrix-org/synapse/issues/10112))
+- Work to improve the responsiveness of `/sync` requests. ([\#10124](https://github.com/matrix-org/synapse/issues/10124))
+- OpenTracing: use a consistent name for background processes. ([\#10135](https://github.com/matrix-org/synapse/issues/10135))
+
+
 Synapse 1.35.1 (2021-06-03)
 ===========================
 
INSTALL.md (12 lines changed)
@@ -399,11 +399,9 @@ Once you have installed synapse as above, you will need to configure it.
 
 ### Using PostgreSQL
 
-By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
-SQLite is only recommended in Synapse for testing purposes or for servers with
-very light workloads.
-
-Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:
+By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
+performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
+instead. Advantages include:
 
 - significant performance improvements due to the superior threading and
   caching model, smarter query optimiser
@@ -412,6 +410,10 @@ Almost all installations should opt to use [PostgreSQL](https://www.postgresql.o
 For information on how to install and use PostgreSQL in Synapse, please see
 [docs/postgres.md](docs/postgres.md)
 
+SQLite is only acceptable for testing purposes. SQLite should not be used in
+a production server. Synapse will perform poorly when using
+SQLite, especially when participating in large rooms.
+
 ### TLS certificates
 
 The default configuration exposes a single HTTP port on the local
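For readers following the PostgreSQL recommendation above, a minimal sketch of the corresponding `database` section of `homeserver.yaml` might look like the following. This is an illustration rather than part of this commit; the connection details are placeholders, and `docs/postgres.md` remains the authoritative reference (including the separate `synapse_port_db` script for migrating existing SQLite data).

```yaml
# Sketch only: point Synapse at PostgreSQL instead of the default SQLite
# database. User, password, database name and host are placeholder values.
database:
  name: psycopg2
  args:
    user: synapse_user
    password: secretpassword
    database: synapse
    host: localhost
    cp_min: 5
    cp_max: 10
```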
@@ -40,6 +40,7 @@ exclude mypy.ini
 exclude sytest-blacklist
 exclude test_postgresql.sh
 
+include book.toml
 include pyproject.toml
 recursive-include changelog.d *
 
README.rst (46 lines changed)
@@ -149,21 +149,45 @@ For details on having Synapse manage your federation TLS certificates
 automatically, please see `<docs/ACME.md>`_.
 
 
-Security Note
+Security note
 =============
 
-Matrix serves raw user generated data in some APIs - specifically the `content
-repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+Matrix serves raw, user-supplied data in some APIs -- specifically the `content
+repository endpoints`_.
 
-Whilst we have tried to mitigate against possible XSS attacks (e.g.
-https://github.com/matrix-org/synapse/pull/1021) we recommend running
-matrix homeservers on a dedicated domain name, to limit any malicious user generated
-content served to web browsers a matrix API from being able to attack webapps hosted
-on the same domain. This is particularly true of sharing a matrix webclient and
-server on the same domain.
+.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
 
-See https://github.com/vector-im/riot-web/issues/1977 and
-https://developer.github.com/changes/2014-04-25-user-content-security for more details.
+Whilst we make a reasonable effort to mitigate against XSS attacks (for
+instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
+domain hosting other web applications. This especially applies to sharing
+the domain with Matrix web clients and other sensitive applications like
+webmail. See
+https://developer.github.com/changes/2014-04-25-user-content-security for more
+information.
+
+.. _CSP: https://github.com/matrix-org/synapse/pull/1021
+
+Ideally, the homeserver should not simply be on a different subdomain, but on
+a completely different `registered domain`_ (also known as top-level site or
+eTLD+1). This is because `some attacks`_ are still possible as long as the two
+applications share the same registered domain.
+
+.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
+
+.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
+
+To illustrate this with an example, if your Element Web or other sensitive web
+application is hosted on ``A.example1.com``, you should ideally host Synapse on
+``example2.com``. Some amount of protection is offered by hosting on
+``B.example1.com`` instead, so this is also acceptable in some scenarios.
+However, you should *not* host your Synapse on ``A.example1.com``.
+
+Note that all of the above refers exclusively to the domain used in Synapse's
+``public_baseurl`` setting. In particular, it has no bearing on the domain
+mentioned in MXIDs hosted on that server.
+
+Following this advice ensures that even if an XSS is found in Synapse, the
+impact to other applications will be minimal.
 
 
 Upgrading an existing Synapse
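To make the domain advice in the security note above concrete, here is an illustrative fragment of `homeserver.yaml`. It is not part of this commit, and the hostnames are invented; it only shows the relationship between the MXID domain and the domain Synapse is actually served from.

```yaml
# Illustration only: user IDs remain @user:example1.com, while the homeserver
# itself (and therefore public_baseurl) is served from a separate registered
# domain, away from any web client or webmail hosted on example1.com.
server_name: "example1.com"
public_baseurl: "https://synapse.example2.com/"
```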
book.toml (new file, 39 lines)
@@ -0,0 +1,39 @@
# Documentation for possible options in this file is at
# https://rust-lang.github.io/mdBook/format/config.html
[book]
title = "Synapse"
authors = ["The Matrix.org Foundation C.I.C."]
language = "en"
multilingual = false

# The directory that documentation files are stored in
src = "docs"

[build]
# Prevent markdown pages from being automatically generated when they're
# linked to in SUMMARY.md
create-missing = false

[output.html]
# The URL visitors will be directed to when they try to edit a page
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"

# Remove the numbers that appear before each item in the sidebar, as they can
# get quite messy as we nest deeper
no-section-label = true

# The source code URL of the repository
git-repository-url = "https://github.com/matrix-org/synapse"
# The path that the docs are hosted on
site-url = "/synapse/"

# Additional HTML, JS, CSS that's injected into each page of the book.
# More information available in docs/website_files/README.md
additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
@@ -226,4 +226,4 @@ healthcheck:
 ## Using jemalloc
 
 Jemalloc is embedded in the image and will be used instead of the default allocator.
-You can read about jemalloc by reading the Synapse [README](../README.md).
+You can read about jemalloc by reading the Synapse [README](../README.rst).
@@ -1,31 +1,37 @@
 # Overview
-Captcha can be enabled for this home server. This file explains how to do that.
-The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
+A captcha can be enabled on your homeserver to help prevent bots from registering
+accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys
+from Google.
 
-## Getting keys
+## Getting API keys
 
-Requires a site/secret key pair from:
-
-<https://developers.google.com/recaptcha/>
-
-Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
-
-## Setting ReCaptcha Keys
-
-The keys are a config option on the home server config. If they are not
-visible, you can generate them via `--generate-config`. Set the following value:
-
+1. Create a new site at <https://www.google.com/recaptcha/admin/create>
+1. Set the label to anything you want
+1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option.
+This is the only type of captcha that works with Synapse.
+1. Add the public hostname for your server, as set in `public_baseurl`
+in `homeserver.yaml`, to the list of authorized domains. If you have not set
+`public_baseurl`, use `server_name`.
+1. Agree to the terms of service and submit.
+1. Copy your site key and secret key and add them to your `homeserver.yaml`
+configuration file
+```
 recaptcha_public_key: YOUR_SITE_KEY
 recaptcha_private_key: YOUR_SECRET_KEY
+```
-In addition, you MUST enable captchas via:
+1. Enable the CAPTCHA for new registrations
+```
 enable_registration_captcha: true
+```
+1. Go to the settings page for the CAPTCHA you just created
+1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the
+captcha can be displayed in any client. If you do not disable this option then you
+must specify the domains of every client that is allowed to display the CAPTCHA.
 
 ## Configuring IP used for auth
 
-The ReCaptcha API requires that the IP address of the user who solved the
-captcha is sent. If the client is connecting through a proxy or load balancer,
+The reCAPTCHA API requires that the IP address of the user who solved the
+CAPTCHA is sent. If the client is connecting through a proxy or load balancer,
 it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
 IP address. This can be configured using the `x_forwarded` directive in the
-listeners section of the homeserver.yaml configuration file.
+listeners section of the `homeserver.yaml` configuration file.
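To illustrate the `x_forwarded` directive mentioned above, here is a rough sketch of a matching listener block for `homeserver.yaml`. It is not part of this commit; the port, bind address and resource names are illustrative placeholders, not a recommended configuration.

```yaml
# Sketch only: a client/federation listener behind a reverse proxy or load
# balancer, telling Synapse to take the client IP from X-Forwarded-For so
# that the reCAPTCHA check sees the real origin address.
listeners:
  - port: 8008
    type: http
    tls: false
    x_forwarded: true
    bind_addresses: ['127.0.0.1']
    resources:
      - names: [client, federation]
        compress: false
```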
@@ -1,7 +1,72 @@
 # Synapse Documentation
 
-This directory contains documentation specific to the `synapse` homeserver.
+**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).**
+Please update any links to point to the new website instead.
 
-All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc)
+## About
 
-(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.)
+This directory currently holds a series of markdown files documenting how to install, use
+and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
+from this repository, but it is recommended to instead browse through the
+[website](https://matrix-org.github.io/synapse) for easier discoverability.
+
+## Adding to the documentation
+
+Most of the documentation currently exists as top-level files, as when organising them into
+a structured website, these files were kept in place so that existing links would not break.
+The rest of the documentation is stored in folders, such as `setup`, `usage`, and `development`
+etc. **All new documentation files should be placed in structured folders.** For example:
+
+To create a new user-facing documentation page about a new Single Sign-On protocol named
+"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md".
+This file might fit into the documentation structure at:
+
+- Usage
+  - Configuration
+    - User Authentication
+      - Single Sign-On
+        - **My Cool Protocol**
+
+Given that, one would place the new file under
+`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`.
+
+Note that the structure of the documentation (and thus the left sidebar on the website) is determined
+by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new
+line linking to the new documentation file:
+
+```markdown
+- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md)
+```
+
+## Building the documentation
+
+The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the
+documentation is determined by the structure of [SUMMARY.md](SUMMARY.md).
+
+First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**,
+build the documentation with:
+
+```sh
+mdbook build
+```
+
+The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
+browse the book by opening `book/index.html` in a web browser.
+
+You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
+
+```sh
+mdbook serve
+```
+
+The URL at which the docs can be viewed at will be logged.
+
+## Configuration and theming
+
+The look and behaviour of the website is configured by the [book.toml](../book.toml) file
+at the root of the repository. See
+[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html)
+for available options.
+
+The site can be themed and additionally extended with extra UI and features. See
+[website_files/README.md](website_files/README.md) for details.
docs/SUMMARY.md (new file, 87 lines)
@@ -0,0 +1,87 @@
# Summary

# Introduction
- [Welcome and Overview](welcome_and_overview.md)

# Setup
- [Installation](setup/installation.md)
- [Using Postgres](postgres.md)
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
- [Delegation](delegate.md)

# Upgrading
- [Upgrading between Synapse Versions](upgrading/README.md)
- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)

# Usage
- [Federation](federate.md)
- [Configuration](usage/configuration/README.md)
- [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
- [Logging Sample Config File](usage/configuration/logging_sample_config.md)
- [Structured Logging](structured_logging.md)
- [User Authentication](usage/configuration/user_authentication/README.md)
- [Single-Sign On]()
- [OpenID Connect](openid.md)
- [SAML]()
- [CAS]()
- [SSO Mapping Providers](sso_mapping_providers.md)
- [Password Auth Providers](password_auth_providers.md)
- [JSON Web Tokens](jwt.md)
- [Registration Captcha](CAPTCHA_SETUP.md)
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
- [Consent Tracking](consent_tracking.md)
- [URL Previews](url_previews.md)
- [User Directory](user_directory.md)
- [Message Retention Policies](message_retention_policies.md)
- [Pluggable Modules]()
- [Third Party Rules]()
- [Spam Checker](spam_checker.md)
- [Presence Router](presence_router_module.md)
- [Media Storage Providers]()
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)
- [Systemd](systemd-with-workers/README.md)
- [Administration](usage/administration/README.md)
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Delete Group](admin_api/delete_group.md)
- [Event Reports](admin_api/event_reports.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
- [Purge Rooms](admin_api/purge_room.md)
- [Register Users](admin_api/register_api.md)
- [Manipulate Room Membership](admin_api/room_membership.md)
- [Rooms](admin_api/rooms.md)
- [Server Notices](admin_api/server_notices.md)
- [Shutdown Room](admin_api/shutdown_room.md)
- [Statistics](admin_api/statistics.md)
- [Users](admin_api/user_admin_api.md)
- [Server Version](admin_api/version_api.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Scripts]()

# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Git Usage](dev/git.md)
- [Testing]()
- [OpenTracing](opentracing.md)
- [Synapse Architecture]()
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
- [Internal Documentation](development/internal_documentation/README.md)
- [Single Sign-On]()
- [SAML](dev/saml.md)
- [CAS](dev/cas.md)
- [State Resolution]()
- [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
- [Media Repository](media_repository.md)
- [Room and User Statistics](room_and_user_statistics.md)
- [Scripts]()

# Other
- [Dependency Deprecation Policy](deprecation_policy.md)
@@ -1,28 +1,14 @@
 Admin APIs
 ==========
 
+**Note**: The latest documentation can be viewed `here <https://matrix-org.github.io/synapse>`_.
+See `docs/README.md <../docs/README.md>`_ for more information.
+
+**Please update links to point to the website instead.** Existing files in this directory
+are preserved to maintain historical links, but may be moved in the future.
+
 This directory includes documentation for the various synapse specific admin
-APIs available.
-
-Authenticating as a server admin
---------------------------------
-
-Many of the API calls in the admin api will require an `access_token` for a
-server admin. (Note that a server admin is distinct from a room admin.)
-
-A user can be marked as a server admin by updating the database directly, e.g.:
-
-.. code-block:: sql
-
-    UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
-
-A new server admin user can also be created using the
-``register_new_matrix_user`` script.
-
-Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
-
-Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
-
-``curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>``
-
-Fore more details, please refer to the complete `matrix spec documentation <https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens>`_.
+APIs available. Updates to the existing Admin API documentation should still
+be made to these files, but any new documentation files should instead be placed under
+`docs/usage/administration/admin_api <../docs/usage/administration/admin_api>`_.
docs/admin_api/account_validity.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# Account validity API

This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.

## Renew account

This API extends the validity of an account by as much time as configured in the
`period` parameter from the `account_validity` configuration.

The API is:

```
POST /_synapse/admin/v1/account_validity/validity
```

with the following body:

```json
{
    "user_id": "<user ID for the account to renew>",
    "expiration_ts": 0,
    "enable_renewal_emails": true
}
```

`expiration_ts` is an optional parameter and overrides the expiration date,
which otherwise defaults to now + validity period.

`enable_renewal_emails` is also an optional parameter and enables/disables
sending renewal emails to the user. Defaults to true.

The API returns with the new expiration date for this account, as a timestamp in
milliseconds since epoch:

```json
{
    "expiration_ts": 0
}
```
@@ -1,42 +0,0 @@
Account validity API
====================

This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
``account_validity``) in Synapse's configuration.

Renew account
-------------

This API extends the validity of an account by as much time as configured in the
``period`` parameter from the ``account_validity`` configuration.

The API is::

    POST /_synapse/admin/v1/account_validity/validity

with the following body:

.. code:: json

    {
        "user_id": "<user ID for the account to renew>",
        "expiration_ts": 0,
        "enable_renewal_emails": true
    }

``expiration_ts`` is an optional parameter and overrides the expiration date,
which otherwise defaults to now + validity period.

``enable_renewal_emails`` is also an optional parameter and enables/disables
sending renewal emails to the user. Defaults to true.

The API returns with the new expiration date for this account, as a timestamp in
milliseconds since epoch:

.. code:: json

    {
        "expiration_ts": 0
    }
@@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/<group_id>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
@@ -7,7 +7,7 @@ The api is:
 GET /_synapse/admin/v1/event_reports?from=0&limit=10
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
@@ -75,9 +75,9 @@ The following fields are returned in the JSON response body:
 * `name`: string - The name of the room.
 * `event_id`: string - The ID of the reported event.
 * `user_id`: string - This is the user who reported the event and wrote the reason.
-* `reason`: string - Comment made by the `user_id` in this report. May be blank.
+* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`.
 * `score`: integer - Content is reported based upon a negative score, where -100 is
-  "most offensive" and 0 is "inoffensive".
+  "most offensive" and 0 is "inoffensive". May be `null`.
 * `sender`: string - This is the ID of the user who sent the original message/event that
   was reported.
 * `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
@@ -95,7 +95,7 @@ The api is:
 GET /_synapse/admin/v1/event_reports/<report_id>
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
@@ -4,9 +4,11 @@
 * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
 - [Quarantine media](#quarantine-media)
 * [Quarantining media by ID](#quarantining-media-by-id)
+* [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
 * [Quarantining media in a room](#quarantining-media-in-a-room)
 * [Quarantining all media of a user](#quarantining-all-media-of-a-user)
 * [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
+* [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
 - [Delete local media](#delete-local-media)
 * [Delete a specific local media](#delete-a-specific-local-media)
 * [Delete local media by date or size](#delete-local-media-by-date-or-size)
@@ -26,7 +28,7 @@ The API is:
 GET /_synapse/admin/v1/room/<room_id>/media
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 The API returns a JSON body like the following:
 ```json
@@ -76,6 +78,27 @@ Response:
 {}
 ```
 
+## Remove media from quarantine by ID
+
+This API removes a single piece of local or remote media from quarantine.
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
+
+{}
+```
+
+Where `server_name` is in the form of `example.org`, and `media_id` is in the
+form of `abcdefg12345...`.
+
+Response:
+
+```json
+{}
+```
+
 ## Quarantining media in a room
 
 This API quarantines all local and remote media in a room.
@@ -159,6 +182,26 @@ Response:
 {}
 ```
 
+## Unprotecting media from being quarantined
+
+This API reverts the protection of a media.
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/unprotect/<media_id>
+
+{}
+```
+
+Where `media_id` is in the form of `abcdefg12345...`.
+
+Response:
+
+```json
+{}
+```
+
 # Delete local media
 This API deletes the *local* media from the disk of your own server.
 This includes any local thumbnails and copies of media downloaded from
@@ -268,7 +311,7 @@ The following fields are returned in the JSON response body:
 * `deleted`: integer - The number of media items successfully deleted
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 If the user re-requests purged remote media, synapse will re-request the media
 from the originating server.
@@ -1,5 +1,4 @@
-Purge History API
-=================
+# Purge History API
 
 The purge history API allows server admins to purge historic events from their
 database, reclaiming disk space.
@@ -13,10 +12,12 @@ delete the last message in a room.
 
 The API is:
 
-``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
+```
+POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
+```
 
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
 
 By default, events sent by local users are not deleted, as they may represent
 the only copies of this content in existence. (Events sent by remote users are
@@ -24,54 +25,54 @@ deleted.)
 
 Room state data (such as joins, leaves, topic) is always preserved.
 
-To delete local message events as well, set ``delete_local_events`` in the body:
+To delete local message events as well, set `delete_local_events` in the body:
 
-.. code:: json
+```
 {
     "delete_local_events": true
 }
+```
 
 The caller must specify the point in the room to purge up to. This can be
 specified by including an event_id in the URI, or by setting a
-``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
+`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event
 id is given, that event (and others at the same graph depth) will be retained.
-If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
+If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch,
 in milliseconds.
 
 The API starts the purge running, and returns immediately with a JSON body with
 a purge id:
 
-.. code:: json
+```json
 {
     "purge_id": "<opaque id>"
 }
+```
 
-Purge status query
-------------------
+## Purge status query
 
 It is possible to poll for updates on recent purges with a second API;
 
-``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
+```
+GET /_synapse/admin/v1/purge_history_status/<purge_id>
+```
 
-Again, you will need to authenticate by providing an ``access_token`` for a
+Again, you will need to authenticate by providing an `access_token` for a
 server admin.
 
 This API returns a JSON body like the following:
 
-.. code:: json
+```json
 {
     "status": "active"
 }
+```
 
-The status will be one of ``active``, ``complete``, or ``failed``.
+The status will be one of `active`, `complete`, or `failed`.
 
-Reclaim disk space (Postgres)
------------------------------
+## Reclaim disk space (Postgres)
 
 To reclaim the disk space and return it to the operating system, you need to run
 `VACUUM FULL;` on the database.
 
-https://www.postgresql.org/docs/current/sql-vacuum.html
+<https://www.postgresql.org/docs/current/sql-vacuum.html>
docs/admin_api/register_api.md (new file, 73 lines)
@@ -0,0 +1,73 @@
# Shared-Secret Registration

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API:

```
> GET /_synapse/admin/v1/register

< {"nonce": "thisisanonce"}
```

Once you have the nonce, you can make a `POST` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content. Also you can
set the displayname (optional, `username` by default).

As an example:

```
> POST /_synapse/admin/v1/register
> {
   "nonce": "thisisanonce",
   "username": "pepper_roni",
   "displayname": "Pepper Roni",
   "password": "pizza",
   "admin": true,
   "mac": "mac_digest_here"
  }

< {
   "access_token": "token_here",
   "user_id": "@pepper_roni:localhost",
   "home_server": "test",
   "device_id": "device_id_here"
  }
```

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs. For an example of generation in Python:

```python
import hmac, hashlib

def generate_mac(nonce, user, password, admin=False, user_type=None):

    mac = hmac.new(
      key=shared_secret,
      digestmod=hashlib.sha1,
    )

    mac.update(nonce.encode('utf8'))
    mac.update(b"\x00")
    mac.update(user.encode('utf8'))
    mac.update(b"\x00")
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    if user_type:
        mac.update(b"\x00")
        mac.update(user_type.encode('utf8'))

    return mac.hexdigest()
```
@@ -1,68 +0,0 @@
Shared-Secret Registration
==========================

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API::

    > GET /_synapse/admin/v1/register

    < {"nonce": "thisisanonce"}

Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content. Also you can
set the displayname (optional, ``username`` by default).

As an example::

    > POST /_synapse/admin/v1/register
    > {
        "nonce": "thisisanonce",
        "username": "pepper_roni",
        "displayname": "Pepper Roni",
        "password": "pizza",
        "admin": true,
        "mac": "mac_digest_here"
      }

    < {
        "access_token": "token_here",
        "user_id": "@pepper_roni:localhost",
        "home_server": "test",
        "device_id": "device_id_here"
      }

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs. For an example of generation in Python::

    import hmac, hashlib

    def generate_mac(nonce, user, password, admin=False, user_type=None):

        mac = hmac.new(
          key=shared_secret,
          digestmod=hashlib.sha1,
        )

        mac.update(nonce.encode('utf8'))
        mac.update(b"\x00")
        mac.update(user.encode('utf8'))
        mac.update(b"\x00")
        mac.update(password.encode('utf8'))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode('utf8'))

        return mac.hexdigest()
@@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 Response:
 
@@ -443,7 +443,7 @@ with a body of:
 ```
 
 To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 A response body like the following is returned:
 
@@ -10,7 +10,7 @@ GET /_synapse/admin/v1/statistics/users/media
 ```
 
 To use it, you will need to authenticate by providing an `access_token`
-for a server admin: see [README.rst](README.rst).
+for a server admin: see [Admin API](../../usage/administration/admin_api).
 
 A response body like the following is returned:
 
docs/admin_api/user_admin_api.md (new file, 1001 lines)
File diff suppressed because it is too large
@ -1,981 +0,0 @@
|
||||||
.. contents::
|
|
||||||
|
|
||||||
Query User Account
|
|
||||||
==================
|
|
||||||
|
|
||||||
This API returns information about a specific user account.
|
|
||||||
|
|
||||||
The api is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v2/users/<user_id>
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
It returns a JSON body like the following:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"displayname": "User",
|
|
||||||
"threepids": [
|
|
||||||
{
|
|
||||||
"medium": "email",
|
|
||||||
"address": "<user_mail_1>"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"medium": "email",
|
|
||||||
"address": "<user_mail_2>"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"avatar_url": "<avatar_url>",
|
|
||||||
"admin": 0,
|
|
||||||
"deactivated": 0,
|
|
||||||
"shadow_banned": 0,
|
|
||||||
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
|
|
||||||
"creation_ts": 1560432506,
|
|
||||||
"appservice_id": null,
|
|
||||||
"consent_server_notice_sent": null,
|
|
||||||
"consent_version": null
|
|
||||||
}
|
|
||||||
|
|
||||||
URL parameters:
|
|
||||||
|
|
||||||
- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.
|
|
||||||
|
|
||||||
Create or modify Account
|
|
||||||
========================
|
|
||||||
|
|
||||||
This API allows an administrator to create or modify a user account with a
|
|
||||||
specific ``user_id``.
|
|
||||||
|
|
||||||
This api is::
|
|
||||||
|
|
||||||
PUT /_synapse/admin/v2/users/<user_id>
|
|
||||||
|
|
||||||
with a body of:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"password": "user_password",
|
|
||||||
"displayname": "User",
|
|
||||||
"threepids": [
|
|
||||||
{
|
|
||||||
"medium": "email",
|
|
||||||
"address": "<user_mail_1>"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"medium": "email",
|
|
||||||
"address": "<user_mail_2>"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"avatar_url": "<avatar_url>",
|
|
||||||
"admin": false,
|
|
||||||
"deactivated": false
|
|
||||||
}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
URL parameters:
|
|
||||||
|
|
||||||
- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.
|
|
||||||
|
|
||||||
Body parameters:
|
|
||||||
|
|
||||||
- ``password``, optional. If provided, the user's password is updated and all
|
|
||||||
devices are logged out.
|
|
||||||
|
|
||||||
- ``displayname``, optional, defaults to the value of ``user_id``.
|
|
||||||
|
|
||||||
- ``threepids``, optional, allows setting the third-party IDs (email, msisdn)
|
|
||||||
belonging to a user.
|
|
||||||
|
|
||||||
- ``avatar_url``, optional, must be a
|
|
||||||
`MXC URI <https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris>`_.
|
|
||||||
|
|
||||||
- ``admin``, optional, defaults to ``false``.
|
|
||||||
|
|
||||||
- ``deactivated``, optional. If unspecified, deactivation state will be left
|
|
||||||
unchanged on existing accounts and set to ``false`` for new accounts.
|
|
||||||
A user cannot be erased by deactivating with this API. For details on deactivating users see
|
|
||||||
`Deactivate Account <#deactivate-account>`_.
|
|
||||||
|
|
||||||
If the user already exists then optional parameters default to the current value.
|
|
||||||
|
|
||||||
In order to re-activate an account ``deactivated`` must be set to ``false``. If
|
|
||||||
users do not login via single-sign-on, a new ``password`` must be provided.
|
|
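A minimal sketch of such a request with ``curl``, assuming a homeserver on
``http://localhost:8008`` (all values are placeholders):

.. code:: bash

    # Sketch: create or update @user:server.com with a password and display name.
    curl --request PUT \
        --header "Authorization: Bearer <access_token>" \
        --header "Content-Type: application/json" \
        --data '{"password": "user_password", "displayname": "User", "admin": false}' \
        "http://localhost:8008/_synapse/admin/v2/users/@user:server.com"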
||||||
|
|
||||||
List Accounts
|
|
||||||
=============
|
|
||||||
|
|
||||||
This API returns all local user accounts.
|
|
||||||
By default, the response is ordered by ascending user ID.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"users": [
|
|
||||||
{
|
|
||||||
"name": "<user_id1>",
|
|
||||||
"is_guest": 0,
|
|
||||||
"admin": 0,
|
|
||||||
"user_type": null,
|
|
||||||
"deactivated": 0,
|
|
||||||
"shadow_banned": 0,
|
|
||||||
"displayname": "<User One>",
|
|
||||||
"avatar_url": null
|
|
||||||
}, {
|
|
||||||
"name": "<user_id2>",
|
|
||||||
"is_guest": 0,
|
|
||||||
"admin": 1,
|
|
||||||
"user_type": null,
|
|
||||||
"deactivated": 0,
|
|
||||||
"shadow_banned": 0,
|
|
||||||
"displayname": "<User Two>",
|
|
||||||
"avatar_url": "<avatar_url>"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"next_token": "100",
|
|
||||||
"total": 200
|
|
||||||
}
|
|
||||||
|
|
||||||
To paginate, check for ``next_token`` and if present, call the endpoint again
|
|
||||||
with ``from`` set to the value of ``next_token``. This will return a new page.
|
|
||||||
|
|
||||||
If the endpoint does not return a ``next_token`` then there are no more users
|
|
||||||
to paginate through.
|
|
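As a rough illustration, a paginated listing could be fetched with ``curl`` as in
the sketch below (the base URL and token are placeholders):

.. code:: bash

    # Sketch: list the first 10 non-guest accounts; repeat with from=<next_token>
    # to fetch the following page.
    curl --header "Authorization: Bearer <access_token>" \
        "http://localhost:8008/_synapse/admin/v2/users?from=0&limit=10&guests=false"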
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - Is optional and filters to only return users with user IDs
|
|
||||||
that contain this value. This parameter is ignored when using the ``name`` parameter.
|
|
||||||
- ``name`` - Is optional and filters to only return users with user ID localparts
|
|
||||||
**or** displaynames that contain this value.
|
|
||||||
- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
|
|
||||||
Defaults to ``true`` to include guest users.
|
|
||||||
- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
|
|
||||||
Defaults to ``false`` to exclude deactivated users.
|
|
||||||
- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
|
|
||||||
denoting the maximum number of items to return in this call. Defaults to ``100``.
|
|
||||||
- ``from`` - string representing a positive integer - Is optional but used for pagination,
|
|
||||||
denoting the offset in the returned results. This should be treated as an opaque value and
|
|
||||||
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
|
|
||||||
Defaults to ``0``.
|
|
||||||
- ``order_by`` - The method by which to sort the returned list of users.
|
|
||||||
If the ordered field has duplicates, the second order is always by ascending ``name``,
|
|
||||||
which guarantees a stable ordering. Valid values are:
|
|
||||||
|
|
||||||
- ``name`` - Users are ordered alphabetically by ``name``. This is the default.
|
|
||||||
- ``is_guest`` - Users are ordered by ``is_guest`` status.
|
|
||||||
- ``admin`` - Users are ordered by ``admin`` status.
|
|
||||||
- ``user_type`` - Users are ordered alphabetically by ``user_type``.
|
|
||||||
- ``deactivated`` - Users are ordered by ``deactivated`` status.
|
|
||||||
- ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
|
|
||||||
- ``displayname`` - Users are ordered alphabetically by ``displayname``.
|
|
||||||
- ``avatar_url`` - Users are ordered alphabetically by avatar URL.
|
|
||||||
|
|
||||||
- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
|
|
||||||
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
|
|
||||||
|
|
||||||
Caution. The database only has indexes on the columns ``name`` and ``created_ts``.
|
|
||||||
This means that if a different sort order is used (``is_guest``, ``admin``,
|
|
||||||
``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
|
|
||||||
this can cause a large load on the database, especially for large environments.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``users`` - An array of objects, each containing information about a user.
|
|
||||||
User objects contain the following fields:
|
|
||||||
|
|
||||||
- ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
|
|
||||||
- ``is_guest`` - bool - Status if that user is a guest account.
|
|
||||||
- ``admin`` - bool - Status if that user is a server administrator.
|
|
||||||
- ``user_type`` - string - Type of the user. Normal users are type ``None``.
|
|
||||||
This allows user type specific behaviour. There are also types ``support`` and ``bot``.
|
|
||||||
- ``deactivated`` - bool - Status if that user has been marked as deactivated.
|
|
||||||
- ``shadow_banned`` - bool - Status if that user has been marked as shadow banned.
|
|
||||||
- ``displayname`` - string - The user's display name if they have set one.
|
|
||||||
- ``avatar_url`` - string - The user's avatar URL if they have set one.
|
|
||||||
|
|
||||||
- ``next_token``: string representing a positive integer - Indication for pagination. See above.
|
|
||||||
- ``total`` - integer - Total number of users.
|
|
||||||
|
|
||||||
|
|
||||||
Query current sessions for a user
|
|
||||||
=================================
|
|
||||||
|
|
||||||
This API returns information about the active sessions for a specific user.
|
|
||||||
|
|
||||||
The api is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v1/whois/<user_id>
|
|
||||||
|
|
||||||
and::
|
|
||||||
|
|
||||||
GET /_matrix/client/r0/admin/whois/<userId>
|
|
||||||
|
|
||||||
See also: `Client Server API Whois
|
|
||||||
<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
It returns a JSON body like the following:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"user_id": "<user_id>",
|
|
||||||
"devices": {
|
|
||||||
"": {
|
|
||||||
"sessions": [
|
|
||||||
{
|
|
||||||
"connections": [
|
|
||||||
{
|
|
||||||
"ip": "1.2.3.4",
|
|
||||||
"last_seen": 1417222374433,
|
|
||||||
"user_agent": "Mozilla/5.0 ..."
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ip": "1.2.3.10",
|
|
||||||
"last_seen": 1417222374500,
|
|
||||||
"user_agent": "Dalvik/2.1.0 ..."
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
``last_seen`` is measured in milliseconds since the Unix epoch.
|
|
||||||
|
|
||||||
Deactivate Account
|
|
||||||
==================
|
|
||||||
|
|
||||||
This API deactivates an account. It removes active access tokens, resets the
|
|
||||||
password, and deletes third-party IDs (to prevent the user requesting a
|
|
||||||
password reset).
|
|
||||||
|
|
||||||
It can also mark the user as GDPR-erased. This means messages sent by the
|
|
||||||
user will still be visible by anyone that was in the room when these messages
|
|
||||||
were sent, but hidden from users joining the room afterwards.
|
|
||||||
|
|
||||||
The api is::
|
|
||||||
|
|
||||||
POST /_synapse/admin/v1/deactivate/<user_id>
|
|
||||||
|
|
||||||
with a body of:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"erase": true
|
|
||||||
}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
The erase parameter is optional and defaults to ``false``.
|
|
||||||
An empty body may be passed for backwards compatibility.
|
|
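For illustration, deactivating and erasing an account might look like the
following ``curl`` sketch (base URL, token and user ID are placeholders):

.. code:: bash

    # Sketch: deactivate @user:server.com and mark the account as GDPR-erased.
    curl --request POST \
        --header "Authorization: Bearer <access_token>" \
        --header "Content-Type: application/json" \
        --data '{"erase": true}' \
        "http://localhost:8008/_synapse/admin/v1/deactivate/@user:server.com"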
||||||
|
|
||||||
The following actions are performed when deactivating a user:
|
|
||||||
|
|
||||||
- Try to unbind 3PIDs from the identity server
|
|
||||||
- Remove all 3PIDs from the homeserver
|
|
||||||
- Delete all devices and E2EE keys
|
|
||||||
- Delete all access tokens
|
|
||||||
- Delete the password hash
|
|
||||||
- Removal from all rooms the user is a member of
|
|
||||||
- Remove the user from the user directory
|
|
||||||
- Reject all pending invites
|
|
||||||
- Remove all account validity information related to the user
|
|
||||||
|
|
||||||
The following additional actions are performed during deactivation if ``erase``
|
|
||||||
is set to ``true``:
|
|
||||||
|
|
||||||
- Remove the user's display name
|
|
||||||
- Remove the user's avatar URL
|
|
||||||
- Mark the user as erased
|
|
||||||
|
|
||||||
|
|
||||||
Reset password
|
|
||||||
==============
|
|
||||||
|
|
||||||
Changes the password of another user. This will automatically log the user out of all their devices.
|
|
||||||
|
|
||||||
The api is::
|
|
||||||
|
|
||||||
POST /_synapse/admin/v1/reset_password/<user_id>
|
|
||||||
|
|
||||||
with a body of:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"new_password": "<secret>",
|
|
||||||
"logout_devices": true
|
|
||||||
}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
The parameter ``new_password`` is required.
|
|
||||||
The parameter ``logout_devices`` is optional and defaults to ``true``.
|
|
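A minimal ``curl`` sketch, assuming a homeserver on ``http://localhost:8008``
(the token, user ID and new password are placeholders):

.. code:: bash

    # Sketch: set a new password and log the user out of all of their devices.
    curl --request POST \
        --header "Authorization: Bearer <access_token>" \
        --header "Content-Type: application/json" \
        --data '{"new_password": "<secret>", "logout_devices": true}' \
        "http://localhost:8008/_synapse/admin/v1/reset_password/@user:server.com"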
||||||
|
|
||||||
Get whether a user is a server administrator or not
|
|
||||||
===================================================
|
|
||||||
|
|
||||||
|
|
||||||
The api is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v1/users/<user_id>/admin
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"admin": true
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Change whether a user is a server administrator or not
|
|
||||||
======================================================
|
|
||||||
|
|
||||||
Note that you cannot demote yourself.
|
|
||||||
|
|
||||||
The api is::
|
|
||||||
|
|
||||||
PUT /_synapse/admin/v1/users/<user_id>/admin
|
|
||||||
|
|
||||||
with a body of:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"admin": true
|
|
||||||
}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
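For illustration, granting server-admin rights might look like this ``curl``
sketch (base URL, token and user ID are placeholders):

.. code:: bash

    # Sketch: promote @user:server.com to server administrator.
    curl --request PUT \
        --header "Authorization: Bearer <access_token>" \
        --header "Content-Type: application/json" \
        --data '{"admin": true}' \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/admin"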
||||||
|
|
||||||
|
|
||||||
List room memberships of a user
|
|
||||||
================================
|
|
||||||
Gets a list of all ``room_id`` that a specific ``user_id`` is a member of.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v1/users/<user_id>/joined_rooms
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"joined_rooms": [
|
|
||||||
"!DuGcnbhHGaSZQoNQR:matrix.org",
|
|
||||||
"!ZtSaPCawyWtxfWiIy:matrix.org"
|
|
||||||
],
|
|
||||||
"total": 2
|
|
||||||
}
|
|
||||||
|
|
||||||
The server returns the list of rooms of which the user and the server
|
|
||||||
are members. If the user is local, all the rooms of which the user is
|
|
||||||
a member are returned.
|
|
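As a quick illustration, the membership list might be fetched as follows
(placeholders throughout; the base URL is an assumption):

.. code:: bash

    # Sketch: list the rooms that @user:server.com is a member of.
    curl --header "Authorization: Bearer <access_token>" \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/joined_rooms"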
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``joined_rooms`` - An array of ``room_id``.
|
|
||||||
- ``total`` - Number of rooms.
|
|
||||||
|
|
||||||
|
|
||||||
List media of a user
|
|
||||||
====================
|
|
||||||
Gets a list of all local media that a specific ``user_id`` has created.
|
|
||||||
By default, the response is ordered by descending creation date and ascending media ID.
|
|
||||||
The newest media is on top. You can change the order with parameters
|
|
||||||
``order_by`` and ``dir``.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v1/users/<user_id>/media
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"media": [
|
|
||||||
{
|
|
||||||
"created_ts": 100400,
|
|
||||||
"last_access_ts": null,
|
|
||||||
"media_id": "qXhyRzulkwLsNHTbpHreuEgo",
|
|
||||||
"media_length": 67,
|
|
||||||
"media_type": "image/png",
|
|
||||||
"quarantined_by": null,
|
|
||||||
"safe_from_quarantine": false,
|
|
||||||
"upload_name": "test1.png"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created_ts": 200400,
|
|
||||||
"last_access_ts": null,
|
|
||||||
"media_id": "FHfiSnzoINDatrXHQIXBtahw",
|
|
||||||
"media_length": 67,
|
|
||||||
"media_type": "image/png",
|
|
||||||
"quarantined_by": null,
|
|
||||||
"safe_from_quarantine": false,
|
|
||||||
"upload_name": "test2.png"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"next_token": 3,
|
|
||||||
"total": 2
|
|
||||||
}
|
|
||||||
|
|
||||||
To paginate, check for ``next_token`` and if present, call the endpoint again
|
|
||||||
with ``from`` set to the value of ``next_token``. This will return a new page.
|
|
||||||
|
|
||||||
If the endpoint does not return a ``next_token`` then there are no more
|
|
||||||
media to paginate through.
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
|
|
||||||
- ``limit``: string representing a positive integer - Is optional but is used for pagination,
|
|
||||||
denoting the maximum number of items to return in this call. Defaults to ``100``.
|
|
||||||
- ``from``: string representing a positive integer - Is optional but used for pagination,
|
|
||||||
denoting the offset in the returned results. This should be treated as an opaque value and
|
|
||||||
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
|
|
||||||
Defaults to ``0``.
|
|
||||||
- ``order_by`` - The method by which to sort the returned list of media.
|
|
||||||
If the ordered field has duplicates, the second order is always by ascending ``media_id``,
|
|
||||||
which guarantees a stable ordering. Valid values are:
|
|
||||||
|
|
||||||
- ``media_id`` - Media are ordered alphabetically by ``media_id``.
|
|
||||||
- ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with.
|
|
||||||
- ``created_ts`` - Media are ordered by when the content was uploaded in ms.
|
|
||||||
Smallest to largest. This is the default.
|
|
||||||
- ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
|
|
||||||
Smallest to largest.
|
|
||||||
- ``media_length`` - Media are ordered by length of the media in bytes.
|
|
||||||
Smallest to largest.
|
|
||||||
- ``media_type`` - Media are ordered alphabetically by MIME-type.
|
|
||||||
- ``quarantined_by`` - Media are ordered alphabetically by the user ID that
|
|
||||||
initiated the quarantine request for this media.
|
|
||||||
- ``safe_from_quarantine`` - Media are ordered by the status if this media is safe
|
|
||||||
from quarantining.
|
|
||||||
|
|
||||||
- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
|
|
||||||
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
|
|
||||||
|
|
||||||
If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
|
|
||||||
(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).
|
|
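For illustration, a listing ordered by file size (largest first) might be
requested as in this ``curl`` sketch (base URL, token and user ID are
placeholders):

.. code:: bash

    # Sketch: list @user:server.com's media, largest files first.
    curl --header "Authorization: Bearer <access_token>" \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/media?order_by=media_length&dir=b"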
||||||
|
|
||||||
Caution. The database only has indexes on the columns ``media_id``,
|
|
||||||
``user_id`` and ``created_ts``. This means that if a different sort order is used
|
|
||||||
(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
|
|
||||||
``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
|
|
||||||
database, especially for large environments.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``media`` - An array of objects, each containing information about a media.
|
|
||||||
Media objects contain the following fields:
|
|
||||||
|
|
||||||
- ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
|
|
||||||
- ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
|
|
||||||
- ``media_id`` - string - The id used to refer to the media.
|
|
||||||
- ``media_length`` - integer - Length of the media in bytes.
|
|
||||||
- ``media_type`` - string - The MIME-type of the media.
|
|
||||||
- ``quarantined_by`` - string - The user ID that initiated the quarantine request
|
|
||||||
for this media.
|
|
||||||
|
|
||||||
- ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
|
|
||||||
- ``upload_name`` - string - The name the media was uploaded with.
|
|
||||||
|
|
||||||
- ``next_token``: integer - Indication for pagination. See above.
|
|
||||||
- ``total`` - integer - Total number of media.
|
|
||||||
|
|
||||||
Login as a user
|
|
||||||
===============
|
|
||||||
|
|
||||||
Get an access token that can be used to authenticate as that user. Useful for
|
|
||||||
when admins wish to do actions on behalf of a user.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
POST /_synapse/admin/v1/users/<user_id>/login
|
|
||||||
{}
|
|
||||||
|
|
||||||
An optional ``valid_until_ms`` field can be specified in the request body as an
|
|
||||||
integer timestamp that specifies when the token should expire. By default tokens
|
|
||||||
do not expire.
|
|
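A minimal sketch of such a request with ``curl``, assuming a homeserver on
``http://localhost:8008``; the expiry value is an arbitrary example timestamp in
milliseconds since the epoch:

.. code:: bash

    # Sketch: obtain a token for @user:server.com that expires at the given time.
    curl --request POST \
        --header "Authorization: Bearer <admin_access_token>" \
        --header "Content-Type: application/json" \
        --data '{"valid_until_ms": 1700000000000}' \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/login"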
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"access_token": "<opaque_access_token_string>"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
This API does *not* generate a new device for the user, and so will not appear
|
|
||||||
in their ``/devices`` list, and in general the target user should not be able to
|
|
||||||
tell that someone has logged in as them.
|
|
||||||
|
|
||||||
To expire the token call the standard ``/logout`` API with the token.
|
|
||||||
|
|
||||||
Note: The token will expire if the *admin* user calls ``/logout/all`` from any
|
|
||||||
of their devices, but the token will *not* expire if the target user does the
|
|
||||||
same.
|
|
||||||
|
|
||||||
|
|
||||||
User devices
|
|
||||||
============
|
|
||||||
|
|
||||||
List all devices
|
|
||||||
----------------
|
|
||||||
Gets information about all devices for a specific ``user_id``.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v2/users/<user_id>/devices
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"devices": [
|
|
||||||
{
|
|
||||||
"device_id": "QBUAZIFURK",
|
|
||||||
"display_name": "android",
|
|
||||||
"last_seen_ip": "1.2.3.4",
|
|
||||||
"last_seen_ts": 1474491775024,
|
|
||||||
"user_id": "<user_id>"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"device_id": "AUIECTSRND",
|
|
||||||
"display_name": "ios",
|
|
||||||
"last_seen_ip": "1.2.3.5",
|
|
||||||
"last_seen_ts": 1474491775025,
|
|
||||||
"user_id": "<user_id>"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"total": 2
|
|
||||||
}
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``devices`` - An array of objects, each containing information about a device.
|
|
||||||
Device objects contain the following fields:
|
|
||||||
|
|
||||||
- ``device_id`` - Identifier of device.
|
|
||||||
- ``display_name`` - Display name set by the user for this device.
|
|
||||||
Absent if no name has been set.
|
|
||||||
- ``last_seen_ip`` - The IP address where this device was last seen.
|
|
||||||
(May be a few minutes out of date, for efficiency reasons).
|
|
||||||
- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
|
|
||||||
device was last seen. (May be a few minutes out of date, for efficiency reasons).
|
|
||||||
- ``user_id`` - Owner of device.
|
|
||||||
|
|
||||||
- ``total`` - Total number of user's devices.
|
|
||||||
|
|
||||||
Delete multiple devices
|
|
||||||
-----------------------
|
|
||||||
Deletes the given devices for a specific ``user_id``, and invalidates
|
|
||||||
any access token associated with them.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
POST /_synapse/admin/v2/users/<user_id>/delete_devices
|
|
||||||
|
|
||||||
{
|
|
||||||
"devices": [
|
|
||||||
"QBUAZIFURK",
|
|
||||||
"AUIECTSRND"
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
An empty JSON dict is returned.
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
|
|
||||||
The following fields are required in the JSON request body:
|
|
||||||
|
|
||||||
- ``devices`` - The list of device IDs to delete.
|
|
||||||
|
|
||||||
Show a device
|
|
||||||
---------------
|
|
||||||
Gets information on a single device, by ``device_id`` for a specific ``user_id``.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"device_id": "<device_id>",
|
|
||||||
"display_name": "android",
|
|
||||||
"last_seen_ip": "1.2.3.4",
|
|
||||||
"last_seen_ts": 1474491775024,
|
|
||||||
"user_id": "<user_id>"
|
|
||||||
}
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
- ``device_id`` - The device to retrieve.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``device_id`` - Identifier of device.
|
|
||||||
- ``display_name`` - Display name set by the user for this device.
|
|
||||||
Absent if no name has been set.
|
|
||||||
- ``last_seen_ip`` - The IP address where this device was last seen.
|
|
||||||
(May be a few minutes out of date, for efficiency reasons).
|
|
||||||
- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
|
|
||||||
device was last seen. (May be a few minutes out of date, for efficiency reasons).
|
|
||||||
- ``user_id`` - Owner of device.
|
|
||||||
|
|
||||||
Update a device
|
|
||||||
---------------
|
|
||||||
Updates the metadata on the given ``device_id`` for a specific ``user_id``.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
|
|
||||||
|
|
||||||
{
|
|
||||||
"display_name": "My other phone"
|
|
||||||
}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
An empty JSON dict is returned.
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
- ``device_id`` - The device to update.
|
|
||||||
|
|
||||||
The following fields are required in the JSON request body:
|
|
||||||
|
|
||||||
- ``display_name`` - The new display name for this device. If not given,
|
|
||||||
the display name is unchanged.
|
|
||||||
|
|
||||||
Delete a device
|
|
||||||
---------------
|
|
||||||
Deletes the given ``device_id`` for a specific ``user_id``,
|
|
||||||
and invalidates any access token associated with it.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
|
|
||||||
|
|
||||||
{}
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
An empty JSON dict is returned.
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
- ``device_id`` - The device to delete.
|
|
||||||
|
|
||||||
List all pushers
|
|
||||||
================
|
|
||||||
Gets information about all pushers for a specific ``user_id``.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v1/users/<user_id>/pushers
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"pushers": [
|
|
||||||
{
|
|
||||||
"app_display_name":"HTTP Push Notifications",
|
|
||||||
"app_id":"m.http",
|
|
||||||
"data": {
|
|
||||||
"url":"example.com"
|
|
||||||
},
|
|
||||||
"device_display_name":"pushy push",
|
|
||||||
"kind":"http",
|
|
||||||
"lang":"None",
|
|
||||||
"profile_tag":"",
|
|
||||||
"pushkey":"a@example.com"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"total": 1
|
|
||||||
}
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``pushers`` - An array containing the current pushers for the user
|
|
||||||
|
|
||||||
- ``app_display_name`` - string - A string that will allow the user to identify
|
|
||||||
what application owns this pusher.
|
|
||||||
|
|
||||||
- ``app_id`` - string - This is a reverse-DNS style identifier for the application.
|
|
||||||
Max length, 64 chars.
|
|
||||||
|
|
||||||
- ``data`` - A dictionary of information for the pusher implementation itself.
|
|
||||||
|
|
||||||
- ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send
|
|
||||||
notifications to.
|
|
||||||
|
|
||||||
- ``format`` - string - The format to use when sending notifications to the
|
|
||||||
Push Gateway.
|
|
||||||
|
|
||||||
- ``device_display_name`` - string - A string that will allow the user to identify
|
|
||||||
what device owns this pusher.
|
|
||||||
|
|
||||||
- ``profile_tag`` - string - This string determines which set of device specific rules
|
|
||||||
this pusher executes.
|
|
||||||
|
|
||||||
- ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
|
|
||||||
- ``lang`` - string - The preferred language for receiving notifications
|
|
||||||
(e.g. 'en' or 'en-US')
|
|
||||||
|
|
||||||
|
|
||||||
- ``pushkey`` - string - This is a unique identifier for this pusher.
|
|
||||||
Max length, 512 bytes.
|
|
||||||
|
|
||||||
- ``total`` - integer - Number of pushers.
|
|
||||||
|
|
||||||
See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_
|
|
||||||
|
|
||||||
Shadow-banning users
|
|
||||||
====================
|
|
||||||
|
|
||||||
Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
|
|
||||||
A shadow-banned user receives successful responses to their client-server API requests,
|
|
||||||
but the events are not propagated into rooms. This can be an effective tool as it
|
|
||||||
(hopefully) takes longer for the user to realise they are being moderated before
|
|
||||||
pivoting to another account.
|
|
||||||
|
|
||||||
Shadow-banning a user should be used as a tool of last resort and may lead to confusing
|
|
||||||
or broken behaviour for the client. A shadow-banned user will not receive any
|
|
||||||
notification and it is generally more appropriate to ban or kick abusive users.
|
|
||||||
A shadow-banned user will be unable to contact anyone on the server.
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
POST /_synapse/admin/v1/users/<user_id>/shadow_ban
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
An empty JSON dict is returned.
|
|
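For illustration, the call might look like the following ``curl`` sketch (base
URL, token and user ID are placeholders):

.. code:: bash

    # Sketch: shadow-ban the local user @user:server.com.
    curl --request POST \
        --header "Authorization: Bearer <access_token>" \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/shadow_ban"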
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
|
||||||
be local.
|
|
||||||
|
|
||||||
Override ratelimiting for users
|
|
||||||
===============================
|
|
||||||
|
|
||||||
This API allows you to override or disable ratelimiting for a specific user.
|
|
||||||
There are specific APIs to set, get and delete a ratelimit.
|
|
||||||
|
|
||||||
Get status of ratelimit
|
|
||||||
-----------------------
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"messages_per_second": 0,
|
|
||||||
"burst_count": 0
|
|
||||||
}
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
|
||||||
be local.
|
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``messages_per_second`` - integer - The number of actions that can
|
|
||||||
be performed in a second. A value of ``0`` means that ratelimiting is disabled for this user.
|
|
||||||
- ``burst_count`` - integer - How many actions can be performed before
|
|
||||||
being limited.
|
|
||||||
|
|
||||||
If **no** custom ratelimit is set, an empty JSON dict is returned.
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{}
|
|
||||||
|
|
||||||
Set ratelimit
|
|
||||||
-------------
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"messages_per_second": 0,
|
|
||||||
"burst_count": 0
|
|
||||||
}
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
|
||||||
be local.
|
|
||||||
|
|
||||||
Body parameters:
|
|
||||||
|
|
||||||
- ``messages_per_second`` - positive integer, optional. The number of actions that can
|
|
||||||
be performed in a second. Defaults to ``0``.
|
|
||||||
- ``burst_count`` - positive integer, optional. How many actions can be performed
|
|
||||||
before being limited. Defaults to ``0``.
|
|
||||||
|
|
||||||
To disable ratelimiting for a user, set both values to ``0``.
|
|
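As a rough illustration, a custom limit might be applied with a ``curl`` sketch
like the one below; the numbers are example values only, and the base URL and
token are placeholders:

.. code:: bash

    # Sketch: allow @user:server.com 10 actions per second with a burst of 20.
    curl --request POST \
        --header "Authorization: Bearer <access_token>" \
        --header "Content-Type: application/json" \
        --data '{"messages_per_second": 10, "burst_count": 20}' \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/override_ratelimit"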
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
|
||||||
|
|
||||||
- ``messages_per_second`` - integer - The number of actions that can
|
|
||||||
be performed in a second.
|
|
||||||
- ``burst_count`` - integer - How many actions can be performed before
|
|
||||||
being limited.
|
|
||||||
|
|
||||||
Delete ratelimit
|
|
||||||
----------------
|
|
||||||
|
|
||||||
The API is::
|
|
||||||
|
|
||||||
DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
|
||||||
server admin: see `README.rst <README.rst>`_.
|
|
||||||
|
|
||||||
An empty JSON dict is returned.
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
{}
|
|
||||||
|
|
||||||
**Parameters**
|
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
|
||||||
|
|
||||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
|
||||||
be local.
|
|
||||||
|
|
|
@ -1,20 +1,21 @@
|
||||||
Version API
|
# Version API
|
||||||
===========
|
|
||||||
|
|
||||||
This API returns the running Synapse version and the Python version
|
This API returns the running Synapse version and the Python version
|
||||||
on which Synapse is being run. This is useful when a Synapse instance
|
on which Synapse is being run. This is useful when a Synapse instance
|
||||||
is behind a proxy that does not forward the 'Server' header (which also
|
is behind a proxy that does not forward the 'Server' header (which also
|
||||||
contains Synapse version information).
|
contains Synapse version information).
|
||||||
|
|
||||||
The api is::
|
The api is:
|
||||||
|
|
||||||
GET /_synapse/admin/v1/server_version
|
```
|
||||||
|
GET /_synapse/admin/v1/server_version
|
||||||
|
```
|
||||||
|
|
||||||
It returns a JSON body like the following:
|
It returns a JSON body like the following:
|
||||||
|
|
||||||
.. code:: json
|
```json
|
||||||
|
{
|
||||||
{
|
|
||||||
"server_version": "0.99.2rc1 (b=develop, abcdef123)",
|
"server_version": "0.99.2rc1 (b=develop, abcdef123)",
|
||||||
"python_version": "3.6.8"
|
"python_version": "3.6.8"
|
||||||
}
|
}
|
||||||
|
```
|
|
@ -122,15 +122,15 @@ So, what counts as a more- or less-stable branch? A little reflection will show
|
||||||
that our active branches are ordered thus, from more-stable to less-stable:
|
that our active branches are ordered thus, from more-stable to less-stable:
|
||||||
|
|
||||||
* `master` (tracks our last release).
|
* `master` (tracks our last release).
|
||||||
* `release-vX.Y.Z` (the branch where we prepare the next release)<sup
|
* `release-vX.Y` (the branch where we prepare the next release)<sup
|
||||||
id="a3">[3](#f3)</sup>.
|
id="a3">[3](#f3)</sup>.
|
||||||
* PR branches which are targeting the release.
|
* PR branches which are targeting the release.
|
||||||
* `develop` (our "mainline" branch containing our bleeding-edge).
|
* `develop` (our "mainline" branch containing our bleeding-edge).
|
||||||
* regular PR branches.
|
* regular PR branches.
|
||||||
|
|
||||||
The corollary is: if you have a bugfix that needs to land in both
|
The corollary is: if you have a bugfix that needs to land in both
|
||||||
`release-vX.Y.Z` *and* `develop`, then you should base your PR on
|
`release-vX.Y` *and* `develop`, then you should base your PR on
|
||||||
`release-vX.Y.Z`, get it merged there, and then merge from `release-vX.Y.Z` to
|
`release-vX.Y`, get it merged there, and then merge from `release-vX.Y` to
|
||||||
`develop`. (If a fix lands in `develop` and we later need it in a
|
`develop`. (If a fix lands in `develop` and we later need it in a
|
||||||
release-branch, we can of course cherry-pick it, but landing it in the release
|
release-branch, we can of course cherry-pick it, but landing it in the release
|
||||||
branch first helps reduce the chance of annoying conflicts.)
|
branch first helps reduce the chance of annoying conflicts.)
|
||||||
|
@ -145,4 +145,4 @@ most intuitive name. [^](#a1)
|
||||||
|
|
||||||
<b id="f3">[3]</b>: Very, very occasionally (I think this has happened once in
|
<b id="f3">[3]</b>: Very, very occasionally (I think this has happened once in
|
||||||
the history of Synapse), we've had two releases in flight at once. Obviously,
|
the history of Synapse), we've had two releases in flight at once. Obviously,
|
||||||
`release-v1.2.3` is more-stable than `release-v1.3.0`. [^](#a3)
|
`release-v1.2` is more-stable than `release-v1.3`. [^](#a3)
|
||||||
|
|
7 docs/development/contributing_guide.md (new file)
|
@ -0,0 +1,7 @@
|
||||||
|
<!--
|
||||||
|
Include the contents of CONTRIBUTING.md from the project root (where GitHub likes it
|
||||||
|
to be)
|
||||||
|
-->
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
{{#include ../../CONTRIBUTING.md}}
|
12 docs/development/internal_documentation/README.md (new file)
|
@ -0,0 +1,12 @@
|
||||||
|
# Internal Documentation
|
||||||
|
|
||||||
|
This section covers implementation documentation for various parts of Synapse.
|
||||||
|
|
||||||
|
If a developer is planning to make a change to a feature of Synapse, it can be useful for
|
||||||
|
general documentation of how that feature is implemented to be available. This saves the
|
||||||
|
developer time in place of needing to understand how the feature works by reading the
|
||||||
|
code.
|
||||||
|
|
||||||
|
Documentation that would be more useful for the perspective of a system administrator,
|
||||||
|
rather than a developer who's intending to change the code, should instead be placed
|
||||||
|
under the Usage section of the documentation.
|
BIN docs/favicon.png (new binary file, 7.7 KiB; not shown)
58 docs/favicon.svg (new file, 3.9 KiB)
|
@ -0,0 +1,58 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||||
|
<svg
|
||||||
|
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||||
|
xmlns:cc="http://creativecommons.org/ns#"
|
||||||
|
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||||
|
xmlns:svg="http://www.w3.org/2000/svg"
|
||||||
|
xmlns="http://www.w3.org/2000/svg"
|
||||||
|
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||||
|
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||||
|
viewBox="0 0 199.7 184.2"
|
||||||
|
version="1.1"
|
||||||
|
id="svg62"
|
||||||
|
sodipodi:docname="mdbook-favicon.svg"
|
||||||
|
inkscape:version="1.0.2 (e86c870879, 2021-01-15, custom)">
|
||||||
|
<metadata
|
||||||
|
id="metadata68">
|
||||||
|
<rdf:RDF>
|
||||||
|
<cc:Work
|
||||||
|
rdf:about="">
|
||||||
|
<dc:format>image/svg+xml</dc:format>
|
||||||
|
<dc:type
|
||||||
|
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||||
|
</cc:Work>
|
||||||
|
</rdf:RDF>
|
||||||
|
</metadata>
|
||||||
|
<defs
|
||||||
|
id="defs66" />
|
||||||
|
<sodipodi:namedview
|
||||||
|
pagecolor="#ffffff"
|
||||||
|
bordercolor="#666666"
|
||||||
|
borderopacity="1"
|
||||||
|
objecttolerance="10"
|
||||||
|
gridtolerance="10"
|
||||||
|
guidetolerance="10"
|
||||||
|
inkscape:pageopacity="0"
|
||||||
|
inkscape:pageshadow="2"
|
||||||
|
inkscape:window-width="1920"
|
||||||
|
inkscape:window-height="1026"
|
||||||
|
id="namedview64"
|
||||||
|
showgrid="false"
|
||||||
|
inkscape:zoom="3.2245912"
|
||||||
|
inkscape:cx="84.790185"
|
||||||
|
inkscape:cy="117.96478"
|
||||||
|
inkscape:window-x="0"
|
||||||
|
inkscape:window-y="0"
|
||||||
|
inkscape:window-maximized="1"
|
||||||
|
inkscape:current-layer="svg62" />
|
||||||
|
<style
|
||||||
|
id="style58">
|
||||||
|
@media (prefers-color-scheme: dark) {
|
||||||
|
svg { fill: white; }
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
<path
|
||||||
|
d="m 189.5,36.8 c 0.2,2.8 0,5.1 -0.6,6.8 L 153,162 c -0.6,2.1 -2,3.7 -4.2,5 -2.2,1.2 -4.4,1.9 -6.7,1.9 H 31.4 c -9.6,0 -15.3,-2.8 -17.3,-8.4 -0.8,-2.2 -0.8,-3.9 0.1,-5.2 0.9,-1.2 2.4,-1.8 4.6,-1.8 H 123 c 7.4,0 12.6,-1.4 15.4,-4.1 2.8,-2.7 5.7,-8.9 8.6,-18.4 L 179.9,22.4 c 1.8,-5.9 1,-11.1 -2.2,-15.6 C 174.5,2.3 169.9,0 164,0 H 72.7 c -1,0 -3.1,0.4 -6.1,1.1 L 66.7,0.7 C 64.5,0.2 62.6,0 61,0.1 c -1.6,0.1 -3,0.5 -4.3,1.4 -1.3,0.9 -2.4,1.8 -3.2,2.8 -0.8,1 -1.5,2.2 -2.3,3.8 -0.8,1.6 -1.4,3 -1.9,4.3 -0.5,1.3 -1.1,2.7 -1.8,4.2 -0.7,1.5 -1.3,2.7 -2,3.7 -0.5,0.6 -1.2,1.5 -2,2.5 -0.8,1 -1.6,2 -2.2,2.8 -0.6,0.8 -0.9,1.5 -1.1,2.2 -0.2,0.7 -0.1,1.8 0.2,3.2 0.3,1.4 0.4,2.4 0.4,3.1 -0.3,3 -1.4,6.9 -3.3,11.6 -1.9,4.7 -3.6,8.1 -5.1,10.1 -0.3,0.4 -1.2,1.3 -2.6,2.7 -1.4,1.4 -2.3,2.6 -2.6,3.7 -0.3,0.4 -0.3,1.5 -0.1,3.4 0.3,1.8 0.4,3.1 0.3,3.8 -0.3,2.7 -1.3,6.3 -3,10.8 -2.406801,6.370944 -3.4,8.2 -5,11 -0.2,0.5 -0.9,1.4 -2,2.8 -1.1,1.4 -1.8,2.5 -2,3.4 -0.2,0.6 -0.1,1.8 0.1,3.4 0.2,1.6 0.2,2.8 -0.1,3.6 -0.6,3 -1.8,6.7 -3.6,11 -1.8,4.3 -3.6,7.9 -5.4,11 -0.5,0.8 -1.1,1.7 -2,2.8 -0.8,1.1 -1.5,2 -2,2.8 -0.5,0.8 -0.8,1.6 -1,2.5 -0.1,0.5 0,1.3 0.4,2.3 0.3,1.1 0.4,1.9 0.4,2.6 -0.1,1.1 -0.2,2.6 -0.5,4.4 -0.2,1.8 -0.4,2.9 -0.4,3.2 -1.8,4.8 -1.7,9.9 0.2,15.2 2.2,6.2 6.2,11.5 11.9,15.8 5.7,4.3 11.7,6.4 17.8,6.4 h 110.7 c 5.2,0 10.1,-1.7 14.7,-5.2 4.6,-3.5 7.7,-7.8 9.2,-12.9 l 33,-108.6 c 1.8,-5.8 1,-10.9 -2.2,-15.5 -1.7,-2.5 -4,-4.2 -7.1,-5.4 z M 38.14858,105.59813 60.882735,41.992545 h 10.8 c 6.340631,0 33.351895,0.778957 70.804135,0.970479 -18.18245,63.254766 0,0 -18.18245,63.254766 -23.00947,-0.10382 -63.362955,-0.6218 -72.55584,-0.51966 -18,0.2 -13.6,-0.1 -13.6,-0.1 z m 80.621,-5.891206 c 15.19043,-50.034423 0,1e-5 15.19043,-50.034423 l -11.90624,-0.13228 2.73304,-9.302941 -44.32863,0.07339 -2.532953,8.036036 -11.321128,-0.18864 -17.955519,51.440073 c 0.02698,0.027 4.954586,0.0514 12.187488,0.0717 l -2.997994,9.804886 c 11.36463,0.0271 1.219679,-0.0736 46.117666,-0.31499 l 2.65246,-9.571696 c 7.08021,0.14819 11.59705,0.13117 12.16138,0.1189 z m -56.149615,-3.855606 13.7,-42.5 h 9.8 l 1.194896,32.99936 23.205109,-32.99936 h 9.9 l -13.6,42.5 h -7.099996 l 12.499996,-35.4 -24.50001,35.4 h -6.799995 l -0.8,-35 -10.8,35 z"
|
||||||
|
id="path60"
|
||||||
|
sodipodi:nodetypes="ccccssccsssccsssccsssssscsssscssscccscscscsccsccccccssssccccccsccsccccccccccccccccccccccccccccc" />
|
||||||
|
</svg>
|
|
@ -2916,18 +2916,3 @@ redis:
|
||||||
# Optional password if configured on the Redis instance
|
# Optional password if configured on the Redis instance
|
||||||
#
|
#
|
||||||
#password: <secret_password>
|
#password: <secret_password>
|
||||||
|
|
||||||
|
|
||||||
# Enable experimental features in Synapse.
|
|
||||||
#
|
|
||||||
# Experimental features might break or be removed without a deprecation
|
|
||||||
# period.
|
|
||||||
#
|
|
||||||
experimental_features:
|
|
||||||
# Support for Spaces (MSC1772), it enables the following:
|
|
||||||
#
|
|
||||||
# * The Spaces Summary API (MSC2946).
|
|
||||||
# * Restricting room membership based on space membership (MSC3083).
|
|
||||||
#
|
|
||||||
# Uncomment to disable support for Spaces.
|
|
||||||
#spaces_enabled: false
|
|
||||||
|
|
7 docs/setup/installation.md (new file)
|
@ -0,0 +1,7 @@
|
||||||
|
<!--
|
||||||
|
Include the contents of INSTALL.md from the project root without moving it, which may
|
||||||
|
break links around the internet. Additionally, note that SUMMARY.md is unable to
|
||||||
|
directly link to content outside of the docs/ directory. So we use this file as a
|
||||||
|
redirection.
|
||||||
|
-->
|
||||||
|
{{#include ../../INSTALL.md}}
|
|
@ -4,7 +4,7 @@ This document explains how to enable VoIP relaying on your Home Server with
|
||||||
TURN.
|
TURN.
|
||||||
|
|
||||||
The synapse Matrix Home Server supports integration with TURN server via the
|
The synapse Matrix Home Server supports integration with TURN server via the
|
||||||
[TURN server REST API](<http://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
|
[TURN server REST API](<https://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
|
||||||
allows the Home Server to generate credentials that are valid for use on the
|
allows the Home Server to generate credentials that are valid for use on the
|
||||||
TURN server through the use of a secret shared between the Home Server and the
|
TURN server through the use of a secret shared between the Home Server and the
|
||||||
TURN server.
|
TURN server.
|
||||||
|
|
7 docs/upgrading/README.md (new file)
|
@ -0,0 +1,7 @@
|
||||||
|
<!--
|
||||||
|
Include the contents of UPGRADE.rst from the project root without moving it, which may
|
||||||
|
break links around the internet. Additionally, note that SUMMARY.md is unable to
|
||||||
|
directly link to content outside of the docs/ directory. So we use this file as a
|
||||||
|
redirection.
|
||||||
|
-->
|
||||||
|
{{#include ../../UPGRADE.rst}}
|
7 docs/usage/administration/README.md (new file)
|
@ -0,0 +1,7 @@
|
||||||
|
# Administration
|
||||||
|
|
||||||
|
This section contains information on managing your Synapse homeserver. This includes:
|
||||||
|
|
||||||
|
* Managing users, rooms and media via the Admin API.
|
||||||
|
* Setting up metrics and monitoring to give you insight into your homeserver's health.
|
||||||
|
* Configuring structured logging.
|
29 docs/usage/administration/admin_api/README.md (new file)
|
@ -0,0 +1,29 @@
|
||||||
|
# The Admin API
|
||||||
|
|
||||||
|
## Authenticate as a server admin
|
||||||
|
|
||||||
|
Many of the API calls in the admin api will require an `access_token` for a
|
||||||
|
server admin. (Note that a server admin is distinct from a room admin.)
|
||||||
|
|
||||||
|
A user can be marked as a server admin by updating the database directly, e.g.:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
|
||||||
|
```
|
||||||
|
|
||||||
|
A new server admin user can also be created using the `register_new_matrix_user`
|
||||||
|
command. This is a script that is located in the `scripts/` directory, or possibly
|
||||||
|
already on your `$PATH` depending on how Synapse was installed.
|
||||||
|
|
||||||
|
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
|
||||||
|
|
||||||
|
## Making an Admin API request
|
||||||
|
Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by
|
||||||
|
providing the token as either a query parameter or a request header. To add it as a request header in cURL:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>
|
||||||
|
```
|
||||||
|
|
||||||
|
For more details on access tokens in Matrix, please refer to the complete
|
||||||
|
[matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens).
|
4 docs/usage/configuration/README.md (new file)
|
@ -0,0 +1,4 @@
|
||||||
|
# Configuration
|
||||||
|
|
||||||
|
This section contains information on tweaking Synapse via the various options in the configuration file. A configuration
|
||||||
|
file should have been generated when you [installed Synapse](../../setup/installation.html).
|
14 docs/usage/configuration/homeserver_sample_config.md (new file)
|
@ -0,0 +1,14 @@
|
||||||
|
# Homeserver Sample Configuration File
|
||||||
|
|
||||||
|
Below is a sample homeserver configuration file. The homeserver configuration file
|
||||||
|
can be tweaked to change the behaviour of your homeserver. A restart of the server is
|
||||||
|
generally required to apply any changes made to this file.
|
||||||
|
|
||||||
|
Note that the contents below are *not* intended to be copied and used as the basis for
|
||||||
|
a real homeserver.yaml. Instead, if you are starting from scratch, please generate
|
||||||
|
a fresh config using Synapse by following the instructions in
|
||||||
|
[Installation](../../setup/installation.md).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
{{#include ../../sample_config.yaml}}
|
||||||
|
```
|
14 docs/usage/configuration/logging_sample_config.md (new file)
|
@ -0,0 +1,14 @@
|
||||||
|
# Logging Sample Configuration File
|
||||||
|
|
||||||
|
Below is a sample logging configuration file. This file can be tweaked to control how your
|
||||||
|
homeserver will output logs. A restart of the server is generally required to apply any
|
||||||
|
changes made to this file.
|
||||||
|
|
||||||
|
Note that the contents below are *not* intended to be copied and used as the basis for
|
||||||
|
a real homeserver.yaml. Instead, if you are starting from scratch, please generate
|
||||||
|
a fresh config using Synapse by following the instructions in
|
||||||
|
[Installation](../../setup/installation.md).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
{{#include ../../sample_log_config.yaml}}
|
||||||
|
```
|
15 docs/usage/configuration/user_authentication/README.md (new file)
|
@ -0,0 +1,15 @@
|
||||||
|
# User Authentication
|
||||||
|
|
||||||
|
Synapse supports multiple methods of authenticating users, either out-of-the-box or through custom pluggable
|
||||||
|
authentication modules.
|
||||||
|
|
||||||
|
Included in Synapse is support for authenticating users via:
|
||||||
|
|
||||||
|
* A username and password.
|
||||||
|
* An email address and password.
|
||||||
|
* Single Sign-On through the SAML, Open ID Connect or CAS protocols.
|
||||||
|
* JSON Web Tokens.
|
||||||
|
* An administrator's shared secret.
|
||||||
|
|
||||||
|
Synapse can additionally be extended to support custom authentication schemes through optional "password auth provider"
|
||||||
|
modules.
|
30 docs/website_files/README.md (new file)
|
@ -0,0 +1,30 @@
|
||||||
|
# Documentation Website Files and Assets
|
||||||
|
|
||||||
|
This directory contains extra files for modifying the look and functionality of
|
||||||
|
[mdbook](https://github.com/rust-lang/mdBook), the documentation software that's
|
||||||
|
used to generate Synapse's documentation website.
|
||||||
|
|
||||||
|
The configuration options in the `output.html` section of [book.toml](../../book.toml)
|
||||||
|
point to additional JS/CSS in this directory that are added on each page load. In
|
||||||
|
addition, the `theme` directory contains files that overwrite their counterparts in
|
||||||
|
each of the default themes included with mdbook.
|
||||||
|
|
||||||
|
Currently we use these files to generate a floating Table of Contents panel. The code for
|
||||||
|
which was partially taken from
|
||||||
|
[JorelAli/mdBook-pagetoc](https://github.com/JorelAli/mdBook-pagetoc/)
|
||||||
|
before being modified such that it scrolls with the content of the page. This is handled
|
||||||
|
by the `table-of-contents.js/css` files. The table of contents panel only appears on pages
|
||||||
|
that have more than one header, as well as only appearing on desktop-sized monitors.
|
||||||
|
|
||||||
|
We remove the navigation arrows which typically appear on the left and right side of the
|
||||||
|
screen on desktop as they interfere with the table of contents. This is handled by
|
||||||
|
the `remove-nav-buttons.css` file.
|
||||||
|
|
||||||
|
Finally, we also stylise the chapter titles in the left sidebar by indenting them
|
||||||
|
slightly so that they are more visually distinguishable from the section headers
|
||||||
|
(the bold titles). This is done through the `indent-section-headers.css` file.
|
||||||
|
|
||||||
|
More information can be found in mdbook's official documentation for
|
||||||
|
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
|
||||||
|
and
|
||||||
|
[customising the default themes](https://rust-lang.github.io/mdBook/format/theme/index.html).
|
7 docs/website_files/indent-section-headers.css (new file)
|
@ -0,0 +1,7 @@
|
||||||
|
/*
|
||||||
|
* Indents each chapter title in the left sidebar so that they aren't
|
||||||
|
* at the same level as the section headers.
|
||||||
|
*/
|
||||||
|
.chapter-item {
|
||||||
|
margin-left: 1em;
|
||||||
|
}
|
8 docs/website_files/remove-nav-buttons.css (new file)
|
@ -0,0 +1,8 @@
|
||||||
|
/* Remove the prev, next chapter buttons as they interfere with the
|
||||||
|
* table of contents.
|
||||||
|
* Note that the table of contents only appears on desktop, thus we
|
||||||
|
* only remove the desktop (wide) chapter buttons.
|
||||||
|
*/
|
||||||
|
.nav-wide-wrapper {
|
||||||
|
display: none
|
||||||
|
}
|
42 docs/website_files/table-of-contents.css (new file)
|
@ -0,0 +1,42 @@
|
||||||
|
@media only screen and (max-width:1439px) {
|
||||||
|
.sidetoc {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@media only screen and (min-width:1440px) {
|
||||||
|
main {
|
||||||
|
position: relative;
|
||||||
|
margin-left: 100px !important;
|
||||||
|
}
|
||||||
|
.sidetoc {
|
||||||
|
margin-left: auto;
|
||||||
|
margin-right: auto;
|
||||||
|
left: calc(100% + (var(--content-max-width))/4 - 140px);
|
||||||
|
position: absolute;
|
||||||
|
text-align: right;
|
||||||
|
}
|
||||||
|
.pagetoc {
|
||||||
|
position: fixed;
|
||||||
|
width: 250px;
|
||||||
|
overflow: auto;
|
||||||
|
right: 20px;
|
||||||
|
height: calc(100% - var(--menu-bar-height));
|
||||||
|
}
|
||||||
|
.pagetoc a {
|
||||||
|
color: var(--fg) !important;
|
||||||
|
display: block;
|
||||||
|
padding: 5px 15px 5px 10px;
|
||||||
|
text-align: left;
|
||||||
|
text-decoration: none;
|
||||||
|
}
|
||||||
|
.pagetoc a:hover,
|
||||||
|
.pagetoc a.active {
|
||||||
|
background: var(--sidebar-bg) !important;
|
||||||
|
color: var(--sidebar-fg) !important;
|
||||||
|
}
|
||||||
|
.pagetoc .active {
|
||||||
|
background: var(--sidebar-bg);
|
||||||
|
color: var(--sidebar-fg);
|
||||||
|
}
|
||||||
|
}
|
134 docs/website_files/table-of-contents.js Normal file

@@ -0,0 +1,134 @@
const getPageToc = () => document.getElementsByClassName('pagetoc')[0];

const pageToc = getPageToc();
const pageTocChildren = [...pageToc.children];
const headers = [...document.getElementsByClassName('header')];


// Select highlighted item in ToC when clicking an item
pageTocChildren.forEach(child => {
    child.addEventListener('click', () => {
        pageTocChildren.forEach(child => {
            child.classList.remove('active');
        });
        child.classList.add('active');
    });
});


/**
 * Test whether a node is in the viewport
 */
function isInViewport(node) {
    const rect = node.getBoundingClientRect();
    return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || document.documentElement.clientHeight) && rect.right <= (window.innerWidth || document.documentElement.clientWidth);
}


/**
 * Set a new ToC entry.
 * Clear any previously highlighted ToC items, set the new one,
 * and adjust the ToC scroll position.
 */
function setTocEntry() {
    let activeEntry;
    const pageTocChildren = [...getPageToc().children];

    // Calculate which header is the current one at the top of screen
    headers.forEach(header => {
        if (window.pageYOffset >= header.offsetTop) {
            activeEntry = header;
        }
    });

    // Update selected item in ToC when scrolling
    pageTocChildren.forEach(child => {
        if (activeEntry.href.localeCompare(child.href) === 0) {
            child.classList.add('active');
        } else {
            child.classList.remove('active');
        }
    });

    let tocEntryForLocation = document.querySelector(`nav a[href="${activeEntry.href}"]`);
    if (tocEntryForLocation) {
        const headingForLocation = document.querySelector(activeEntry.hash);
        if (headingForLocation && isInViewport(headingForLocation)) {
            // Update ToC scroll
            const nav = getPageToc();
            const content = document.querySelector('html');
            if (content.scrollTop !== 0) {
                nav.scrollTo({
                    top: tocEntryForLocation.offsetTop - 100,
                    left: 0,
                    behavior: 'smooth',
                });
            } else {
                nav.scrollTop = 0;
            }
        }
    }
}


/**
 * Populate sidebar on load
 */
window.addEventListener('load', () => {
    // Only create table of contents if there is more than one header on the page
    if (headers.length <= 1) {
        return;
    }

    // Create an entry in the page table of contents for each header in the document
    headers.forEach((header, index) => {
        const link = document.createElement('a');

        // Indent shows hierarchy
        let indent = '0px';
        switch (header.parentElement.tagName) {
            case 'H1':
                indent = '5px';
                break;
            case 'H2':
                indent = '20px';
                break;
            case 'H3':
                indent = '30px';
                break;
            case 'H4':
                indent = '40px';
                break;
            case 'H5':
                indent = '50px';
                break;
            case 'H6':
                indent = '60px';
                break;
            default:
                break;
        }

        let tocEntry;
        if (index == 0) {
            // Create a bolded title for the first element
            tocEntry = document.createElement("strong");
            tocEntry.innerHTML = header.text;
        } else {
            // All other elements are non-bold
            tocEntry = document.createTextNode(header.text);
        }
        link.appendChild(tocEntry);

        link.style.paddingLeft = indent;
        link.href = header.href;
        pageToc.appendChild(link);
    });
    setTocEntry.call();
});


// Handle active headers on scroll, if there is more than one header on the page
if (headers.length > 1) {
    window.addEventListener('scroll', setTocEntry);
}
312
docs/website_files/theme/index.hbs
Normal file
312
docs/website_files/theme/index.hbs
Normal file
|
@ -0,0 +1,312 @@
|
||||||
|
<!DOCTYPE HTML>
|
||||||
|
<html lang="{{ language }}" class="sidebar-visible no-js {{ default_theme }}">
|
||||||
|
<head>
|
||||||
|
<!-- Book generated using mdBook -->
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>{{ title }}</title>
|
||||||
|
{{#if is_print }}
|
||||||
|
<meta name="robots" content="noindex" />
|
||||||
|
{{/if}}
|
||||||
|
{{#if base_url}}
|
||||||
|
<base href="{{ base_url }}">
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
|
||||||
|
<!-- Custom HTML head -->
|
||||||
|
{{> head}}
|
||||||
|
|
||||||
|
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
|
||||||
|
<meta name="description" content="{{ description }}">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
<meta name="theme-color" content="#ffffff" />
|
||||||
|
|
||||||
|
{{#if favicon_svg}}
|
||||||
|
<link rel="icon" href="{{ path_to_root }}favicon.svg">
|
||||||
|
{{/if}}
|
||||||
|
{{#if favicon_png}}
|
||||||
|
<link rel="shortcut icon" href="{{ path_to_root }}favicon.png">
|
||||||
|
{{/if}}
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}css/variables.css">
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}css/general.css">
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}css/chrome.css">
|
||||||
|
{{#if print_enable}}
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}css/print.css" media="print">
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
<!-- Fonts -->
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}FontAwesome/css/font-awesome.css">
|
||||||
|
{{#if copy_fonts}}
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}fonts/fonts.css">
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
<!-- Highlight.js Stylesheets -->
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}highlight.css">
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}tomorrow-night.css">
|
||||||
|
<link rel="stylesheet" href="{{ path_to_root }}ayu-highlight.css">
|
||||||
|
|
||||||
|
<!-- Custom theme stylesheets -->
|
||||||
|
{{#each additional_css}}
|
||||||
|
<link rel="stylesheet" href="{{ ../path_to_root }}{{ this }}">
|
||||||
|
{{/each}}
|
||||||
|
|
||||||
|
{{#if mathjax_support}}
|
||||||
|
<!-- MathJax -->
|
||||||
|
<script async type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
|
||||||
|
{{/if}}
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<!-- Provide site root to javascript -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
var path_to_root = "{{ path_to_root }}";
|
||||||
|
var default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? "{{ preferred_dark_theme }}" : "{{ default_theme }}";
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<!-- Work around some values being stored in localStorage wrapped in quotes -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
try {
|
||||||
|
var theme = localStorage.getItem('mdbook-theme');
|
||||||
|
var sidebar = localStorage.getItem('mdbook-sidebar');
|
||||||
|
if (theme.startsWith('"') && theme.endsWith('"')) {
|
||||||
|
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
|
||||||
|
}
|
||||||
|
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
|
||||||
|
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
|
||||||
|
}
|
||||||
|
} catch (e) { }
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<!-- Set the theme before any content is loaded, prevents flash -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
var theme;
|
||||||
|
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
|
||||||
|
if (theme === null || theme === undefined) { theme = default_theme; }
|
||||||
|
var html = document.querySelector('html');
|
||||||
|
html.classList.remove('no-js')
|
||||||
|
html.classList.remove('{{ default_theme }}')
|
||||||
|
html.classList.add(theme);
|
||||||
|
html.classList.add('js');
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<!-- Hide / unhide sidebar before it is displayed -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
var html = document.querySelector('html');
|
||||||
|
var sidebar = 'hidden';
|
||||||
|
if (document.body.clientWidth >= 1080) {
|
||||||
|
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
|
||||||
|
sidebar = sidebar || 'visible';
|
||||||
|
}
|
||||||
|
html.classList.remove('sidebar-visible');
|
||||||
|
html.classList.add("sidebar-" + sidebar);
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
|
||||||
|
<div class="sidebar-scrollbox">
|
||||||
|
{{#toc}}{{/toc}}
|
||||||
|
</div>
|
||||||
|
<div id="sidebar-resize-handle" class="sidebar-resize-handle"></div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<div id="page-wrapper" class="page-wrapper">
|
||||||
|
|
||||||
|
<div class="page">
|
||||||
|
{{> header}}
|
||||||
|
<div id="menu-bar-hover-placeholder"></div>
|
||||||
|
<div id="menu-bar" class="menu-bar sticky bordered">
|
||||||
|
<div class="left-buttons">
|
||||||
|
<button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
|
||||||
|
<i class="fa fa-bars"></i>
|
||||||
|
</button>
|
||||||
|
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
|
||||||
|
<i class="fa fa-paint-brush"></i>
|
||||||
|
</button>
|
||||||
|
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
|
||||||
|
<li role="none"><button role="menuitem" class="theme" id="light">{{ theme_option "Light" }}</button></li>
|
||||||
|
<li role="none"><button role="menuitem" class="theme" id="rust">{{ theme_option "Rust" }}</button></li>
|
||||||
|
<li role="none"><button role="menuitem" class="theme" id="coal">{{ theme_option "Coal" }}</button></li>
|
||||||
|
<li role="none"><button role="menuitem" class="theme" id="navy">{{ theme_option "Navy" }}</button></li>
|
||||||
|
<li role="none"><button role="menuitem" class="theme" id="ayu">{{ theme_option "Ayu" }}</button></li>
|
||||||
|
</ul>
|
||||||
|
{{#if search_enabled}}
|
||||||
|
<button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
|
||||||
|
<i class="fa fa-search"></i>
|
||||||
|
</button>
|
||||||
|
{{/if}}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<h1 class="menu-title">{{ book_title }}</h1>
|
||||||
|
|
||||||
|
<div class="right-buttons">
|
||||||
|
{{#if print_enable}}
|
||||||
|
<a href="{{ path_to_root }}print.html" title="Print this book" aria-label="Print this book">
|
||||||
|
<i id="print-button" class="fa fa-print"></i>
|
||||||
|
</a>
|
||||||
|
{{/if}}
|
||||||
|
{{#if git_repository_url}}
|
||||||
|
<a href="{{git_repository_url}}" title="Git repository" aria-label="Git repository">
|
||||||
|
<i id="git-repository-button" class="fa {{git_repository_icon}}"></i>
|
||||||
|
</a>
|
||||||
|
{{/if}}
|
||||||
|
{{#if git_repository_edit_url}}
|
||||||
|
<a href="{{git_repository_edit_url}}" title="Suggest an edit" aria-label="Suggest an edit">
|
||||||
|
<i id="git-edit-button" class="fa fa-edit"></i>
|
||||||
|
</a>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{#if search_enabled}}
|
||||||
|
<div id="search-wrapper" class="hidden">
|
||||||
|
<form id="searchbar-outer" class="searchbar-outer">
|
||||||
|
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
|
||||||
|
</form>
|
||||||
|
<div id="searchresults-outer" class="searchresults-outer hidden">
|
||||||
|
<div id="searchresults-header" class="searchresults-header"></div>
|
||||||
|
<ul id="searchresults">
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
|
||||||
|
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
|
||||||
|
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
|
||||||
|
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<div id="content" class="content">
|
||||||
|
<main>
|
||||||
|
<!-- Page table of contents -->
|
||||||
|
<div class="sidetoc">
|
||||||
|
<nav class="pagetoc"></nav>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{{ content }}}
|
||||||
|
</main>
|
||||||
|
|
||||||
|
<nav class="nav-wrapper" aria-label="Page navigation">
|
||||||
|
<!-- Mobile navigation buttons -->
|
||||||
|
{{#previous}}
|
||||||
|
<a rel="prev" href="{{ path_to_root }}{{link}}" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||||
|
<i class="fa fa-angle-left"></i>
|
||||||
|
</a>
|
||||||
|
{{/previous}}
|
||||||
|
|
||||||
|
{{#next}}
|
||||||
|
<a rel="next" href="{{ path_to_root }}{{link}}" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||||
|
<i class="fa fa-angle-right"></i>
|
||||||
|
</a>
|
||||||
|
{{/next}}
|
||||||
|
|
||||||
|
<div style="clear: both"></div>
|
||||||
|
</nav>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<nav class="nav-wide-wrapper" aria-label="Page navigation">
|
||||||
|
{{#previous}}
|
||||||
|
<a rel="prev" href="{{ path_to_root }}{{link}}" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||||
|
<i class="fa fa-angle-left"></i>
|
||||||
|
</a>
|
||||||
|
{{/previous}}
|
||||||
|
|
||||||
|
{{#next}}
|
||||||
|
<a rel="next" href="{{ path_to_root }}{{link}}" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||||
|
<i class="fa fa-angle-right"></i>
|
||||||
|
</a>
|
||||||
|
{{/next}}
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{#if livereload}}
|
||||||
|
<!-- Livereload script (if served using the cli tool) -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
var socket = new WebSocket("{{{livereload}}}");
|
||||||
|
socket.onmessage = function (event) {
|
||||||
|
if (event.data === "reload") {
|
||||||
|
socket.close();
|
||||||
|
location.reload();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
window.onbeforeunload = function() {
|
||||||
|
socket.close();
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
{{#if google_analytics}}
|
||||||
|
<!-- Google Analytics Tag -->
|
||||||
|
<script type="text/javascript">
|
||||||
|
var localAddrs = ["localhost", "127.0.0.1", ""];
|
||||||
|
// make sure we don't activate google analytics if the developer is
|
||||||
|
// inspecting the book locally...
|
||||||
|
if (localAddrs.indexOf(document.location.hostname) === -1) {
|
||||||
|
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
|
||||||
|
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
|
||||||
|
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
|
||||||
|
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
|
||||||
|
ga('create', '{{google_analytics}}', 'auto');
|
||||||
|
ga('send', 'pageview');
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
{{#if playground_line_numbers}}
|
||||||
|
<script type="text/javascript">
|
||||||
|
window.playground_line_numbers = true;
|
||||||
|
</script>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
{{#if playground_copyable}}
|
||||||
|
<script type="text/javascript">
|
||||||
|
window.playground_copyable = true;
|
||||||
|
</script>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
{{#if playground_js}}
|
||||||
|
<script src="{{ path_to_root }}ace.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}editor.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}mode-rust.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}theme-dawn.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}theme-tomorrow_night.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
{{#if search_js}}
|
||||||
|
<script src="{{ path_to_root }}elasticlunr.min.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}mark.min.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}searcher.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
<script src="{{ path_to_root }}clipboard.min.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}highlight.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
<script src="{{ path_to_root }}book.js" type="text/javascript" charset="utf-8"></script>
|
||||||
|
|
||||||
|
<!-- Custom JS scripts -->
|
||||||
|
{{#each additional_js}}
|
||||||
|
<script type="text/javascript" src="{{ ../path_to_root }}{{this}}"></script>
|
||||||
|
{{/each}}
|
||||||
|
|
||||||
|
{{#if is_print}}
|
||||||
|
{{#if mathjax_support}}
|
||||||
|
<script type="text/javascript">
|
||||||
|
window.addEventListener('load', function() {
|
||||||
|
MathJax.Hub.Register.StartupHook('End', function() {
|
||||||
|
window.setTimeout(window.print, 100);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
{{else}}
|
||||||
|
<script type="text/javascript">
|
||||||
|
window.addEventListener('load', function() {
|
||||||
|
window.setTimeout(window.print, 100);
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
{{/if}}
|
||||||
|
{{/if}}
|
||||||
|
|
||||||
|
</body>
|
||||||
|
</html>
|
4 docs/welcome_and_overview.md Normal file

@@ -0,0 +1,4 @@
# Introduction

Welcome to the documentation repository for Synapse, the reference
[Matrix](https://matrix.org) homeserver implementation.
@@ -228,6 +228,9 @@ expressions:
     ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
     ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
     ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/
+    ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$
+    ^/_matrix/client/(api/v1|r0|unstable)/search$

     # Registration/login requests
     ^/_matrix/client/(api/v1|r0|unstable)/login$
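The three endpoints added above can now be routed to generic worker processes. As a rough illustration of how a reverse proxy or routing layer might test an incoming request path against these patterns, here is a hedged Python sketch; the pattern list and helper name are illustrative rather than anything Synapse ships.

```python
import re

# Illustrative only: send paths matching the worker-capable endpoint patterns
# listed above to a generic worker, and everything else to the main process.
WORKER_PATTERNS = [
    re.compile(p)
    for p in (
        r"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/",
        r"^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$",
        r"^/_matrix/client/(api/v1|r0|unstable)/search$",
    )
]

def route_to_worker(path: str) -> bool:
    """Return True if the request path may be served by a generic worker."""
    return any(pattern.match(path) for pattern in WORKER_PATTERNS)

assert route_to_worker("/_matrix/client/r0/joined_rooms")
assert not route_to_worker("/_matrix/client/r0/login")
```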
3 mypy.ini

@@ -32,6 +32,7 @@ files =
   synapse/http/federation/matrix_federation_agent.py,
   synapse/http/federation/well_known_resolver.py,
   synapse/http/matrixfederationclient.py,
+  synapse/http/servlet.py,
   synapse/http/server.py,
   synapse/http/site.py,
   synapse/logging,

@@ -130,7 +131,7 @@ ignore_missing_imports = True
 [mypy-canonicaljson]
 ignore_missing_imports = True

-[mypy-jaeger_client]
+[mypy-jaeger_client.*]
 ignore_missing_imports = True

 [mypy-jsonschema]
@@ -1,108 +0,0 @@
import json
import sys
import time

import psycopg2
import yaml
from canonicaljson import encode_canonical_json
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json
from unpaddedbase64 import encode_base64

db_binary_type = memoryview


def select_v1_keys(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, verify_key in rows:
        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
    return results


def select_v1_certs(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, tls_certificate in rows:
        results[server_name] = tls_certificate
    return results


def select_v2_json(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, key_json in rows:
        results.setdefault(server_name, {})[key_id] = json.loads(
            str(key_json).decode("utf-8")
        )
    return results


def convert_v1_to_v2(server_name, valid_until, keys, certificate):
    return {
        "old_verify_keys": {},
        "server_name": server_name,
        "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
        "valid_until_ts": valid_until,
    }


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)
    for key_id in json["verify_keys"]:
        yield (server, key_id, "-", valid_until, valid_until, db_binary_type(key_json))


def main():
    config = yaml.safe_load(open(sys.argv[1]))
    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24

    server_name = config["server_name"]
    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]

    database = config["database"]
    assert database["name"] == "psycopg2", "Can only convert for postgresql"
    args = database["args"]
    args.pop("cp_max")
    args.pop("cp_min")
    connection = psycopg2.connect(**args)
    keys = select_v1_keys(connection)
    certificates = select_v1_certs(connection)
    json = select_v2_json(connection)

    result = {}
    for server in keys:
        if server not in json:
            v2_json = convert_v1_to_v2(
                server, valid_until, keys[server], certificates[server]
            )
            v2_json = sign_json(v2_json, server_name, signing_key)
            result[server] = v2_json

    yaml.safe_dump(result, sys.stdout, default_flow_style=False)

    rows = [row for server, json in result.items() for row in rows_v2(server, json)]

    cursor = connection.cursor()
    cursor.executemany(
        "INSERT INTO server_keys_json ("
        " server_name, key_id, from_server,"
        " ts_added_ms, ts_valid_until_ms, key_json"
        ") VALUES (%s, %s, %s, %s, %s, %s)",
        rows,
    )
    connection.commit()


if __name__ == "__main__":
    main()
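For reference, the deleted script above emitted v2 key JSON built by `convert_v1_to_v2()`. A sketch of the resulting structure for a hypothetical server (all values below are made up):

```python
# Illustrative only: the shape produced by convert_v1_to_v2() for a hypothetical
# server "example.org" with one ed25519 key.
v2_key_json = {
    "old_verify_keys": {},
    "server_name": "example.org",
    "verify_keys": {
        "ed25519:auto": {"key": "Base64EncodedPublicKey"},
    },
    # Milliseconds since the epoch, rounded down to a whole day by main().
    "valid_until_ts": 1622937600000,
}
```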
@@ -139,7 +139,7 @@ def run():
         click.get_current_context().abort()

     # Switch to the release branch.
-    release_branch_name = f"release-v{base_version}"
+    release_branch_name = f"release-v{current_version.major}.{current_version.minor}"
     release_branch = find_ref(repo, release_branch_name)
     if release_branch:
         if release_branch.is_remote():
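The change above names release branches after the major and minor version components only, so release candidates and patch releases share a single branch. A small sketch of that rule, assuming a semver-style version object (here from `packaging`, purely for illustration):

```python
from packaging.version import Version

# Sketch of the branch-naming rule used above; the release script itself may
# parse versions differently.
current_version = Version("1.36.0rc1")
release_branch_name = f"release-v{current_version.major}.{current_version.minor}"
assert release_branch_name == "release-v1.36"
```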
@@ -47,7 +47,7 @@
 except ImportError:
     pass

-__version__ = "1.35.1"
+__version__ = "1.36.0rc1"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
@@ -206,11 +206,11 @@ class Auth:
             requester = create_requester(user_id, app_service=app_service)

             request.requester = user_id
+            if user_id in self._force_tracing_for_users:
+                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
             opentracing.set_tag("authenticated_entity", user_id)
             opentracing.set_tag("user_id", user_id)
             opentracing.set_tag("appservice_id", app_service.id)
-            if user_id in self._force_tracing_for_users:
-                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)

             return requester
@@ -259,12 +259,12 @@ class Auth:
             )

             request.requester = requester
+            if user_info.token_owner in self._force_tracing_for_users:
+                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
             opentracing.set_tag("authenticated_entity", user_info.token_owner)
             opentracing.set_tag("user_id", user_info.user_id)
             if device_id:
                 opentracing.set_tag("device_id", device_id)
-            if user_info.token_owner in self._force_tracing_for_users:
-                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)

             return requester
         except KeyError:
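Both hunks above set the sampling-priority tag before the other request tags, so forced tracing applies to everything recorded on the span. A hedged sketch of the general pattern with an opentracing-style tracer; apart from the tag constant, the names below are hypothetical:

```python
from opentracing import global_tracer, tags

# Illustrative pattern only: force a trace to be kept for a configured set of
# users by setting the sampling-priority tag before any other tags.
FORCE_TRACING_FOR_USERS = {"@admin:example.org"}

def tag_request_span(user_id: str) -> None:
    span = global_tracer().active_span
    if span is None:
        return
    if user_id in FORCE_TRACING_FOR_USERS:
        # sampling.priority = 1 asks the tracer to sample/keep this trace.
        span.set_tag(tags.SAMPLING_PRIORITY, 1)
    span.set_tag("authenticated_entity", user_id)
    span.set_tag("user_id", user_id)
```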
@@ -181,6 +181,6 @@ KNOWN_ROOM_VERSIONS = {
         RoomVersions.V5,
         RoomVersions.V6,
         RoomVersions.MSC2176,
+        RoomVersions.MSC3083,
     )
-    # Note that we do not include MSC3083 here unless it is enabled in the config.
 }  # type: Dict[str, RoomVersion]
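With the hunk above, the MSC3083 room version is always present in `KNOWN_ROOM_VERSIONS` rather than being registered only when the experimental spaces config is enabled. As a rough illustration of how such a registry gates unsupported versions (the identifiers and helper below are illustrative, not Synapse's API):

```python
# Illustrative stand-in for the real mapping of version identifier -> RoomVersion.
KNOWN_ROOM_VERSIONS = {"1", "5", "6", "org.matrix.msc3083"}

def check_room_version(version_id: str) -> None:
    if version_id not in KNOWN_ROOM_VERSIONS:
        raise ValueError(f"Unsupported room version: {version_id}")

check_room_version("org.matrix.msc3083")  # accepted without extra config
```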
@@ -261,13 +261,10 @@ def refresh_certificate(hs):
     Refresh the TLS certificates that Synapse is using by re-reading them from
     disk and updating the TLS context factories to use them.
     """

     if not hs.config.has_tls_listener():
-        # attempt to reload the certs for the good of the tls_fingerprints
-        hs.config.read_certificate_from_disk(require_cert_and_key=False)
         return

-    hs.config.read_certificate_from_disk(require_cert_and_key=True)
+    hs.config.read_certificate_from_disk()
     hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)

     if hs._listening_services:
@@ -109,7 +109,7 @@ from synapse.storage.databases.main.monthly_active_users import (
     MonthlyActiveUsersWorkerStore,
 )
 from synapse.storage.databases.main.presence import PresenceStore
-from synapse.storage.databases.main.search import SearchWorkerStore
+from synapse.storage.databases.main.search import SearchStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.databases.main.transactions import TransactionWorkerStore
 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore

@@ -242,7 +242,7 @@ class GenericWorkerSlavedStore(
     MonthlyActiveUsersWorkerStore,
     MediaRepositoryStore,
     ServerMetricsStore,
-    SearchWorkerStore,
+    SearchStore,
     TransactionWorkerStore,
     BaseSlavedStore,
 ):
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.config._base import Config
 from synapse.types import JsonDict

@@ -28,27 +27,5 @@ class ExperimentalConfig(Config):
         # MSC2858 (multiple SSO identity providers)
         self.msc2858_enabled = experimental.get("msc2858_enabled", False)  # type: bool

-        # Spaces (MSC1772, MSC2946, MSC3083, etc)
-        self.spaces_enabled = experimental.get("spaces_enabled", True)  # type: bool
-        if self.spaces_enabled:
-            KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083
-
         # MSC3026 (busy presence state)
         self.msc3026_enabled = experimental.get("msc3026_enabled", False)  # type: bool
-
-    def generate_config_section(self, **kwargs):
-        return """\
-        # Enable experimental features in Synapse.
-        #
-        # Experimental features might break or be removed without a deprecation
-        # period.
-        #
-        experimental_features:
-          # Support for Spaces (MSC1772), it enables the following:
-          #
-          # * The Spaces Summary API (MSC2946).
-          # * Restricting room membership based on space membership (MSC3083).
-          #
-          # Uncomment to disable support for Spaces.
-          #spaces_enabled: false
-        """
@@ -215,28 +215,12 @@ class TlsConfig(Config):
         days_remaining = (expires_on - now).days
         return days_remaining

-    def read_certificate_from_disk(self, require_cert_and_key: bool):
+    def read_certificate_from_disk(self):
         """
         Read the certificates and private key from disk.
-
-        Args:
-            require_cert_and_key: set to True to throw an error if the certificate
-                and key file are not given
         """
-        if require_cert_and_key:
-            self.tls_private_key = self.read_tls_private_key()
-            self.tls_certificate = self.read_tls_certificate()
-        elif self.tls_certificate_file:
-            # we only need the certificate for the tls_fingerprints. Reload it if we
-            # can, but it's not a fatal error if we can't.
-            try:
-                self.tls_certificate = self.read_tls_certificate()
-            except Exception as e:
-                logger.info(
-                    "Unable to read TLS certificate (%s). Ignoring as no "
-                    "tls listeners enabled.",
-                    e,
-                )
+        self.tls_private_key = self.read_tls_private_key()
+        self.tls_certificate = self.read_tls_certificate()

     def generate_config_section(
         self,
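The method above now unconditionally reads both the certificate and the private key. For context, here is a hedged sketch of computing the `days_remaining` figure used earlier in this class, using the `cryptography` package; the path and helper name are hypothetical and Synapse's own `read_tls_certificate()` may behave differently.

```python
from datetime import datetime, timezone

from cryptography import x509

# Hedged sketch: load a PEM certificate and report how many days remain before
# it expires, mirroring the days_remaining computation shown above.
def days_until_expiry(cert_path: str) -> int:
    with open(cert_path, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())
    expires_on = cert.not_valid_after  # naive datetime in UTC
    now = datetime.now(timezone.utc).replace(tzinfo=None)
    return (expires_on - now).days

# e.g. days_until_expiry("/path/to/tls.crt")
```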
@ -16,8 +16,7 @@
|
||||||
import abc
|
import abc
|
||||||
import logging
|
import logging
|
||||||
import urllib
|
import urllib
|
||||||
from collections import defaultdict
|
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
|
||||||
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Tuple
|
|
||||||
|
|
||||||
import attr
|
import attr
|
||||||
from signedjson.key import (
|
from signedjson.key import (
|
||||||
|
@ -44,17 +43,12 @@ from synapse.api.errors import (
|
||||||
from synapse.config.key import TrustedKeyServer
|
from synapse.config.key import TrustedKeyServer
|
||||||
from synapse.events import EventBase
|
from synapse.events import EventBase
|
||||||
from synapse.events.utils import prune_event_dict
|
from synapse.events.utils import prune_event_dict
|
||||||
from synapse.logging.context import (
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
PreserveLoggingContext,
|
|
||||||
make_deferred_yieldable,
|
|
||||||
preserve_fn,
|
|
||||||
run_in_background,
|
|
||||||
)
|
|
||||||
from synapse.storage.keys import FetchKeyResult
|
from synapse.storage.keys import FetchKeyResult
|
||||||
from synapse.types import JsonDict
|
from synapse.types import JsonDict
|
||||||
from synapse.util import unwrapFirstError
|
from synapse.util import unwrapFirstError
|
||||||
from synapse.util.async_helpers import yieldable_gather_results
|
from synapse.util.async_helpers import yieldable_gather_results
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.batching_queue import BatchingQueue
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
@ -80,32 +74,19 @@ class VerifyJsonRequest:
|
||||||
minimum_valid_until_ts: time at which we require the signing key to
|
minimum_valid_until_ts: time at which we require the signing key to
|
||||||
be valid. (0 implies we don't care)
|
be valid. (0 implies we don't care)
|
||||||
|
|
||||||
request_name: The name of the request.
|
|
||||||
|
|
||||||
key_ids: The set of key_ids to that could be used to verify the JSON object
|
key_ids: The set of key_ids to that could be used to verify the JSON object
|
||||||
|
|
||||||
key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
|
|
||||||
A deferred (server_name, key_id, verify_key) tuple that resolves when
|
|
||||||
a verify key has been fetched. The deferreds' callbacks are run with no
|
|
||||||
logcontext.
|
|
||||||
|
|
||||||
If we are unable to find a key which satisfies the request, the deferred
|
|
||||||
errbacks with an M_UNAUTHORIZED SynapseError.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
server_name = attr.ib(type=str)
|
server_name = attr.ib(type=str)
|
||||||
get_json_object = attr.ib(type=Callable[[], JsonDict])
|
get_json_object = attr.ib(type=Callable[[], JsonDict])
|
||||||
minimum_valid_until_ts = attr.ib(type=int)
|
minimum_valid_until_ts = attr.ib(type=int)
|
||||||
request_name = attr.ib(type=str)
|
|
||||||
key_ids = attr.ib(type=List[str])
|
key_ids = attr.ib(type=List[str])
|
||||||
key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def from_json_object(
|
def from_json_object(
|
||||||
server_name: str,
|
server_name: str,
|
||||||
json_object: JsonDict,
|
json_object: JsonDict,
|
||||||
minimum_valid_until_ms: int,
|
minimum_valid_until_ms: int,
|
||||||
request_name: str,
|
|
||||||
):
|
):
|
||||||
"""Create a VerifyJsonRequest to verify all signatures on a signed JSON
|
"""Create a VerifyJsonRequest to verify all signatures on a signed JSON
|
||||||
object for the given server.
|
object for the given server.
|
||||||
|
@ -115,7 +96,6 @@ class VerifyJsonRequest:
|
||||||
server_name,
|
server_name,
|
||||||
lambda: json_object,
|
lambda: json_object,
|
||||||
minimum_valid_until_ms,
|
minimum_valid_until_ms,
|
||||||
request_name=request_name,
|
|
||||||
key_ids=key_ids,
|
key_ids=key_ids,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -135,16 +115,48 @@ class VerifyJsonRequest:
|
||||||
# memory than the Event object itself.
|
# memory than the Event object itself.
|
||||||
lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
|
lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
|
||||||
minimum_valid_until_ms,
|
minimum_valid_until_ms,
|
||||||
request_name=event.event_id,
|
|
||||||
key_ids=key_ids,
|
key_ids=key_ids,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def to_fetch_key_request(self) -> "_FetchKeyRequest":
|
||||||
|
"""Create a key fetch request for all keys needed to satisfy the
|
||||||
|
verification request.
|
||||||
|
"""
|
||||||
|
return _FetchKeyRequest(
|
||||||
|
server_name=self.server_name,
|
||||||
|
minimum_valid_until_ts=self.minimum_valid_until_ts,
|
||||||
|
key_ids=self.key_ids,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class KeyLookupError(ValueError):
|
class KeyLookupError(ValueError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@attr.s(slots=True)
|
||||||
|
class _FetchKeyRequest:
|
||||||
|
"""A request for keys for a given server.
|
||||||
|
|
||||||
|
We will continue to try and fetch until we have all the keys listed under
|
||||||
|
`key_ids` (with an appropriate `valid_until_ts` property) or we run out of
|
||||||
|
places to fetch keys from.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
server_name: The name of the server that owns the keys.
|
||||||
|
minimum_valid_until_ts: The timestamp which the keys must be valid until.
|
||||||
|
key_ids: The IDs of the keys to attempt to fetch
|
||||||
|
"""
|
||||||
|
|
||||||
|
server_name = attr.ib(type=str)
|
||||||
|
minimum_valid_until_ts = attr.ib(type=int)
|
||||||
|
key_ids = attr.ib(type=List[str])
|
||||||
|
|
||||||
|
|
||||||
class Keyring:
|
class Keyring:
|
||||||
|
"""Handles verifying signed JSON objects and fetching the keys needed to do
|
||||||
|
so.
|
||||||
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
|
self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
|
||||||
):
|
):
|
||||||
|
@@ -158,22 +170,22 @@ class Keyring:
             )
         self._key_fetchers = key_fetchers

-        # map from server name to Deferred. Has an entry for each server with
-        # an ongoing key download; the Deferred completes once the download
-        # completes.
-        #
-        # These are regular, logcontext-agnostic Deferreds.
-        self.key_downloads = {}  # type: Dict[str, defer.Deferred]
+        self._server_queue = BatchingQueue(
+            "keyring_server",
+            clock=hs.get_clock(),
+            process_batch_callback=self._inner_fetch_key_requests,
+        )  # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]]

-    def verify_json_for_server(
+    async def verify_json_for_server(
         self,
         server_name: str,
         json_object: JsonDict,
         validity_time: int,
-        request_name: str,
-    ) -> defer.Deferred:
+    ) -> None:
         """Verify that a JSON object has been signed by a given server

+        Completes if the object was correctly signed, otherwise raises.
+
         Args:
             server_name: name of the server which must have signed this object
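The hunk above replaces the per-server `key_downloads` deferreds with a `BatchingQueue` keyed on server name, so concurrent verification requests for one server share a single key fetch. Below is a minimal asyncio-based sketch of that batching idea; it assumes nothing about Synapse's Twisted implementation, and every name in it is illustrative.

```python
import asyncio
from collections import defaultdict
from typing import Awaitable, Callable, Dict, List

# Minimal sketch of the batching idea, not Synapse's BatchingQueue: callers for
# the same key add their item to a per-key list, and a single task processes
# the whole batch and hands the result back to every waiter.
class SimpleBatchingQueue:
    def __init__(self, process_batch: Callable[[List[str]], Awaitable[dict]]):
        self._process_batch = process_batch
        self._pending: Dict[str, List[str]] = defaultdict(list)
        self._tasks: Dict[str, asyncio.Task] = {}

    async def add_to_queue(self, item: str, key: str) -> dict:
        self._pending[key].append(item)
        if key not in self._tasks:
            self._tasks[key] = asyncio.create_task(self._run(key))
        # shield() stops a cancelled caller from cancelling the shared fetch.
        return await asyncio.shield(self._tasks[key])

    async def _run(self, key: str) -> dict:
        await asyncio.sleep(0)  # give concurrent callers a chance to join this batch
        batch = self._pending.pop(key)
        del self._tasks[key]  # later callers start a fresh batch
        return await self._process_batch(batch)
```

The real `BatchingQueue` in `synapse.util.batching_queue` also deals with Twisted deferreds, logcontexts and metrics; this sketch only captures the batching behaviour.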
|
||||||
|
|
||||||
|
@ -181,392 +193,265 @@ class Keyring:
|
||||||
|
|
||||||
validity_time: timestamp at which we require the signing key to
|
validity_time: timestamp at which we require the signing key to
|
||||||
be valid. (0 implies we don't care)
|
be valid. (0 implies we don't care)
|
||||||
|
|
||||||
request_name: an identifier for this json object (eg, an event id)
|
|
||||||
for logging.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred[None]: completes if the the object was correctly signed, otherwise
|
|
||||||
errbacks with an error
|
|
||||||
"""
|
"""
|
||||||
request = VerifyJsonRequest.from_json_object(
|
request = VerifyJsonRequest.from_json_object(
|
||||||
server_name,
|
server_name,
|
||||||
json_object,
|
json_object,
|
||||||
validity_time,
|
validity_time,
|
||||||
request_name,
|
|
||||||
)
|
)
|
||||||
requests = (request,)
|
return await self.process_request(request)
|
||||||
return make_deferred_yieldable(self._verify_objects(requests)[0])
|
|
||||||
|
|
||||||
def verify_json_objects_for_server(
|
def verify_json_objects_for_server(
|
||||||
self, server_and_json: Iterable[Tuple[str, dict, int, str]]
|
self, server_and_json: Iterable[Tuple[str, dict, int]]
|
||||||
) -> List[defer.Deferred]:
|
) -> List[defer.Deferred]:
|
||||||
"""Bulk verifies signatures of json objects, bulk fetching keys as
|
"""Bulk verifies signatures of json objects, bulk fetching keys as
|
||||||
necessary.
|
necessary.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
server_and_json:
|
server_and_json:
|
||||||
Iterable of (server_name, json_object, validity_time, request_name)
|
Iterable of (server_name, json_object, validity_time)
|
||||||
tuples.
|
tuples.
|
||||||
|
|
||||||
validity_time is a timestamp at which the signing key must be
|
validity_time is a timestamp at which the signing key must be
|
||||||
valid.
|
valid.
|
||||||
|
|
||||||
request_name is an identifier for this json object (eg, an event id)
|
|
||||||
for logging.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List<Deferred[None]>: for each input triplet, a deferred indicating success
|
List<Deferred[None]>: for each input triplet, a deferred indicating success
|
||||||
or failure to verify each json object's signature for the given
|
or failure to verify each json object's signature for the given
|
||||||
server_name. The deferreds run their callbacks in the sentinel
|
server_name. The deferreds run their callbacks in the sentinel
|
||||||
logcontext.
|
logcontext.
|
||||||
"""
|
"""
|
||||||
return self._verify_objects(
|
return [
|
||||||
|
run_in_background(
|
||||||
|
self.process_request,
|
||||||
VerifyJsonRequest.from_json_object(
|
VerifyJsonRequest.from_json_object(
|
||||||
server_name, json_object, validity_time, request_name
|
server_name,
|
||||||
)
|
json_object,
|
||||||
for server_name, json_object, validity_time, request_name in server_and_json
|
validity_time,
|
||||||
)
|
|
||||||
|
|
||||||
def verify_events_for_server(
|
|
||||||
self, server_and_events: Iterable[Tuple[str, EventBase, int]]
|
|
||||||
) -> List[defer.Deferred]:
|
|
||||||
"""Bulk verification of signatures on events.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
server_and_events:
|
|
||||||
Iterable of `(server_name, event, validity_time)` tuples.
|
|
||||||
|
|
||||||
`server_name` is which server we are verifying the signature for
|
|
||||||
on the event.
|
|
||||||
|
|
||||||
`event` is the event that we'll verify the signatures of for
|
|
||||||
the given `server_name`.
|
|
||||||
|
|
||||||
`validity_time` is a timestamp at which the signing key must be
|
|
||||||
valid.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List<Deferred[None]>: for each input triplet, a deferred indicating success
|
|
||||||
or failure to verify each event's signature for the given
|
|
||||||
server_name. The deferreds run their callbacks in the sentinel
|
|
||||||
logcontext.
|
|
||||||
"""
|
|
||||||
return self._verify_objects(
|
|
||||||
VerifyJsonRequest.from_event(server_name, event, validity_time)
|
|
||||||
for server_name, event, validity_time in server_and_events
|
|
||||||
)
|
|
||||||
|
|
||||||
def _verify_objects(
|
|
||||||
self, verify_requests: Iterable[VerifyJsonRequest]
|
|
||||||
) -> List[defer.Deferred]:
|
|
||||||
"""Does the work of verify_json_[objects_]for_server
|
|
||||||
|
|
||||||
|
|
||||||
Args:
|
|
||||||
verify_requests: Iterable of verification requests.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List<Deferred[None]>: for each input item, a deferred indicating success
|
|
||||||
or failure to verify each json object's signature for the given
|
|
||||||
server_name. The deferreds run their callbacks in the sentinel
|
|
||||||
logcontext.
|
|
||||||
"""
|
|
||||||
# a list of VerifyJsonRequests which are awaiting a key lookup
|
|
||||||
key_lookups = []
|
|
||||||
handle = preserve_fn(_handle_key_deferred)
|
|
||||||
|
|
||||||
def process(verify_request: VerifyJsonRequest) -> defer.Deferred:
|
|
||||||
"""Process an entry in the request list
|
|
||||||
|
|
||||||
Adds a key request to key_lookups, and returns a deferred which
|
|
||||||
will complete or fail (in the sentinel context) when verification completes.
|
|
||||||
"""
|
|
||||||
if not verify_request.key_ids:
|
|
||||||
return defer.fail(
|
|
||||||
SynapseError(
|
|
||||||
400,
|
|
||||||
"Not signed by %s" % (verify_request.server_name,),
|
|
||||||
Codes.UNAUTHORIZED,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.debug(
|
|
||||||
"Verifying %s for %s with key_ids %s, min_validity %i",
|
|
||||||
verify_request.request_name,
|
|
||||||
verify_request.server_name,
|
|
||||||
verify_request.key_ids,
|
|
||||||
verify_request.minimum_valid_until_ts,
|
|
||||||
)
|
|
||||||
|
|
||||||
# add the key request to the queue, but don't start it off yet.
|
|
||||||
key_lookups.append(verify_request)
|
|
||||||
|
|
||||||
# now run _handle_key_deferred, which will wait for the key request
|
|
||||||
# to complete and then do the verification.
|
|
||||||
#
|
|
||||||
# We want _handle_key_request to log to the right context, so we
|
|
||||||
# wrap it with preserve_fn (aka run_in_background)
|
|
||||||
return handle(verify_request)
|
|
||||||
|
|
||||||
results = [process(r) for r in verify_requests]
|
|
||||||
|
|
||||||
if key_lookups:
|
|
||||||
run_in_background(self._start_key_lookups, key_lookups)
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
async def _start_key_lookups(
|
|
||||||
self, verify_requests: List[VerifyJsonRequest]
|
|
||||||
) -> None:
|
|
||||||
"""Sets off the key fetches for each verify request
|
|
||||||
|
|
||||||
Once each fetch completes, verify_request.key_ready will be resolved.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
verify_requests:
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
# map from server name to a set of outstanding request ids
|
|
||||||
server_to_request_ids = {} # type: Dict[str, Set[int]]
|
|
||||||
|
|
||||||
for verify_request in verify_requests:
|
|
||||||
server_name = verify_request.server_name
|
|
||||||
request_id = id(verify_request)
|
|
||||||
server_to_request_ids.setdefault(server_name, set()).add(request_id)
|
|
||||||
|
|
||||||
# Wait for any previous lookups to complete before proceeding.
|
|
||||||
await self.wait_for_previous_lookups(server_to_request_ids.keys())
|
|
||||||
|
|
||||||
# take out a lock on each of the servers by sticking a Deferred in
|
|
||||||
# key_downloads
|
|
||||||
for server_name in server_to_request_ids.keys():
|
|
||||||
self.key_downloads[server_name] = defer.Deferred()
|
|
||||||
logger.debug("Got key lookup lock on %s", server_name)
|
|
||||||
|
|
||||||
# When we've finished fetching all the keys for a given server_name,
|
|
||||||
# drop the lock by resolving the deferred in key_downloads.
|
|
||||||
def drop_server_lock(server_name):
|
|
||||||
d = self.key_downloads.pop(server_name)
|
|
||||||
d.callback(None)
|
|
||||||
|
|
||||||
def lookup_done(res, verify_request):
|
|
||||||
server_name = verify_request.server_name
|
|
||||||
server_requests = server_to_request_ids[server_name]
|
|
||||||
server_requests.remove(id(verify_request))
|
|
||||||
|
|
||||||
# if there are no more requests for this server, we can drop the lock.
|
|
||||||
if not server_requests:
|
|
||||||
logger.debug("Releasing key lookup lock on %s", server_name)
|
|
||||||
drop_server_lock(server_name)
|
|
||||||
|
|
||||||
return res
|
|
||||||
|
|
||||||
for verify_request in verify_requests:
|
|
||||||
verify_request.key_ready.addBoth(lookup_done, verify_request)
|
|
||||||
|
|
||||||
# Actually start fetching keys.
|
|
||||||
self._get_server_verify_keys(verify_requests)
|
|
||||||
except Exception:
|
|
||||||
logger.exception("Error starting key lookups")
|
|
||||||
|
|
||||||
async def wait_for_previous_lookups(self, server_names: Iterable[str]) -> None:
|
|
||||||
"""Waits for any previous key lookups for the given servers to finish.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
server_names: list of servers which we want to look up
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Resolves once all key lookups for the given servers have
|
|
||||||
completed. Follows the synapse rules of logcontext preservation.
|
|
||||||
"""
|
|
||||||
loop_count = 1
|
|
||||||
while True:
|
|
||||||
wait_on = [
|
|
||||||
(server_name, self.key_downloads[server_name])
|
|
||||||
for server_name in server_names
|
|
||||||
if server_name in self.key_downloads
|
|
||||||
]
|
|
||||||
if not wait_on:
|
|
||||||
break
|
|
||||||
logger.info(
|
|
||||||
"Waiting for existing lookups for %s to complete [loop %i]",
|
|
||||||
[w[0] for w in wait_on],
|
|
||||||
loop_count,
|
|
||||||
)
|
|
||||||
with PreserveLoggingContext():
|
|
||||||
await defer.DeferredList((w[1] for w in wait_on))
|
|
||||||
|
|
||||||
loop_count += 1
|
|
||||||
|
|
||||||
def _get_server_verify_keys(self, verify_requests: List[VerifyJsonRequest]) -> None:
|
|
||||||
"""Tries to find at least one key for each verify request
|
|
||||||
|
|
||||||
For each verify_request, verify_request.key_ready is called back with
|
|
||||||
params (server_name, key_id, VerifyKey) if a key is found, or errbacked
|
|
||||||
with a SynapseError if none of the keys are found.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
verify_requests: list of verify requests
|
|
||||||
"""
|
|
||||||
|
|
||||||
remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}
|
|
||||||
|
|
||||||
async def do_iterations():
|
|
||||||
try:
|
|
||||||
with Measure(self.clock, "get_server_verify_keys"):
|
|
||||||
for f in self._key_fetchers:
|
|
||||||
if not remaining_requests:
|
|
||||||
return
|
|
||||||
await self._attempt_key_fetches_with_fetcher(
|
|
||||||
f, remaining_requests
|
|
||||||
)
|
|
||||||
|
|
||||||
# look for any requests which weren't satisfied
|
|
||||||
while remaining_requests:
|
|
||||||
verify_request = remaining_requests.pop()
|
|
||||||
rq_str = (
|
|
||||||
"VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
|
|
||||||
% (
|
|
||||||
verify_request.server_name,
|
|
||||||
verify_request.key_ids,
|
|
||||||
verify_request.minimum_valid_until_ts,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# If we run the errback immediately, it may cancel our
|
|
||||||
# loggingcontext while we are still in it, so instead we
|
|
||||||
# schedule it for the next time round the reactor.
|
|
||||||
#
|
|
||||||
# (this also ensures that we don't get a stack overflow if we
|
|
||||||
# has a massive queue of lookups waiting for this server).
|
|
||||||
self.clock.call_later(
|
|
||||||
0,
|
|
||||||
verify_request.key_ready.errback,
|
|
||||||
SynapseError(
|
|
||||||
401,
|
|
||||||
"Failed to find any key to satisfy %s" % (rq_str,),
|
|
||||||
Codes.UNAUTHORIZED,
|
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
except Exception as err:
|
for server_name, json_object, validity_time in server_and_json
|
||||||
# we don't really expect to get here, because any errors should already
|
]
|
||||||
# have been caught and logged. But if we do, let's log the error and make
|
|
||||||
# sure that all of the deferreds are resolved.
|
|
||||||
logger.error("Unexpected error in _get_server_verify_keys: %s", err)
|
|
||||||
with PreserveLoggingContext():
|
|
||||||
for verify_request in remaining_requests:
|
|
||||||
if not verify_request.key_ready.called:
|
|
||||||
verify_request.key_ready.errback(err)
|
|
||||||
|
|
||||||
run_in_background(do_iterations)
|
async def verify_event_for_server(
|
||||||
|
self,
|
||||||
|
server_name: str,
|
||||||
|
event: EventBase,
|
||||||
|
validity_time: int,
|
||||||
|
) -> None:
|
||||||
|
await self.process_request(
|
||||||
|
VerifyJsonRequest.from_event(
|
||||||
|
server_name,
|
||||||
|
event,
|
||||||
|
validity_time,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
-    async def _attempt_key_fetches_with_fetcher(
-        self, fetcher: "KeyFetcher", remaining_requests: Set[VerifyJsonRequest]
-    ):
-        """Use a key fetcher to attempt to satisfy some key requests
-
-        Args:
-            fetcher: fetcher to use to fetch the keys
-            remaining_requests: outstanding key requests.
-                Any successfully-completed requests will be removed from the list.
-        """
-        # The keys to fetch.
-        # server_name -> key_id -> min_valid_ts
-        missing_keys = defaultdict(dict)  # type: Dict[str, Dict[str, int]]
-
-        for verify_request in remaining_requests:
-            # any completed requests should already have been removed
-            assert not verify_request.key_ready.called
-            keys_for_server = missing_keys[verify_request.server_name]
-
-            for key_id in verify_request.key_ids:
-                # If we have several requests for the same key, then we only need to
-                # request that key once, but we should do so with the greatest
-                # min_valid_until_ts of the requests, so that we can satisfy all of
-                # the requests.
-                keys_for_server[key_id] = max(
-                    keys_for_server.get(key_id, -1),
-                    verify_request.minimum_valid_until_ts,
-                )
+    async def process_request(self, verify_request: VerifyJsonRequest) -> None:
+        """Processes the `VerifyJsonRequest`. Raises if the object is not signed
+        by the server, the signatures don't match or we failed to fetch the
+        necessary keys.
+        """
+
+        if not verify_request.key_ids:
+            raise SynapseError(
+                400,
+                f"Not signed by {verify_request.server_name}",
+                Codes.UNAUTHORIZED,
+            )
+
+        # Add the keys we need to verify to the queue for retrieval. We queue
+        # up requests for the same server so we don't end up with many in flight
+        # requests for the same keys.
+        key_request = verify_request.to_fetch_key_request()
+        found_keys_by_server = await self._server_queue.add_to_queue(
+            key_request, key=verify_request.server_name
+        )
+
+        # Since we batch up requests the returned set of keys may contain keys
+        # from other servers, so we pull out only the ones we care about.
+        found_keys = found_keys_by_server.get(verify_request.server_name, {})
+
+        # Verify each signature we got valid keys for, raising if we can't
+        # verify any of them.
+        verified = False
+        for key_id in verify_request.key_ids:
+            key_result = found_keys.get(key_id)
+            if not key_result:
+                continue
+
+            if key_result.valid_until_ts < verify_request.minimum_valid_until_ts:
+                continue
+
+            verify_key = key_result.verify_key
+            json_object = verify_request.get_json_object()
+            try:
+                verify_signed_json(
+                    json_object,
+                    verify_request.server_name,
+                    verify_key,
+                )
+                verified = True
+            except SignatureVerifyException as e:
+                logger.debug(
+                    "Error verifying signature for %s:%s:%s with key %s: %s",
+                    verify_request.server_name,
+                    verify_key.alg,
+                    verify_key.version,
+                    encode_verify_key_base64(verify_key),
+                    str(e),
+                )
+                raise SynapseError(
+                    401,
+                    "Invalid signature for server %s with key %s:%s: %s"
+                    % (
+                        verify_request.server_name,
+                        verify_key.alg,
+                        verify_key.version,
+                        str(e),
+                    ),
+                    Codes.UNAUTHORIZED,
+                )
+
+        if not verified:
+            raise SynapseError(
+                401,
+                f"Failed to find any key to satisfy: {key_request}",
+                Codes.UNAUTHORIZED,
+            )
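The verification step in the new process_request() ultimately boils down to a call to verify_signed_json(). A minimal, self-contained sketch of that round trip, using the signedjson package (a Synapse dependency); the server name and key version here are made up:

# Illustrative sketch only: sign a JSON object and verify it, mirroring the
# verify_signed_json() call used by process_request() above.
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import SignatureVerifyException, sign_json, verify_signed_json

signing_key = generate_signing_key("key1")  # hypothetical key version
signed = sign_json({"foo": "bar"}, "example.org", signing_key)

try:
    verify_signed_json(signed, "example.org", get_verify_key(signing_key))
    print("signature ok")
except SignatureVerifyException as e:
    print("signature failed:", e)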
+    async def _inner_fetch_key_requests(
+        self, requests: List[_FetchKeyRequest]
+    ) -> Dict[str, Dict[str, FetchKeyResult]]:
+        """Processing function for the queue of `_FetchKeyRequest`."""
+
+        logger.debug("Starting fetch for %s", requests)
+
+        # First we need to deduplicate requests for the same key. We do this by
+        # taking the *maximum* requested `minimum_valid_until_ts` for each pair
+        # of server name/key ID.
+        server_to_key_to_ts = {}  # type: Dict[str, Dict[str, int]]
+        for request in requests:
+            by_server = server_to_key_to_ts.setdefault(request.server_name, {})
+            for key_id in request.key_ids:
+                existing_ts = by_server.get(key_id, 0)
+                by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts)
+
+        deduped_requests = [
+            _FetchKeyRequest(server_name, minimum_valid_ts, [key_id])
+            for server_name, by_server in server_to_key_to_ts.items()
+            for key_id, minimum_valid_ts in by_server.items()
+        ]
+
+        logger.debug("Deduplicated key requests to %s", deduped_requests)
+
+        # For each key we call `_inner_fetch_key_request` which will handle
+        # fetching each key. Note these shouldn't throw if we fail to contact
+        # other servers etc.
+        results_per_request = await yieldable_gather_results(
+            self._inner_fetch_key_request,
+            deduped_requests,
+        )
+
+        # We now convert the returned list of results into a map from server
+        # name to key ID to FetchKeyResult, to return.
+        to_return = {}  # type: Dict[str, Dict[str, FetchKeyResult]]
+        for (request, results) in zip(deduped_requests, results_per_request):
+            to_return_by_server = to_return.setdefault(request.server_name, {})
+            for key_id, key_result in results.items():
+                existing = to_return_by_server.get(key_id)
+                if not existing or existing.valid_until_ts < key_result.valid_until_ts:
+                    to_return_by_server[key_id] = key_result
+
+        return to_return
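The deduplication step above is worth calling out: several queued requests for the same server/key pair collapse into one, keeping the largest minimum_valid_until_ts so every caller can be satisfied. A standalone sketch of that idea (server names and key ids are made up):

# Illustrative sketch of the de-duplication performed above.
from typing import Dict, List, Tuple

def dedupe_key_requests(
    requests: List[Tuple[str, str, int]]
) -> Dict[str, Dict[str, int]]:
    server_to_key_to_ts: Dict[str, Dict[str, int]] = {}
    for server_name, key_id, min_valid_ts in requests:
        by_server = server_to_key_to_ts.setdefault(server_name, {})
        # keep the *largest* requested validity timestamp per server/key pair
        by_server[key_id] = max(by_server.get(key_id, 0), min_valid_ts)
    return server_to_key_to_ts

assert dedupe_key_requests(
    [("example.org", "ed25519:a", 100), ("example.org", "ed25519:a", 500)]
) == {"example.org": {"ed25519:a": 500}}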
+    async def _inner_fetch_key_request(
+        self, verify_request: _FetchKeyRequest
+    ) -> Dict[str, FetchKeyResult]:
+        """Attempt to fetch the given key by calling each key fetcher one by
+        one.
+        """
+        logger.debug("Starting fetch for %s", verify_request)
+
+        found_keys: Dict[str, FetchKeyResult] = {}
+        missing_key_ids = set(verify_request.key_ids)
+
+        for fetcher in self._key_fetchers:
+            if not missing_key_ids:
+                break
+
+            logger.debug("Getting keys from %s for %s", fetcher, verify_request)
+            keys = await fetcher.get_keys(
+                verify_request.server_name,
+                list(missing_key_ids),
+                verify_request.minimum_valid_until_ts,
+            )
+
+            for key_id, key in keys.items():
+                if not key:
+                    continue
+
+                # If we already have a result for the given key ID we keep the
+                # one with the highest `valid_until_ts`.
+                existing_key = found_keys.get(key_id)
+                if existing_key:
+                    if key.valid_until_ts <= existing_key.valid_until_ts:
+                        continue
+
+                # We always store the returned key even if it doesn't meet the
+                # `minimum_valid_until_ts` requirement, as some verification
+                # requests may still be able to be satisfied by it.
+                #
+                # We still keep looking for the key from other fetchers in that
+                # case though.
+                found_keys[key_id] = key
+
+                if key.valid_until_ts < verify_request.minimum_valid_until_ts:
+                    continue
+
+                missing_key_ids.discard(key_id)
+
+        return found_keys
-
-        results = await fetcher.get_keys(missing_keys)
-
-        completed = []
-        for verify_request in remaining_requests:
-            server_name = verify_request.server_name
-
-            # see if any of the keys we got this time are sufficient to
-            # complete this VerifyJsonRequest.
-            result_keys = results.get(server_name, {})
-            for key_id in verify_request.key_ids:
-                fetch_key_result = result_keys.get(key_id)
-                if not fetch_key_result:
-                    # we didn't get a result for this key
-                    continue
-
-                if (
-                    fetch_key_result.valid_until_ts
-                    < verify_request.minimum_valid_until_ts
-                ):
-                    # key was not valid at this point
-                    continue
-
-                # we have a valid key for this request. If we run the callback
-                # immediately, it may cancel our loggingcontext while we are still in
-                # it, so instead we schedule it for the next time round the reactor.
-                #
-                # (this also ensures that we don't get a stack overflow if we had
-                # a massive queue of lookups waiting for this server).
-                logger.debug(
-                    "Found key %s:%s for %s",
-                    server_name,
-                    key_id,
-                    verify_request.request_name,
-                )
-                self.clock.call_later(
-                    0,
-                    verify_request.key_ready.callback,
-                    (server_name, key_id, fetch_key_result.verify_key),
-                )
-                completed.append(verify_request)
-                break
-
-        remaining_requests.difference_update(completed)
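The new per-request fetch loop asks each source in turn and keeps the freshest key it has seen. A minimal standalone sketch of that pattern, assuming made-up fetcher callables and a toy key type:

# Illustrative sketch of the fetcher fallback loop above. FakeKey and the two
# example "fetchers" are hypothetical; only the control flow mirrors the diff.
from dataclasses import dataclass
from typing import Callable, Dict, Iterable, List

@dataclass
class FakeKey:
    valid_until_ts: int

def fetch_with_fallback(
    key_ids: List[str],
    min_valid_ts: int,
    fetchers: Iterable[Callable[[List[str]], Dict[str, FakeKey]]],
) -> Dict[str, FakeKey]:
    found: Dict[str, FakeKey] = {}
    missing = set(key_ids)
    for fetcher in fetchers:
        if not missing:
            break
        for key_id, key in fetcher(sorted(missing)).items():
            existing = found.get(key_id)
            if existing and key.valid_until_ts <= existing.valid_until_ts:
                continue
            # keep the key even if it is too old; a later fetcher may do better
            found[key_id] = key
            if key.valid_until_ts >= min_valid_ts:
                missing.discard(key_id)
    return found

store = lambda ids: {"ed25519:a": FakeKey(valid_until_ts=50)}
origin = lambda ids: {k: FakeKey(valid_until_ts=1000) for k in ids}
print(fetch_with_fallback(["ed25519:a"], 100, [store, origin]))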
 class KeyFetcher(metaclass=abc.ABCMeta):
-    @abc.abstractmethod
-    async def get_keys(
-        self, keys_to_fetch: Dict[str, Dict[str, int]]
-    ) -> Dict[str, Dict[str, FetchKeyResult]]:
-        """
-        Args:
-            keys_to_fetch:
-                the keys to be fetched. server_name -> key_id -> min_valid_ts
-
-        Returns:
-            Map from server_name -> key_id -> FetchKeyResult
-        """
-        raise NotImplementedError
+    def __init__(self, hs: "HomeServer"):
+        self._queue = BatchingQueue(
+            self.__class__.__name__, hs.get_clock(), self._fetch_keys
+        )
+
+    async def get_keys(
+        self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+    ) -> Dict[str, FetchKeyResult]:
+        results = await self._queue.add_to_queue(
+            _FetchKeyRequest(
+                server_name=server_name,
+                key_ids=key_ids,
+                minimum_valid_until_ts=minimum_valid_until_ts,
+            )
+        )
+        return results.get(server_name, {})
+
+    @abc.abstractmethod
+    async def _fetch_keys(
+        self, keys_to_fetch: List[_FetchKeyRequest]
+    ) -> Dict[str, Dict[str, FetchKeyResult]]:
+        pass
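After this change, concrete fetchers only implement _fetch_keys(); batching and queueing live in the base class. A standalone sketch of that contract, with the queue replaced by a direct call and a hypothetical in-memory fetcher:

# Illustrative sketch of the KeyFetcher subclassing pattern above. MiniKeyFetcher
# and StaticKeyFetcher are made up; Synapse's real base class uses a BatchingQueue.
import abc
import asyncio
from typing import Dict, List

class MiniKeyFetcher(abc.ABC):
    async def get_keys(self, server_name: str, key_ids: List[str]) -> Dict[str, str]:
        # In Synapse this call is funnelled through a BatchingQueue; here we
        # simply call the batch method directly with a single request.
        results = await self._fetch_keys([(server_name, key_ids)])
        return results.get(server_name, {})

    @abc.abstractmethod
    async def _fetch_keys(self, requests: List[tuple]) -> Dict[str, Dict[str, str]]:
        ...

class StaticKeyFetcher(MiniKeyFetcher):
    """Hypothetical fetcher backed by a fixed in-memory mapping."""

    def __init__(self, keys: Dict[str, Dict[str, str]]):
        self._keys = keys

    async def _fetch_keys(self, requests):
        out: Dict[str, Dict[str, str]] = {}
        for server_name, key_ids in requests:
            known = self._keys.get(server_name, {})
            out[server_name] = {k: known[k] for k in key_ids if k in known}
        return out

fetcher = StaticKeyFetcher({"example.org": {"ed25519:a": "base64-key"}})
print(asyncio.run(fetcher.get_keys("example.org", ["ed25519:a"])))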
 class StoreKeyFetcher(KeyFetcher):
     """KeyFetcher impl which fetches keys from our data store"""

     def __init__(self, hs: "HomeServer"):
+        super().__init__(hs)
+
         self.store = hs.get_datastore()

-    async def get_keys(
-        self, keys_to_fetch: Dict[str, Dict[str, int]]
-    ) -> Dict[str, Dict[str, FetchKeyResult]]:
-        """see KeyFetcher.get_keys"""
-
+    async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]):
         key_ids_to_fetch = (
-            (server_name, key_id)
-            for server_name, keys_for_server in keys_to_fetch.items()
-            for key_id in keys_for_server.keys()
+            (queue_value.server_name, key_id)
+            for queue_value in keys_to_fetch
+            for key_id in queue_value.key_ids
         )

         res = await self.store.get_server_verify_keys(key_ids_to_fetch)

@@ -578,6 +463,8 @@ class StoreKeyFetcher(KeyFetcher):
 class BaseV2KeyFetcher(KeyFetcher):
     def __init__(self, hs: "HomeServer"):
+        super().__init__(hs)
+
         self.store = hs.get_datastore()
         self.config = hs.config

@@ -685,10 +572,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         self.client = hs.get_federation_http_client()
         self.key_servers = self.config.key_servers

-    async def get_keys(
-        self, keys_to_fetch: Dict[str, Dict[str, int]]
+    async def _fetch_keys(
+        self, keys_to_fetch: List[_FetchKeyRequest]
     ) -> Dict[str, Dict[str, FetchKeyResult]]:
-        """see KeyFetcher.get_keys"""
+        """see KeyFetcher._fetch_keys"""

         async def get_key(key_server: TrustedKeyServer) -> Dict:
             try:

@@ -724,12 +611,12 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         return union_of_keys

     async def get_server_verify_key_v2_indirect(
-        self, keys_to_fetch: Dict[str, Dict[str, int]], key_server: TrustedKeyServer
+        self, keys_to_fetch: List[_FetchKeyRequest], key_server: TrustedKeyServer
     ) -> Dict[str, Dict[str, FetchKeyResult]]:
         """
         Args:
             keys_to_fetch:
-                the keys to be fetched. server_name -> key_id -> min_valid_ts
+                the keys to be fetched.

             key_server: notary server to query for the keys

@@ -743,7 +630,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         perspective_name = key_server.server_name
         logger.info(
             "Requesting keys %s from notary server %s",
-            keys_to_fetch.items(),
+            keys_to_fetch,
             perspective_name,
         )

@@ -753,11 +640,13 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
             path="/_matrix/key/v2/query",
             data={
                 "server_keys": {
-                    server_name: {
-                        key_id: {"minimum_valid_until_ts": min_valid_ts}
-                        for key_id, min_valid_ts in server_keys.items()
+                    queue_value.server_name: {
+                        key_id: {
+                            "minimum_valid_until_ts": queue_value.minimum_valid_until_ts,
+                        }
+                        for key_id in queue_value.key_ids
                     }
-                    for server_name, server_keys in keys_to_fetch.items()
+                    for queue_value in keys_to_fetch
                 }
             },
         )
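The hunk above only changes how the notary request body is assembled, not its shape. A small standalone sketch of the "server_keys" structure sent to /_matrix/key/v2/query, built from a list of (server, key_ids, min_valid_ts) requests; the example server name and key id are made up:

# Illustrative sketch of the notary query body built above.
from typing import Dict, List, Tuple

def build_notary_query(
    keys_to_fetch: List[Tuple[str, List[str], int]]
) -> Dict[str, dict]:
    return {
        "server_keys": {
            server_name: {
                key_id: {"minimum_valid_until_ts": min_valid_ts}
                for key_id in key_ids
            }
            for server_name, key_ids, min_valid_ts in keys_to_fetch
        }
    }

print(build_notary_query([("example.org", ["ed25519:a"], 1_600_000_000_000)]))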
@@ -858,7 +747,20 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
         self.client = hs.get_federation_http_client()

     async def get_keys(
-        self, keys_to_fetch: Dict[str, Dict[str, int]]
+        self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+    ) -> Dict[str, FetchKeyResult]:
+        results = await self._queue.add_to_queue(
+            _FetchKeyRequest(
+                server_name=server_name,
+                key_ids=key_ids,
+                minimum_valid_until_ts=minimum_valid_until_ts,
+            ),
+            key=server_name,
+        )
+        return results.get(server_name, {})
+
+    async def _fetch_keys(
+        self, keys_to_fetch: List[_FetchKeyRequest]
     ) -> Dict[str, Dict[str, FetchKeyResult]]:
         """
         Args:

@@ -871,8 +773,10 @@ class ServerKeyFetcher(BaseV2KeyFetcher):

         results = {}

-        async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None:
-            server_name, key_ids = key_to_fetch_item
+        async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None:
+            server_name = key_to_fetch_item.server_name
+            key_ids = key_to_fetch_item.key_ids
+
             try:
                 keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
                 results[server_name] = keys

@@ -883,7 +787,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
             except Exception:
                 logger.exception("Error getting keys %s from %s", key_ids, server_name)

-        await yieldable_gather_results(get_key, keys_to_fetch.items())
+        await yieldable_gather_results(get_key, keys_to_fetch)
         return results

     async def get_server_verify_key_v2_direct(

@@ -955,37 +859,3 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
             keys.update(response_keys)

         return keys
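Note that ServerKeyFetcher queues with key=server_name, so outstanding requests for the same origin are processed as one batch. A toy sketch of that grouping idea (server names and key ids are made up):

# Illustrative sketch of grouping queued key requests per origin server,
# which is what keying the batching queue by server_name achieves above.
from collections import defaultdict
from typing import Dict, List, Tuple

def group_by_server(requests: List[Tuple[str, str]]) -> Dict[str, List[str]]:
    batches: Dict[str, List[str]] = defaultdict(list)
    for server_name, key_id in requests:
        batches[server_name].append(key_id)
    return dict(batches)

print(
    group_by_server(
        [("a.example", "ed25519:1"), ("a.example", "ed25519:2"), ("b.example", "ed25519:1")]
    )
)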
-
-
-async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None:
-    """Waits for the key to become available, and then performs a verification
-
-    Args:
-        verify_request:
-
-    Raises:
-        SynapseError if there was a problem performing the verification
-    """
-    server_name = verify_request.server_name
-    with PreserveLoggingContext():
-        _, key_id, verify_key = await verify_request.key_ready
-
-    json_object = verify_request.get_json_object()
-
-    try:
-        verify_signed_json(json_object, server_name, verify_key)
-    except SignatureVerifyException as e:
-        logger.debug(
-            "Error verifying signature for %s:%s:%s with key %s: %s",
-            server_name,
-            verify_key.alg,
-            verify_key.version,
-            encode_verify_key_base64(verify_key),
-            str(e),
-        )
-        raise SynapseError(
-            401,
-            "Invalid signature for server %s with key %s:%s: %s"
-            % (server_name, verify_key.alg, verify_key.version, str(e)),
-            Codes.UNAUTHORIZED,
-        )
@@ -14,11 +14,6 @@
 # limitations under the License.
 import logging
 from collections import namedtuple
-from typing import Iterable, List
-
-from twisted.internet import defer
-from twisted.internet.defer import Deferred, DeferredList
-from twisted.python.failure import Failure

 from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError

@@ -28,11 +23,6 @@ from synapse.crypto.keyring import Keyring
 from synapse.events import EventBase, make_event_from_dict
 from synapse.events.utils import prune_event, validate_canonicaljson
 from synapse.http.servlet import assert_params_in_dict
-from synapse.logging.context import (
-    PreserveLoggingContext,
-    current_context,
-    make_deferred_yieldable,
-)
 from synapse.types import JsonDict, get_domain_from_id

 logger = logging.getLogger(__name__)

@@ -48,38 +38,30 @@ class FederationBase:
         self.store = hs.get_datastore()
         self._clock = hs.get_clock()

-    def _check_sigs_and_hash(
+    async def _check_sigs_and_hash(
         self, room_version: RoomVersion, pdu: EventBase
-    ) -> Deferred:
-        return make_deferred_yieldable(
-            self._check_sigs_and_hashes(room_version, [pdu])[0]
-        )
-
-    def _check_sigs_and_hashes(
-        self, room_version: RoomVersion, pdus: List[EventBase]
-    ) -> List[Deferred]:
-        """Checks that each of the received events is correctly signed by the
-        sending server.
+    ) -> EventBase:
+        """Checks that event is correctly signed by the sending server.

         Args:
-            room_version: The room version of the PDUs
-            pdus: the events to be checked
+            room_version: The room version of the PDU
+            pdu: the event to be checked

         Returns:
-            For each input event, a deferred which:
-              * returns the original event if the checks pass
-              * returns a redacted version of the event (if the signature
+            * the original event if the checks pass
+            * a redacted version of the event (if the signature
                 matched but the hash did not)
-              * throws a SynapseError if the signature check failed.
-            The deferreds run their callbacks in the sentinel
-        """
-        deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
-
-        ctx = current_context()
-
-        @defer.inlineCallbacks
-        def callback(_, pdu: EventBase):
-            with PreserveLoggingContext(ctx):
-                if not check_event_content_hash(pdu):
+            * throws a SynapseError if the signature check failed."""
+        try:
+            await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+        except SynapseError as e:
+            logger.warning(
+                "Signature check failed for %s: %s",
+                pdu.event_id,
+                e,
+            )
+            raise
+
+        if not check_event_content_hash(pdu):
             # let's try to distinguish between failures because the event was
             # redacted (which are somewhat expected) vs actual ball-tampering

@@ -94,8 +76,7 @@ class FederationBase:
                 redacted_event.content.keys()
             ) == set(pdu.content.keys()):
                 logger.info(
-                    "Event %s seems to have been redacted; using our redacted "
-                    "copy",
+                    "Event %s seems to have been redacted; using our redacted copy",
                     pdu.event_id,
                 )
             else:

@@ -105,9 +86,7 @@ class FederationBase:
             )
             return redacted_event

-        result = yield defer.ensureDeferred(
-            self.spam_checker.check_event_for_spam(pdu)
-        )
+        result = await self.spam_checker.check_event_for_spam(pdu)

         if result:
             logger.warning(

@@ -119,41 +98,22 @@ class FederationBase:

         return pdu

-        def errback(failure: Failure, pdu: EventBase):
-            failure.trap(SynapseError)
-            with PreserveLoggingContext(ctx):
-                logger.warning(
-                    "Signature check failed for %s: %s",
-                    pdu.event_id,
-                    failure.getErrorMessage(),
-                )
-            return failure
-
-        for deferred, pdu in zip(deferreds, pdus):
-            deferred.addCallbacks(
-                callback, errback, callbackArgs=[pdu], errbackArgs=[pdu]
-            )
-
-        return deferreds
-

 class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])):
     pass


-def _check_sigs_on_pdus(
-    keyring: Keyring, room_version: RoomVersion, pdus: Iterable[EventBase]
-) -> List[Deferred]:
+async def _check_sigs_on_pdu(
+    keyring: Keyring, room_version: RoomVersion, pdu: EventBase
+) -> None:
     """Check that the given events are correctly signed

+    Raise a SynapseError if the event wasn't correctly signed.
+
     Args:
         keyring: keyring object to do the checks
         room_version: the room version of the PDUs
         pdus: the events to be checked
-
-    Returns:
-        A Deferred for each event in pdus, which will either succeed if
-        the signatures are valid, or fail (with a SynapseError) if not.
     """

     # we want to check that the event is signed by:

@@ -177,90 +137,47 @@ def _check_sigs_on_pdus(
     # let's start by getting the domain for each pdu, and flattening the event back
     # to JSON.

-    pdus_to_check = [
-        PduToCheckSig(
-            pdu=p,
-            sender_domain=get_domain_from_id(p.sender),
-            deferreds=[],
-        )
-        for p in pdus
-    ]
-
     # First we check that the sender event is signed by the sender's domain
     # (except if its a 3pid invite, in which case it may be sent by any server)
-    pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)]
-    more_deferreds = keyring.verify_events_for_server(
-        [
-            (
-                p.sender_domain,
-                p.pdu,
-                p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
-            )
-            for p in pdus_to_check_sender
-        ]
-    )
-
-    def sender_err(e, pdu_to_check):
-        errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
-            pdu_to_check.pdu.event_id,
-            pdu_to_check.sender_domain,
-            e.getErrorMessage(),
-        )
-        raise SynapseError(403, errmsg, Codes.FORBIDDEN)
-
-    for p, d in zip(pdus_to_check_sender, more_deferreds):
-        d.addErrback(sender_err, p)
-        p.deferreds.append(d)
+    if not _is_invite_via_3pid(pdu):
+        try:
+            await keyring.verify_event_for_server(
+                get_domain_from_id(pdu.sender),
+                pdu,
+                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
+            )
+        except Exception as e:
+            errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
+                pdu.event_id,
+                get_domain_from_id(pdu.sender),
+                e,
+            )
+            raise SynapseError(403, errmsg, Codes.FORBIDDEN)

     # now let's look for events where the sender's domain is different to the
     # event id's domain (normally only the case for joins/leaves), and add additional
     # checks. Only do this if the room version has a concept of event ID domain
     # (ie, the room version uses old-style non-hash event IDs).
-    if room_version.event_format == EventFormatVersions.V1:
-        pdus_to_check_event_id = [
-            p
-            for p in pdus_to_check
-            if p.sender_domain != get_domain_from_id(p.pdu.event_id)
-        ]
-
-        more_deferreds = keyring.verify_events_for_server(
-            [
-                (
-                    get_domain_from_id(p.pdu.event_id),
-                    p.pdu,
-                    p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
-                )
-                for p in pdus_to_check_event_id
-            ]
-        )
-
-        def event_err(e, pdu_to_check):
-            errmsg = (
-                "event id %s: unable to verify signature for event id domain: %s"
-                % (pdu_to_check.pdu.event_id, e.getErrorMessage())
-            )
-            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
-
-        for p, d in zip(pdus_to_check_event_id, more_deferreds):
-            d.addErrback(event_err, p)
-            p.deferreds.append(d)
-
-    # replace lists of deferreds with single Deferreds
-    return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
-
-
-def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred:
-    """Given a list of deferreds, either return the single deferred,
-    combine into a DeferredList, or return an already resolved deferred.
-    """
-    if len(deferreds) > 1:
-        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
-    elif len(deferreds) == 1:
-        return deferreds[0]
-    else:
-        return defer.succeed(None)
+    if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
+        pdu.event_id
+    ) != get_domain_from_id(pdu.sender):
+        try:
+            await keyring.verify_event_for_server(
+                get_domain_from_id(pdu.event_id),
+                pdu,
+                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
+            )
+        except Exception as e:
+            errmsg = (
+                "event id %s: unable to verify signature for event id domain %s: %s"
+                % (
+                    pdu.event_id,
+                    get_domain_from_id(pdu.event_id),
+                    e,
+                )
+            )
+            raise SynapseError(403, errmsg, Codes.FORBIDDEN)


 def _is_invite_via_3pid(event: EventBase) -> bool:
     return (
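The rewritten _check_sigs_on_pdu keeps the same policy as before: the sender's domain must have signed the event (unless it is a 3pid invite), and in v1 rooms the event-id domain must have signed it too when it differs. A small standalone sketch of just that decision, with a simplified domain parser:

# Illustrative sketch of which servers must have signed a PDU, mirroring the
# checks above. get_domain_from_id() here is a simplification of the real helper.
from typing import List

def get_domain_from_id(matrix_id: str) -> str:
    return matrix_id.split(":", 1)[1]

def required_signing_servers(
    sender: str, event_id: str, v1_event_ids: bool, is_3pid_invite: bool
) -> List[str]:
    servers = []
    if not is_3pid_invite:
        servers.append(get_domain_from_id(sender))
    if v1_event_ids and get_domain_from_id(event_id) != get_domain_from_id(sender):
        servers.append(get_domain_from_id(event_id))
    return servers

print(required_signing_servers("@alice:a.example", "$abc:b.example", True, False))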
@@ -21,6 +21,7 @@ from typing import (
     Any,
     Awaitable,
     Callable,
+    Collection,
     Dict,
     Iterable,
     List,

@@ -35,9 +36,6 @@ from typing import (
 import attr
 from prometheus_client import Counter

-from twisted.internet import defer
-from twisted.internet.defer import Deferred
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import (
     CodeMessageException,

@@ -56,10 +54,9 @@ from synapse.api.room_versions import (
 from synapse.events import EventBase, builder
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.federation.transport.client import SendJoinResponse
-from synapse.logging.context import make_deferred_yieldable, preserve_fn
 from synapse.logging.utils import log_function
 from synapse.types import JsonDict, get_domain_from_id
-from synapse.util import unwrapFirstError
+from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination

@@ -360,10 +357,9 @@ class FederationClient(FederationBase):
     async def _check_sigs_and_hash_and_fetch(
         self,
         origin: str,
-        pdus: List[EventBase],
+        pdus: Collection[EventBase],
         room_version: RoomVersion,
         outlier: bool = False,
-        include_none: bool = False,
     ) -> List[EventBase]:
         """Takes a list of PDUs and checks the signatures and hashes of each
         one. If a PDU fails its signature check then we check if we have it in

@@ -380,19 +376,61 @@ class FederationClient(FederationBase):
             pdu
             room_version
             outlier: Whether the events are outliers or not
-            include_none: Whether to include None in the returned list
-                for events that have failed their checks

         Returns:
             A list of PDUs that have valid signatures and hashes.
         """
-        deferreds = self._check_sigs_and_hashes(room_version, pdus)
-
-        async def handle_check_result(pdu: EventBase, deferred: Deferred):
-            try:
-                res = await make_deferred_yieldable(deferred)
-            except SynapseError:
+
+        # We limit how many PDUs we check at once, as if we try to do hundreds
+        # of thousands of PDUs at once we see large memory spikes.
+
+        valid_pdus = []
+
+        async def _execute(pdu: EventBase) -> None:
+            valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
+                pdu=pdu,
+                origin=origin,
+                outlier=outlier,
+                room_version=room_version,
+            )
+
+            if valid_pdu:
+                valid_pdus.append(valid_pdu)
+
+        await concurrently_execute(_execute, pdus, 10000)
+
+        return valid_pdus
+
+    async def _check_sigs_and_hash_and_fetch_one(
+        self,
+        pdu: EventBase,
+        origin: str,
+        room_version: RoomVersion,
+        outlier: bool = False,
+    ) -> Optional[EventBase]:
+        """Takes a PDU and checks its signatures and hashes. If the PDU fails
+        its signature check then we check if we have it in the database and if
+        not then request if from the originating server of that PDU.
+
+        If then PDU fails its content hash check then it is redacted.
+
+        Args:
+            origin
+            pdu
+            room_version
+            outlier: Whether the events are outliers or not
+
+        Returns:
+            The PDU (possibly redacted) if it has valid signatures and hashes.
+        """
+
         res = None
+        try:
+            res = await self._check_sigs_and_hash(room_version, pdu)
+        except SynapseError:
+            pass

         if not res:
             # Check local db.

@@ -420,18 +458,6 @@ class FederationClient(FederationBase):

         return res

-        handle = preserve_fn(handle_check_result)
-        deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
-
-        valid_pdus = await make_deferred_yieldable(
-            defer.gatherResults(deferreds2, consumeErrors=True)
-        ).addErrback(unwrapFirstError)
-
-        if include_none:
-            return valid_pdus
-        else:
-            return [p for p in valid_pdus if p]
-
     async def get_event_auth(
         self, destination: str, room_id: str, event_id: str
     ) -> List[EventBase]:

@@ -671,8 +697,6 @@ class FederationClient(FederationBase):
         state = response.state
         auth_chain = response.auth_events

-        pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
-
         create_event = None
         for e in state:
             if (e.type, e.state_key) == (EventTypes.Create, ""):

@@ -696,14 +720,29 @@ class FederationClient(FederationBase):
                 % (create_room_version,)
             )

-        valid_pdus = await self._check_sigs_and_hash_and_fetch(
-            destination,
-            list(pdus.values()),
-            outlier=True,
-            room_version=room_version,
-        )
-
-        valid_pdus_map = {p.event_id: p for p in valid_pdus}
+        logger.info(
+            "Processing from send_join %d events", len(state) + len(auth_chain)
+        )
+
+        # We now go and check the signatures and hashes for the event. Note
+        # that we limit how many events we process at a time to keep the
+        # memory overhead from exploding.
+        valid_pdus_map: Dict[str, EventBase] = {}
+
+        async def _execute(pdu: EventBase) -> None:
+            valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
+                pdu=pdu,
+                origin=destination,
+                outlier=True,
+                room_version=room_version,
+            )
+
+            if valid_pdu:
+                valid_pdus_map[valid_pdu.event_id] = valid_pdu
+
+        await concurrently_execute(
+            _execute, itertools.chain(state, auth_chain), 10000
+        )

         # NB: We *need* to copy to ensure that we don't have multiple
         # references being passed on, as that causes... issues.
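Both call sites above rely on concurrently_execute() to cap how many PDU checks run at once. Synapse's helper is Twisted-based; the sketch below is a plain asyncio analogue of the same idea, written from scratch for illustration:

# Illustrative asyncio analogue of concurrently_execute(func, items, limit):
# run a coroutine over many items with a bound on concurrent executions.
import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")

async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], items: Iterable[T], limit: int
) -> None:
    sem = asyncio.Semaphore(limit)

    async def _wrap(item: T) -> None:
        async with sem:
            await func(item)

    await asyncio.gather(*(_wrap(item) for item in items))

async def main() -> None:
    checked = []

    async def check(i: int) -> None:
        await asyncio.sleep(0)  # stand-in for signature/hash checks
        checked.append(i)

    await concurrently_execute(check, range(100), limit=10)
    print(len(checked))

asyncio.run(main())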
@@ -37,6 +37,7 @@ from synapse.http.servlet import (
 )
 from synapse.logging.context import run_in_background
 from synapse.logging.opentracing import (
+    SynapseTags,
     start_active_span,
     start_active_span_from_request,
     tags,

@@ -151,7 +152,9 @@ class Authenticator:
         )

         await self.keyring.verify_json_for_server(
-            origin, json_request, now, "Incoming request"
+            origin,
+            json_request,
+            now,
         )

         logger.debug("Request from %s", origin)

@@ -314,7 +317,7 @@ class BaseFederationServlet:
             raise

         request_tags = {
-            "request_id": request.get_request_id(),
+            SynapseTags.REQUEST_ID: request.get_request_id(),
             tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
             tags.HTTP_METHOD: request.get_method(),
             tags.HTTP_URL: request.get_redacted_uri(),

@@ -1562,7 +1565,6 @@ def register_servlets(
             server_name=hs.hostname,
         ).register(resource)

-    if hs.config.experimental.spaces_enabled:
         FederationSpaceSummaryServlet(
             handler=hs.get_space_summary_handler(),
             authenticator=authenticator,
@@ -108,7 +108,9 @@ class GroupAttestationSigning:

         assert server_name is not None
         await self.keyring.verify_json_for_server(
-            server_name, attestation, now, "Group attestation"
+            server_name,
+            attestation,
+            now,
         )

     def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
@@ -87,7 +87,8 @@ class ApplicationServicesHandler:
         self.is_processing = True
         try:
             limit = 100
-            while True:
+            upper_bound = -1
+            while upper_bound < self.current_max:
                 (
                     upper_bound,
                     events,

@@ -95,9 +96,6 @@ class ApplicationServicesHandler:
                     self.current_max, limit
                 )

-                if not events:
-                    break
-
                 events_by_room = {}  # type: Dict[str, List[EventBase]]
                 for event in events:
                     events_by_room.setdefault(event.room_id, []).append(event)

@@ -153,9 +151,6 @@ class ApplicationServicesHandler:

                 await self.store.set_appservice_last_pos(upper_bound)

-                now = self.clock.time_msec()
-                ts = await self.store.get_received_ts(events[-1].event_id)
-
                 synapse.metrics.event_processing_positions.labels(
                     "appservice_sender"
                 ).set(upper_bound)

@@ -168,6 +163,10 @@ class ApplicationServicesHandler:

                 event_processing_loop_counter.labels("appservice_sender").inc()

+                if events:
+                    now = self.clock.time_msec()
+                    ts = await self.store.get_received_ts(events[-1].event_id)
+
                 synapse.metrics.event_processing_lag.labels(
                     "appservice_sender"
                 ).set(now - ts)
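The control-flow change above replaces "loop until an empty batch" with "loop until the processed position catches up with the current maximum", so the position can advance past stream entries that produce no events. A toy sketch of that loop shape, with a made-up get_batch callable standing in for the store:

# Illustrative sketch of the drain loop above. get_batch(upper_bound) is assumed
# to return (new_upper_bound, events) and to always advance the position.
def drain(get_batch, current_max):
    upper_bound = -1
    processed = []
    while upper_bound < current_max:
        upper_bound, events = get_batch(upper_bound)
        processed.extend(events)
    return processed

positions = {0: ["a"], 1: [], 2: ["b"]}

def get_batch(upper_bound):
    nxt = upper_bound + 1
    return nxt, positions.get(nxt, [])

print(drain(get_batch, current_max=2))  # -> ['a', 'b']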
@@ -22,6 +22,7 @@ from collections.abc import Container
 from http import HTTPStatus
 from typing import (
     TYPE_CHECKING,
+    Collection,
     Dict,
     Iterable,
     List,

@@ -178,6 +179,8 @@ class FederationHandler(BaseHandler):
         self.room_queues = {}  # type: Dict[str, List[Tuple[EventBase, str]]]
         self._room_pdu_linearizer = Linearizer("fed_room_pdu")

+        self._room_backfill = Linearizer("room_backfill")
+
         self.third_party_event_rules = hs.get_third_party_event_rules()

         self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages

@@ -577,7 +580,9 @@ class FederationHandler(BaseHandler):

         # Fetch the state events from the DB, and check we have the auth events.
         event_map = await self.store.get_events(state_event_ids, allow_rejected=True)
-        auth_events_in_store = await self.store.have_seen_events(auth_event_ids)
+        auth_events_in_store = await self.store.have_seen_events(
+            room_id, auth_event_ids
+        )

         # Check for missing events. We handle state and auth event seperately,
         # as we want to pull the state from the DB, but we don't for the auth

@@ -610,7 +615,7 @@ class FederationHandler(BaseHandler):

         if missing_auth_events:
             auth_events_in_store = await self.store.have_seen_events(
-                missing_auth_events
+                room_id, missing_auth_events
             )
             missing_auth_events.difference_update(auth_events_in_store)

@@ -710,7 +715,7 @@ class FederationHandler(BaseHandler):

         missing_auth_events = set(auth_event_ids) - fetched_events.keys()
         missing_auth_events.difference_update(
-            await self.store.have_seen_events(missing_auth_events)
+            await self.store.have_seen_events(room_id, missing_auth_events)
         )
         logger.debug("We are also missing %i auth events", len(missing_auth_events))

@@ -1039,6 +1044,12 @@ class FederationHandler(BaseHandler):
             return. This is used as part of the heuristic to decide if we
             should back paginate.
         """
+        with (await self._room_backfill.queue(room_id)):
+            return await self._maybe_backfill_inner(room_id, current_depth, limit)
+
+    async def _maybe_backfill_inner(
+        self, room_id: str, current_depth: int, limit: int
+    ) -> bool:
         extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)

         if not extremities:

@@ -1354,6 +1365,7 @@ class FederationHandler(BaseHandler):

             event_infos.append(_NewEventInfo(event, None, auth))

+        if event_infos:
             await self._auth_and_persist_events(
                 destination,
                 room_id,

@@ -2067,7 +2079,7 @@ class FederationHandler(BaseHandler):
         self,
         origin: str,
         room_id: str,
-        event_infos: Iterable[_NewEventInfo],
+        event_infos: Collection[_NewEventInfo],
         backfilled: bool = False,
     ) -> None:
         """Creates the appropriate contexts and persists events. The events

@@ -2078,6 +2090,9 @@ class FederationHandler(BaseHandler):
         Notifies about the events where appropriate.
         """

+        if not event_infos:
+            return
+
         async def prep(ev_info: _NewEventInfo):
             event = ev_info.event
             with nested_logging_context(suffix=event.event_id):

@@ -2206,6 +2221,7 @@ class FederationHandler(BaseHandler):
                     raise
                 events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR

+        if auth_events or state:
             await self.persist_events_and_notify(
                 room_id,
                 [

@@ -2475,7 +2491,7 @@ class FederationHandler(BaseHandler):
         #
         # we start by checking if they are in the store, and then try calling /event_auth/.
         if missing_auth:
-            have_events = await self.store.have_seen_events(missing_auth)
+            have_events = await self.store.have_seen_events(event.room_id, missing_auth)
             logger.debug("Events %s are in the store", have_events)
             missing_auth.difference_update(have_events)

@@ -2494,7 +2510,7 @@ class FederationHandler(BaseHandler):
             return context

         seen_remotes = await self.store.have_seen_events(
-            [e.event_id for e in remote_auth_chain]
+            event.room_id, [e.event_id for e in remote_auth_chain]
         )

         for e in remote_auth_chain:

@@ -3051,11 +3067,18 @@ class FederationHandler(BaseHandler):
             the same room.
             backfilled: Whether these events are a result of
                 backfilling or not
+
+        Returns:
+            The stream ID after which all events have been persisted.
         """
+        if not event_and_contexts:
+            return self.store.get_current_events_token()
+
         instance = self.config.worker.events_shard_config.get_instance(room_id)
         if instance != self._instance_name:
-            # Limit the number of events sent over federation.
-            for batch in batch_iter(event_and_contexts, 1000):
+            # Limit the number of events sent over replication. We choose 200
+            # here as that is what we default to in `max_request_body_size(..)`
+            for batch in batch_iter(event_and_contexts, 200):
                 result = await self._send_events(
                     instance_name=instance,
                     store=self.store,
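The new _room_backfill Linearizer simply serialises backfill attempts per room so that two requests for the same room cannot race. Synapse's Linearizer is Twisted-based; below is a plain asyncio sketch of the same shape, using a per-room lock (all names here are illustrative):

# Illustrative asyncio analogue of guarding backfill with a per-room lock.
import asyncio
from collections import defaultdict

_room_locks: dict = defaultdict(asyncio.Lock)

async def maybe_backfill(room_id: str, current_depth: int) -> str:
    async with _room_locks[room_id]:
        return await _maybe_backfill_inner(room_id, current_depth)

async def _maybe_backfill_inner(room_id: str, current_depth: int) -> str:
    await asyncio.sleep(0)  # stand-in for the real backfill work
    return f"backfilled {room_id} from depth {current_depth}"

async def main():
    # the two calls for the same room run one after the other, not concurrently
    print(await asyncio.gather(maybe_backfill("!a:x", 10), maybe_backfill("!a:x", 12)))

asyncio.run(main())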
@@ -315,6 +315,17 @@ class SyncHandler:
         if context:
             context.tag = sync_type

+        # if we have a since token, delete any to-device messages before that token
+        # (since we now know that the device has received them)
+        if since_token is not None:
+            since_stream_id = since_token.to_device_key
+            deleted = await self.store.delete_messages_for_device(
+                sync_config.user.to_string(), sync_config.device_id, since_stream_id
+            )
+            logger.debug(
+                "Deleted %d to-device messages up to %d", deleted, since_stream_id
+            )
+
         if timeout == 0 or since_token is None or full_state:
             # we are going to return immediately, so don't bother calling
             # notifier.wait_for_events.

@@ -463,7 +474,7 @@ class SyncHandler:
         # ensure that we always include current state in the timeline
         current_state_ids = frozenset()  # type: FrozenSet[str]
         if any(e.is_state() for e in recents):
-            current_state_ids_map = await self.state.get_current_state_ids(
+            current_state_ids_map = await self.store.get_current_state_ids(
                 room_id
             )
             current_state_ids = frozenset(current_state_ids_map.values())

@@ -523,7 +534,7 @@ class SyncHandler:
         # ensure that we always include current state in the timeline
         current_state_ids = frozenset()
         if any(e.is_state() for e in loaded_recents):
-            current_state_ids_map = await self.state.get_current_state_ids(
+            current_state_ids_map = await self.store.get_current_state_ids(
                 room_id
             )
             current_state_ids = frozenset(current_state_ids_map.values())

@@ -1229,16 +1240,6 @@ class SyncHandler:
         since_stream_id = int(sync_result_builder.since_token.to_device_key)

         if since_stream_id != int(now_token.to_device_key):
-            # We only delete messages when a new message comes in, but that's
-            # fine so long as we delete them at some point.
-
-            deleted = await self.store.delete_messages_for_device(
-                user_id, device_id, since_stream_id
-            )
-            logger.debug(
-                "Deleted %d to-device messages up to %d", deleted, since_stream_id
-            )
-
             messages, stream_id = await self.store.get_new_messages_for_device(
                 user_id, device_id, since_stream_id, now_token.to_device_key
             )
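The idea behind the moved cleanup: once a client syncs with a since-token, every to-device message up to that token has been delivered and can be dropped, regardless of whether new messages have arrived. A standalone sketch with a made-up in-memory store:

# Illustrative sketch of deleting acknowledged to-device messages. The store,
# user id and device id below are all hypothetical.
from typing import Dict, List, Tuple

class ToDeviceStore:
    def __init__(self) -> None:
        self._messages: Dict[Tuple[str, str], List[Tuple[int, dict]]] = {}

    def add(self, user_id: str, device_id: str, stream_id: int, msg: dict) -> None:
        self._messages.setdefault((user_id, device_id), []).append((stream_id, msg))

    def delete_messages_for_device(
        self, user_id: str, device_id: str, up_to_stream_id: int
    ) -> int:
        key = (user_id, device_id)
        before = self._messages.get(key, [])
        after = [(s, m) for s, m in before if s > up_to_stream_id]
        self._messages[key] = after
        return len(before) - len(after)

store = ToDeviceStore()
store.add("@u:example.org", "DEV", 1, {"type": "m.room_key"})
store.add("@u:example.org", "DEV", 5, {"type": "m.room_key"})
print(store.delete_messages_for_device("@u:example.org", "DEV", 3))  # -> 1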
@@ -15,6 +15,11 @@
 """ This module contains base REST classes for constructing REST servlets. """

 import logging
+from typing import Dict, Iterable, List, Optional, overload
+
+from typing_extensions import Literal
+
+from twisted.web.server import Request

 from synapse.api.errors import Codes, SynapseError
 from synapse.util import json_decoder

@@ -105,14 +110,66 @@ def parse_boolean_from_args(args, name, default=None, required=False):
     return default
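The new typing imports above exist to support the @overload pattern used further down: callers that pass required=True get a non-Optional return type, while the default stays Optional. A small self-contained sketch of that pattern (the function and parameter names here are made up; the diff itself uses typing_extensions for older Pythons, while plain typing works on 3.8+):

# Illustrative sketch of the @overload + Literal typing pattern.
from typing import Dict, List, Literal, Optional, overload

@overload
def get_arg(args: Dict[str, List[str]], name: str, required: Literal[True]) -> str: ...

@overload
def get_arg(
    args: Dict[str, List[str]], name: str, required: bool = False
) -> Optional[str]: ...

def get_arg(args, name, required=False):
    values = args.get(name)
    if values:
        return values[0]
    if required:
        raise KeyError(f"missing query parameter {name}")
    return None

print(get_arg({"limit": ["10"]}, "limit", required=True))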
+
+
+@overload
+def parse_bytes_from_args(
+    args: Dict[bytes, List[bytes]],
+    name: str,
+    default: Literal[None] = None,
+    required: Literal[True] = True,
+) -> bytes:
+    ...
+
+
+@overload
+def parse_bytes_from_args(
+    args: Dict[bytes, List[bytes]],
+    name: str,
+    default: Optional[bytes] = None,
+    required: bool = False,
+) -> Optional[bytes]:
+    ...
+
+
+def parse_bytes_from_args(
+    args: Dict[bytes, List[bytes]],
+    name: str,
+    default: Optional[bytes] = None,
+    required: bool = False,
+) -> Optional[bytes]:
+    """
+    Parse a string parameter as bytes from the request query string.
+
+    Args:
+        args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
+        name: the name of the query parameter.
+        default: value to use if the parameter is absent,
+            defaults to None. Must be bytes if encoding is None.
+        required: whether to raise a 400 SynapseError if the
+            parameter is absent, defaults to False.
+    Returns:
+        Bytes or the default value.
+
+    Raises:
+        SynapseError if the parameter is absent and required.
+    """
+    name_bytes = name.encode("ascii")
+
+    if name_bytes in args:
+        return args[name_bytes][0]
+    elif required:
+        message = "Missing string query parameter %s" % (name,)
+        raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
+
+    return default
+
+
 def parse_string(
-    request,
-    name,
-    default=None,
-    required=False,
-    allowed_values=None,
-    param_type="string",
-    encoding="ascii",
+    request: Request,
+    name: str,
+    default: Optional[str] = None,
+    required: bool = False,
+    allowed_values: Optional[Iterable[str]] = None,
+    encoding: str = "ascii",
 ):
     """
     Parse a string parameter from the request query string.

@@ -122,74 +179,171 @@ def parse_string(

     Args:
         request: the twisted HTTP request.
-        name (bytes|unicode): the name of the query parameter.
-        default (bytes|unicode|None): value to use if the parameter is absent,
-            defaults to None. Must be bytes if encoding is None.
-        required (bool): whether to raise a 400 SynapseError if the
+        name: the name of the query parameter.
+        default: value to use if the parameter is absent, defaults to None.
+        required: whether to raise a 400 SynapseError if the
             parameter is absent, defaults to False.
-        allowed_values (list[bytes|unicode]): List of allowed values for the
+        allowed_values: List of allowed values for the
             string, or None if any value is allowed, defaults to None. Must be
             the same type as name, if given.
-        encoding (str|None): The encoding to decode the string content with.
+        encoding: The encoding to decode the string content with.

     Returns:
-        bytes/unicode|None: A string value or the default. Unicode if encoding
-        was given, bytes otherwise.
+        A string value or the default.

     Raises:
         SynapseError if the parameter is absent and required, or if the
         parameter is present, must be one of a list of allowed values and
         is not one of those allowed values.
     """
+    args = request.args  # type: Dict[bytes, List[bytes]]  # type: ignore
     return parse_string_from_args(
-        request.args, name, default, required, allowed_values, param_type, encoding
+        args, name, default, required, allowed_values, encoding
     )


-def parse_string_from_args(
-    args,
-    name,
-    default=None,
-    required=False,
-    allowed_values=None,
-    param_type="string",
-    encoding="ascii",
-):
-
-    if not isinstance(name, bytes):
-        name = name.encode("ascii")
-
-    if name in args:
-        value = args[name][0]
-
-        if encoding:
-            try:
-                value = value.decode(encoding)
-            except ValueError:
-                raise SynapseError(
-                    400, "Query parameter %r must be %s" % (name, encoding)
-                )
-
-        if allowed_values is not None and value not in allowed_values:
-            message = "Query parameter %r must be one of [%s]" % (
-                name,
-                ", ".join(repr(v) for v in allowed_values),
-            )
-            raise SynapseError(400, message)
-        else:
-            return value
+def _parse_string_value(
+    value: bytes,
+    allowed_values: Optional[Iterable[str]],
+    name: str,
+    encoding: str,
+) -> str:
+    try:
+        value_str = value.decode(encoding)
+    except ValueError:
+        raise SynapseError(400, "Query parameter %r must be %s" % (name, encoding))
+
+    if allowed_values is not None and value_str not in allowed_values:
+        message = "Query parameter %r must be one of [%s]" % (
+            name,
+            ", ".join(repr(v) for v in allowed_values),
+        )
+        raise SynapseError(400, message)
+    else:
+        return value_str
+
+
+@overload
+def parse_strings_from_args(
+    args: Dict[bytes, List[bytes]],
+    name: str,
+    default: Optional[List[str]] = None,
+    required: Literal[True] = True,
+    allowed_values: Optional[Iterable[str]] = None,
+    encoding: str = "ascii",
+) -> List[str]:
+    ...
+
+
+@overload
+def parse_strings_from_args(
+    args: Dict[bytes, List[bytes]],
+    name: str,
+    default: Optional[List[str]] = None,
+    required: bool = False,
+    allowed_values: Optional[Iterable[str]] = None,
|
||||||
|
encoding: str = "ascii",
|
||||||
|
) -> Optional[List[str]]:
|
||||||
|
...
|
||||||
|
|
||||||
|
|
||||||
|
def parse_strings_from_args(
|
||||||
|
args: Dict[bytes, List[bytes]],
|
||||||
|
name: str,
|
||||||
|
default: Optional[List[str]] = None,
|
||||||
|
required: bool = False,
|
||||||
|
allowed_values: Optional[Iterable[str]] = None,
|
||||||
|
encoding: str = "ascii",
|
||||||
|
) -> Optional[List[str]]:
|
||||||
|
"""
|
||||||
|
Parse a string parameter from the request query string list.
|
||||||
|
|
||||||
|
The content of the query param will be decoded to Unicode using the encoding.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
|
||||||
|
name: the name of the query parameter.
|
||||||
|
default: value to use if the parameter is absent, defaults to None.
|
||||||
|
required: whether to raise a 400 SynapseError if the
|
||||||
|
parameter is absent, defaults to False.
|
||||||
|
allowed_values: List of allowed values for the
|
||||||
|
string, or None if any value is allowed, defaults to None.
|
||||||
|
encoding: The encoding to decode the string content with.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A string value or the default.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
SynapseError if the parameter is absent and required, or if the
|
||||||
|
parameter is present, must be one of a list of allowed values and
|
||||||
|
is not one of those allowed values.
|
||||||
|
"""
|
||||||
|
name_bytes = name.encode("ascii")
|
||||||
|
|
||||||
|
if name_bytes in args:
|
||||||
|
values = args[name_bytes]
|
||||||
|
|
||||||
|
return [
|
||||||
|
_parse_string_value(value, allowed_values, name=name, encoding=encoding)
|
||||||
|
for value in values
|
||||||
|
]
|
||||||
else:
|
else:
|
||||||
if required:
|
if required:
|
||||||
message = "Missing %s query parameter %r" % (param_type, name)
|
message = "Missing string query parameter %r" % (name,)
|
||||||
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
|
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
|
||||||
else:
|
|
||||||
|
|
||||||
if encoding and isinstance(default, bytes):
|
|
||||||
return default.decode(encoding)
|
|
||||||
|
|
||||||
return default
|
return default
|
||||||
|
|
||||||
|
|
||||||
|
def parse_string_from_args(
|
||||||
|
args: Dict[bytes, List[bytes]],
|
||||||
|
name: str,
|
||||||
|
default: Optional[str] = None,
|
||||||
|
required: bool = False,
|
||||||
|
allowed_values: Optional[Iterable[str]] = None,
|
||||||
|
encoding: str = "ascii",
|
||||||
|
) -> Optional[str]:
|
||||||
|
"""
|
||||||
|
Parse the string parameter from the request query string list
|
||||||
|
and return the first result.
|
||||||
|
|
||||||
|
The content of the query param will be decoded to Unicode using the encoding.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
|
||||||
|
name: the name of the query parameter.
|
||||||
|
default: value to use if the parameter is absent, defaults to None.
|
||||||
|
required: whether to raise a 400 SynapseError if the
|
||||||
|
parameter is absent, defaults to False.
|
||||||
|
allowed_values: List of allowed values for the
|
||||||
|
string, or None if any value is allowed, defaults to None. Must be
|
||||||
|
the same type as name, if given.
|
||||||
|
encoding: The encoding to decode the string content with.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A string value or the default.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
SynapseError if the parameter is absent and required, or if the
|
||||||
|
parameter is present, must be one of a list of allowed values and
|
||||||
|
is not one of those allowed values.
|
||||||
|
"""
|
||||||
|
|
||||||
|
strings = parse_strings_from_args(
|
||||||
|
args,
|
||||||
|
name,
|
||||||
|
default=[default] if default is not None else None,
|
||||||
|
required=required,
|
||||||
|
allowed_values=allowed_values,
|
||||||
|
encoding=encoding,
|
||||||
|
)
|
||||||
|
|
||||||
|
if strings is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return strings[0]
|
||||||
|
|
||||||
|
|
||||||
def parse_json_value_from_request(request, allow_empty_body=False):
|
def parse_json_value_from_request(request, allow_empty_body=False):
|
||||||
"""Parse a JSON value from the body of a twisted HTTP request.
|
"""Parse a JSON value from the body of a twisted HTTP request.
|
||||||
|
|
||||||
|
@ -215,7 +369,7 @@ def parse_json_value_from_request(request, allow_empty_body=False):
|
||||||
try:
|
try:
|
||||||
content = json_decoder.decode(content_bytes.decode("utf-8"))
|
content = json_decoder.decode(content_bytes.decode("utf-8"))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning("Unable to parse JSON: %s", e)
|
logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes)
|
||||||
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
|
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
|
||||||
|
|
||||||
return content
|
return content
|
||||||
|
@ -278,9 +432,8 @@ class RestServlet:
|
||||||
|
|
||||||
def register(self, http_server):
|
def register(self, http_server):
|
||||||
""" Register this servlet with the given HTTP server. """
|
""" Register this servlet with the given HTTP server. """
|
||||||
if hasattr(self, "PATTERNS"):
|
patterns = getattr(self, "PATTERNS", None)
|
||||||
patterns = self.PATTERNS
|
if patterns:
|
||||||
|
|
||||||
for method in ("GET", "PUT", "POST", "DELETE"):
|
for method in ("GET", "PUT", "POST", "DELETE"):
|
||||||
if hasattr(self, "on_%s" % (method,)):
|
if hasattr(self, "on_%s" % (method,)):
|
||||||
servlet_classname = self.__class__.__name__
|
servlet_classname = self.__class__.__name__
|
||||||
|
|
|
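
The hunks above split query-parameter handling into typed helpers: parse_string and parse_strings_from_args decode values to str, while parse_bytes_from_args returns the raw bytes. A minimal usage sketch follows; the servlet class and parameter names here are invented for illustration and are not part of this commit.

    from synapse.http.servlet import (
        RestServlet,
        parse_bytes_from_args,
        parse_string,
        parse_strings_from_args,
    )

    class ExampleServlet(RestServlet):
        async def on_GET(self, request):
            # Decoded to str with the default "ascii" encoding; raises a 400
            # SynapseError (M_MISSING_PARAM) if absent because required=True.
            mode = parse_string(
                request, "mode", required=True, allowed_values=["full", "incremental"]
            )

            # Every value supplied for a repeated ?tag=... parameter, or None.
            tags = parse_strings_from_args(request.args, "tag")

            # Raw bytes, useful when the value must not be decoded (e.g. an HMAC).
            token = parse_bytes_from_args(request.args, "token", default=b"")

            return 200, {"mode": mode, "tags": tags or [], "token_len": len(token)}
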
@@ -265,6 +265,18 @@ class SynapseTags:
     # Whether the sync response has new data to be returned to the client.
     SYNC_RESULT = "sync.new_data"
 
+    # incoming HTTP request ID (as written in the logs)
+    REQUEST_ID = "request_id"
+
+    # HTTP request tag (used to distinguish full vs incremental syncs, etc)
+    REQUEST_TAG = "request_tag"
+
+    # Text description of a database transaction
+    DB_TXN_DESC = "db.txn_desc"
+
+    # Uniqueish ID of a database transaction
+    DB_TXN_ID = "db.txn_id"
+
 
 # Block everything by default
 # A regex which matches the server_names to expose traces for.
@@ -325,6 +337,7 @@ def ensure_active_span(message, ret=None):
 @contextlib.contextmanager
 def noop_context_manager(*args, **kwargs):
     """Does exactly what it says on the tin"""
+    # TODO: replace with contextlib.nullcontext once we drop support for Python 3.6
     yield
 
 
@@ -350,10 +363,13 @@ def init_tracer(hs: "HomeServer"):
 
     set_homeserver_whitelist(hs.config.opentracer_whitelist)
 
+    from jaeger_client.metrics.prometheus import PrometheusMetricsFactory
+
     config = JaegerConfig(
         config=hs.config.jaeger_config,
         service_name="{} {}".format(hs.config.server_name, hs.get_instance_name()),
         scope_manager=LogContextScopeManager(hs.config),
+        metrics_factory=PrometheusMetricsFactory(),
     )
 
     # If we have the rust jaeger reporter available let's use that.
@@ -588,7 +604,7 @@ def inject_active_span_twisted_headers(headers, destination, check_destination=True):
 
     span = opentracing.tracer.active_span
     carrier = {}  # type: Dict[str, str]
-    opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)
+    opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
 
     for key, value in carrier.items():
         headers.addRawHeaders(key, value)
@@ -625,7 +641,7 @@ def inject_active_span_byte_dict(headers, destination, check_destination=True):
     span = opentracing.tracer.active_span
 
     carrier = {}  # type: Dict[str, str]
-    opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)
+    opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
 
     for key, value in carrier.items():
         headers[key.encode()] = [value.encode()]
@@ -659,7 +675,7 @@ def inject_active_span_text_map(carrier, destination, check_destination=True):
         return
 
     opentracing.tracer.inject(
-        opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+        opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
     )
 
 
@@ -681,7 +697,7 @@ def get_active_span_text_map(destination=None):
 
     carrier = {}  # type: Dict[str, str]
     opentracing.tracer.inject(
-        opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+        opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
     )
 
     return carrier
@@ -696,7 +712,7 @@ def active_span_context_as_string():
     carrier = {}  # type: Dict[str, str]
     if opentracing:
         opentracing.tracer.inject(
-            opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+            opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
         )
     return json_encoder.encode(carrier)
 
@@ -824,7 +840,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
         return
 
     request_tags = {
-        "request_id": request.get_request_id(),
+        SynapseTags.REQUEST_ID: request.get_request_id(),
         tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
         tags.HTTP_METHOD: request.get_method(),
         tags.HTTP_URL: request.get_redacted_uri(),
@@ -833,9 +849,9 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
 
     request_name = request.request_metrics.name
     if extract_context:
-        scope = start_active_span_from_request(request, request_name, tags=request_tags)
+        scope = start_active_span_from_request(request, request_name)
     else:
-        scope = start_active_span(request_name, tags=request_tags)
+        scope = start_active_span(request_name)
 
     with scope:
         try:
@@ -845,4 +861,11 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
             # with JsonResource).
             scope.span.set_operation_name(request.request_metrics.name)
 
-            scope.span.set_tag("request_tag", request.request_metrics.start_context.tag)
+            # set the tags *after* the servlet completes, in case it decided to
+            # prioritise the span (tags will get dropped on unprioritised spans)
+            request_tags[
+                SynapseTags.REQUEST_TAG
+            ] = request.request_metrics.start_context.tag
+
+            for k, v in request_tags.items():
+                scope.span.set_tag(k, v)
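
With the tag names added above, trace_servlet records the incoming request ID and the request tag under well-known keys, and only applies them once the servlet has finished. As a rough illustration of the same pattern outside trace_servlet, using helpers this module already exposes (the span name and tag value below are made up for the example):

    from synapse.logging.opentracing import SynapseTags, set_tag, start_active_span

    def traced_operation(request_id: str) -> None:
        # Tags set on the active span end up on the reported trace, so a trace
        # can be cross-referenced with the request ID that appears in the logs.
        with start_active_span("example.operation"):
            set_tag(SynapseTags.REQUEST_ID, request_id)
            set_tag(SynapseTags.REQUEST_TAG, "example")
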
@@ -22,7 +22,11 @@ from prometheus_client.core import REGISTRY, Counter, Gauge
 from twisted.internet import defer
 
 from synapse.logging.context import LoggingContext, PreserveLoggingContext
-from synapse.logging.opentracing import noop_context_manager, start_active_span
+from synapse.logging.opentracing import (
+    SynapseTags,
+    noop_context_manager,
+    start_active_span,
+)
 from synapse.util.async_helpers import maybe_awaitable
 
 if TYPE_CHECKING:
@@ -200,9 +204,12 @@ def run_as_background_process(desc: str, func, *args, bg_start_span=True, **kwargs):
 
         with BackgroundProcessLoggingContext(desc, count) as context:
             try:
-                ctx = noop_context_manager()
                 if bg_start_span:
-                    ctx = start_active_span(desc, tags={"request_id": str(context)})
+                    ctx = start_active_span(
+                        f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
+                    )
+                else:
+                    ctx = noop_context_manager()
                 with ctx:
                     return await maybe_awaitable(func(*args, **kwargs))
             except Exception:
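
After this change, every background process started with bg_start_span=True gets a span named "bgproc.<desc>" rather than the bare description. A hedged usage sketch; the description string and the coroutine below are illustrative only:

    from synapse.metrics.background_process_metrics import run_as_background_process

    async def _prune_old_entries() -> None:
        ...  # some periodic maintenance work

    def schedule_prune() -> None:
        # The resulting span is reported as "bgproc.prune_old_entries" and is
        # tagged with the logging context's request ID.
        run_as_background_process("prune_old_entries", _prune_old_entries)
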
@@ -485,6 +485,7 @@ class Notifier:
             end_time = self.clock.time_msec() + timeout
 
             while not result:
+                with start_active_span("wait_for_events"):
                     try:
                         now = self.clock.time_msec()
                         if end_time <= now:
@@ -499,7 +500,6 @@ class Notifier:
                             self.hs.get_reactor(),
                         )
 
-                        with start_active_span("wait_for_events.deferred"):
                             log_kv(
                                 {
                                     "wait_for_events": "sleep",
@@ -68,7 +68,7 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
             if row.entity.startswith("@"):
                 self._device_list_stream_cache.entity_has_changed(row.entity, token)
                 self.get_cached_devices_for_user.invalidate((row.entity,))
-                self._get_cached_user_device.invalidate_many((row.entity,))
+                self._get_cached_user_device.invalidate((row.entity,))
                 self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
 
             else:
@@ -17,11 +17,13 @@
 
 import logging
 import platform
+from typing import TYPE_CHECKING, Optional, Tuple
 
 import synapse
 from synapse.api.errors import Codes, NotFoundError, SynapseError
-from synapse.http.server import JsonResource
+from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
 from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
 from synapse.rest.admin.devices import (
     DeleteDevicesRestServlet,
@@ -66,22 +68,25 @@ from synapse.rest.admin.users import (
     UserTokenRestServlet,
     WhoisRestServlet,
 )
-from synapse.types import RoomStreamToken
+from synapse.types import JsonDict, RoomStreamToken
 from synapse.util.versionstring import get_version_string
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 
 
 class VersionServlet(RestServlet):
     PATTERNS = admin_patterns("/server_version$")
 
-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         self.res = {
             "server_version": get_version_string(synapse),
             "python_version": platform.python_version(),
         }
 
-    def on_GET(self, request):
+    def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         return 200, self.res
 
 
@@ -90,17 +95,14 @@ class PurgeHistoryRestServlet(RestServlet):
         "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
     )
 
-    def __init__(self, hs):
-        """
-
-        Args:
-            hs (synapse.server.HomeServer)
-        """
+    def __init__(self, hs: "HomeServer"):
         self.pagination_handler = hs.get_pagination_handler()
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
 
-    async def on_POST(self, request, room_id, event_id):
+    async def on_POST(
+        self, request: SynapseRequest, room_id: str, event_id: Optional[str]
+    ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)
 
         body = parse_json_object_from_request(request, allow_empty_body=True)
@@ -119,6 +121,8 @@ class PurgeHistoryRestServlet(RestServlet):
             if event.room_id != room_id:
                 raise SynapseError(400, "Event is for wrong room.")
 
+            # RoomStreamToken expects [int] not Optional[int]
+            assert event.internal_metadata.stream_ordering is not None
             room_token = RoomStreamToken(
                 event.depth, event.internal_metadata.stream_ordering
             )
@@ -173,16 +177,13 @@
 class PurgeHistoryStatusRestServlet(RestServlet):
     PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]+)")
 
-    def __init__(self, hs):
-        """
-
-        Args:
-            hs (synapse.server.HomeServer)
-        """
+    def __init__(self, hs: "HomeServer"):
         self.pagination_handler = hs.get_pagination_handler()
         self.auth = hs.get_auth()
 
-    async def on_GET(self, request, purge_id):
+    async def on_GET(
+        self, request: SynapseRequest, purge_id: str
+    ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)
 
         purge_status = self.pagination_handler.get_purge_status(purge_id)
@@ -203,12 +204,12 @@ class PurgeHistoryStatusRestServlet(RestServlet):
 class AdminRestResource(JsonResource):
     """The REST resource which gets mounted at /_synapse/admin"""
 
-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         JsonResource.__init__(self, hs, canonical_json=False)
         register_servlets(hs, self)
 
 
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     """
     Register all the admin servlets.
     """
@@ -242,7 +243,9 @@ def register_servlets(hs, http_server):
     RateLimitRestServlet(hs).register(http_server)
 
 
-def register_servlets_for_client_rest_resource(hs, http_server):
+def register_servlets_for_client_rest_resource(
+    hs: "HomeServer", http_server: HttpServer
+) -> None:
     """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
     WhoisRestServlet(hs).register(http_server)
     PurgeHistoryStatusRestServlet(hs).register(http_server)
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import re
+from typing import Iterable, Pattern
 
 from synapse.api.auth import Auth
 from synapse.api.errors import AuthError
@@ -20,7 +21,7 @@ from synapse.http.site import SynapseRequest
 from synapse.types import UserID
 
 
-def admin_patterns(path_regex: str, version: str = "v1"):
+def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]:
     """Returns the list of patterns for an admin endpoint
 
     Args:
@@ -12,10 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import TYPE_CHECKING, Tuple
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
 from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
 
 logger = logging.getLogger(__name__)
 
@@ -25,12 +31,14 @@ class DeleteGroupAdminRestServlet(RestServlet):
 
     PATTERNS = admin_patterns("/delete_group/(?P<group_id>[^/]*)")
 
-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         self.group_server = hs.get_groups_server_handler()
         self.is_mine_id = hs.is_mine_id
         self.auth = hs.get_auth()
 
-    async def on_POST(self, request, group_id):
+    async def on_POST(
+        self, request: SynapseRequest, group_id: str
+    ) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester.user)
 
@@ -17,6 +17,7 @@ import logging
 from typing import TYPE_CHECKING, Tuple
 
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
 from synapse.http.site import SynapseRequest
 from synapse.rest.admin._base import (
@@ -37,12 +38,11 @@ class QuarantineMediaInRoom(RestServlet):
     this server.
     """
 
-    PATTERNS = (
-        admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine")
-        +
+    PATTERNS = [
+        *admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine"),
         # This path kept around for legacy reasons
-        admin_patterns("/quarantine_media/(?P<room_id>[^/]+)")
-    )
+        *admin_patterns("/quarantine_media/(?P<room_id>[^/]+)"),
+    ]
 
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
@@ -120,6 +120,35 @@ class QuarantineMediaByID(RestServlet):
         return 200, {}
 
 
+class UnquarantineMediaByID(RestServlet):
+    """Quarantines local or remote media by a given ID so that no one can download
+    it via this server.
+    """
+
+    PATTERNS = admin_patterns(
+        "/media/unquarantine/(?P<server_name>[^/]+)/(?P<media_id>[^/]+)"
+    )
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(
+        self, request: SynapseRequest, server_name: str, media_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info(
+            "Remove from quarantine local media by ID: %s/%s", server_name, media_id
+        )
+
+        # Remove from quarantine this media id
+        await self.store.quarantine_media_by_id(server_name, media_id, None)
+
+        return 200, {}
+
+
 class ProtectMediaByID(RestServlet):
     """Protect local media from being quarantined."""
 
@@ -137,8 +166,31 @@ class ProtectMediaByID(RestServlet):
 
         logging.info("Protecting local media by ID: %s", media_id)
 
-        # Quarantine this media id
-        await self.store.mark_local_media_as_safe(media_id)
+        # Protect this media id
+        await self.store.mark_local_media_as_safe(media_id, safe=True)
+
+        return 200, {}
+
+
+class UnprotectMediaByID(RestServlet):
+    """Unprotect local media from being quarantined."""
+
+    PATTERNS = admin_patterns("/media/unprotect/(?P<media_id>[^/]+)")
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(
+        self, request: SynapseRequest, media_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info("Unprotecting local media by ID: %s", media_id)
+
+        # Unprotect this media id
+        await self.store.mark_local_media_as_safe(media_id, safe=False)
 
         return 200, {}
 
@@ -260,15 +312,17 @@ class DeleteMediaByDateSize(RestServlet):
         return 200, {"deleted_media": deleted_media, "total": total}
 
 
-def register_servlets_for_media_repo(hs: "HomeServer", http_server):
+def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None:
     """
     Media repo specific APIs.
     """
     PurgeMediaCacheRestServlet(hs).register(http_server)
     QuarantineMediaInRoom(hs).register(http_server)
     QuarantineMediaByID(hs).register(http_server)
+    UnquarantineMediaByID(hs).register(http_server)
     QuarantineMediaByUser(hs).register(http_server)
     ProtectMediaByID(hs).register(http_server)
+    UnprotectMediaByID(hs).register(http_server)
     ListMediaInRoom(hs).register(http_server)
    DeleteMediaByID(hs).register(http_server)
    DeleteMediaByDateSize(hs).register(http_server)
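
The two new servlets register POST endpoints under the admin prefix that admin_patterns builds (v1 by default). A minimal sketch of exercising them with an admin access token; the homeserver URL, token, and media ID below are placeholders, not values from this commit:

    import requests

    HOMESERVER = "https://homeserver.example"  # placeholder
    HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token

    # Lift a quarantine that was previously applied to a piece of media.
    requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/media/unquarantine/homeserver.example/<media_id>",
        headers=HEADERS,
    )

    # Clear the "protected" flag so the media can be quarantined again later.
    requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/media/unprotect/<media_id>",
        headers=HEADERS,
    )
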
@@ -649,7 +649,7 @@ class RoomEventContextServlet(RestServlet):
         limit = parse_integer(request, "limit", default=10)
 
         # picking the API shape for symmetry with /messages
-        filter_str = parse_string(request, b"filter", encoding="utf-8")
+        filter_str = parse_string(request, "filter", encoding="utf-8")
         if filter_str:
             filter_json = urlparse.unquote(filter_str)
             event_filter = Filter(
@@ -478,13 +478,12 @@ class UserRegisterServlet(RestServlet):
 
 class WhoisRestServlet(RestServlet):
     path_regex = "/whois/(?P<user_id>[^/]*)$"
-    PATTERNS = (
-        admin_patterns(path_regex)
-        +
+    PATTERNS = [
+        *admin_patterns(path_regex),
         # URL for spec reason
         # https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid
-        client_patterns("/admin" + path_regex, v1=True)
-    )
+        *client_patterns("/admin" + path_regex, v1=True),
+    ]
 
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
@@ -553,11 +552,7 @@ class DeactivateAccountRestServlet(RestServlet):
 class AccountValidityRenewServlet(RestServlet):
     PATTERNS = admin_patterns("/account_validity/validity$")
 
-    def __init__(self, hs):
-        """
-        Args:
-            hs (synapse.server.HomeServer): server
-        """
+    def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.account_activity_handler = hs.get_account_validity_handler()
         self.auth = hs.get_auth()
@@ -14,7 +14,7 @@
 
 import logging
 import re
-from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Optional
+from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional
 
 from synapse.api.errors import Codes, LoginError, SynapseError
 from synapse.api.ratelimiting import Ratelimiter
@@ -25,6 +25,7 @@ from synapse.http import get_request_uri
 from synapse.http.server import HttpServer, finish_request
 from synapse.http.servlet import (
     RestServlet,
+    parse_bytes_from_args,
     parse_json_object_from_request,
     parse_string,
 )
@@ -437,9 +438,8 @@ class SsoRedirectServlet(RestServlet):
             finish_request(request)
             return
 
-        client_redirect_url = parse_string(
-            request, "redirectUrl", required=True, encoding=None
-        )
+        args = request.args  # type: Dict[bytes, List[bytes]]  # type: ignore
+        client_redirect_url = parse_bytes_from_args(args, "redirectUrl", required=True)
         sso_url = await self._sso_handler.handle_redirect_request(
             request,
             client_redirect_url,
@@ -540,7 +540,7 @@ class RoomMessageListRestServlet(RestServlet):
             self.store, request, default_limit=10
         )
         as_client_event = b"raw" not in request.args
-        filter_str = parse_string(request, b"filter", encoding="utf-8")
+        filter_str = parse_string(request, "filter", encoding="utf-8")
         if filter_str:
             filter_json = urlparse.unquote(filter_str)
             event_filter = Filter(
@@ -655,7 +655,7 @@ class RoomEventContextServlet(RestServlet):
         limit = parse_integer(request, "limit", default=10)
 
         # picking the API shape for symmetry with /messages
-        filter_str = parse_string(request, b"filter", encoding="utf-8")
+        filter_str = parse_string(request, "filter", encoding="utf-8")
         if filter_str:
             filter_json = urlparse.unquote(filter_str)
             event_filter = Filter(
@@ -913,7 +913,7 @@ class RoomAliasListServlet(RestServlet):
             r"^/_matrix/client/unstable/org\.matrix\.msc2432"
             r"/rooms/(?P<room_id>[^/]*)/aliases"
         ),
-    ]
+    ] + list(client_patterns("/rooms/(?P<room_id>[^/]*)/aliases$", unstable=False))
 
     def __init__(self, hs: "HomeServer"):
         super().__init__()
@@ -1063,18 +1063,16 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False):
     RoomRedactEventRestServlet(hs).register(http_server)
     RoomTypingRestServlet(hs).register(http_server)
     RoomEventContextServlet(hs).register(http_server)
-
-    if hs.config.experimental.spaces_enabled:
-        RoomSpaceSummaryRestServlet(hs).register(http_server)
+    RoomSpaceSummaryRestServlet(hs).register(http_server)
+    RoomEventServlet(hs).register(http_server)
+    JoinedRoomsRestServlet(hs).register(http_server)
+    RoomAliasListServlet(hs).register(http_server)
+    SearchRestServlet(hs).register(http_server)
 
     # Some servlets only get registered for the main process.
     if not is_worker:
         RoomCreateRestServlet(hs).register(http_server)
         RoomForgetRestServlet(hs).register(http_server)
-        SearchRestServlet(hs).register(http_server)
-        JoinedRoomsRestServlet(hs).register(http_server)
-        RoomEventServlet(hs).register(http_server)
-        RoomAliasListServlet(hs).register(http_server)
 
 
 def register_deprecated_servlets(hs, http_server):
@@ -16,11 +16,7 @@ import logging
 from http import HTTPStatus
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.http.servlet import (
-    RestServlet,
-    assert_params_in_dict,
-    parse_json_object_from_request,
-)
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
 from ._base import client_patterns
 
@@ -42,15 +38,14 @@ class ReportEventRestServlet(RestServlet):
         user_id = requester.user.to_string()
 
         body = parse_json_object_from_request(request)
-        assert_params_in_dict(body, ("reason", "score"))
 
-        if not isinstance(body["reason"], str):
+        if not isinstance(body.get("reason", ""), str):
             raise SynapseError(
                 HTTPStatus.BAD_REQUEST,
                 "Param 'reason' must be a string",
                 Codes.BAD_JSON,
             )
-        if not isinstance(body["score"], int):
+        if not isinstance(body.get("score", 0), int):
             raise SynapseError(
                 HTTPStatus.BAD_REQUEST,
                 "Param 'score' must be an integer",
@@ -61,7 +56,7 @@ class ReportEventRestServlet(RestServlet):
             room_id=room_id,
             event_id=event_id,
             user_id=user_id,
-            reason=body["reason"],
+            reason=body.get("reason"),
             content=body,
             received_ts=self.clock.time_msec(),
         )
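
With assert_params_in_dict removed, the report endpoint no longer insists on both fields; a body with either, both, or neither still validates as long as the supplied types are right. A hedged sketch of the request bodies the servlet now accepts (the room and event identifiers are placeholders):

    import json

    # All of these now pass the servlet's validation:
    bodies = [
        {"reason": "spam", "score": -100},  # both fields, as before
        {"reason": "spam"},                 # score omitted
        {"score": -100},                    # reason omitted
        {},                                 # neither field
    ]

    for body in bodies:
        # POST /_matrix/client/r0/rooms/<room_id>/report/<event_id>
        print(json.dumps(body))
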
@@ -17,6 +17,7 @@ import logging
 from hashlib import sha256
 from http import HTTPStatus
 from os import path
+from typing import Dict, List
 
 import jinja2
 from jinja2 import TemplateNotFound
@@ -24,7 +25,7 @@ from jinja2 import TemplateNotFound
 from synapse.api.errors import NotFoundError, StoreError, SynapseError
 from synapse.config import ConfigError
 from synapse.http.server import DirectServeHtmlResource, respond_with_html
-from synapse.http.servlet import parse_string
+from synapse.http.servlet import parse_bytes_from_args, parse_string
 from synapse.types import UserID
 
 # language to use for the templates. TODO: figure this out from Accept-Language
@@ -116,7 +117,8 @@ class ConsentResource(DirectServeHtmlResource):
         has_consented = False
         public_version = username == ""
         if not public_version:
-            userhmac_bytes = parse_string(request, "h", required=True, encoding=None)
+            args = request.args  # type: Dict[bytes, List[bytes]]
+            userhmac_bytes = parse_bytes_from_args(args, "h", required=True)
 
             self._check_hash(username, userhmac_bytes)
 
@@ -152,7 +154,8 @@ class ConsentResource(DirectServeHtmlResource):
         """
         version = parse_string(request, "v", required=True)
         username = parse_string(request, "u", required=True)
-        userhmac = parse_string(request, "h", required=True, encoding=None)
+        args = request.args  # type: Dict[bytes, List[bytes]]
+        userhmac = parse_bytes_from_args(args, "h", required=True)
 
         self._check_hash(username, userhmac)
 
@@ -22,6 +22,7 @@ from synapse.crypto.keyring import ServerKeyFetcher
 from synapse.http.server import DirectServeJsonResource, respond_with_json
 from synapse.http.servlet import parse_integer, parse_json_object_from_request
 from synapse.util import json_decoder
+from synapse.util.async_helpers import yieldable_gather_results
 
 logger = logging.getLogger(__name__)
 
@@ -210,7 +211,13 @@ class RemoteKey(DirectServeJsonResource):
         # If there is a cache miss, request the missing keys, then recurse (and
         # ensure the result is sent).
         if cache_misses and query_remote_on_cache_miss:
-            await self.fetcher.get_keys(cache_misses)
+            await yieldable_gather_results(
+                lambda t: self.fetcher.get_keys(*t),
+                (
+                    (server_name, list(keys), 0)
+                    for server_name, keys in cache_misses.items()
+                ),
+            )
             await self.query_keys(request, query, query_remote_on_cache_miss=False)
         else:
             signed_keys = []
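
yieldable_gather_results runs the supplied callable concurrently over the iterable while preserving Synapse's logging-context rules. A simplified, standalone sketch of the same call shape; the fetcher object here is a stand-in, not the real ServerKeyFetcher:

    from synapse.util.async_helpers import yieldable_gather_results

    async def fetch_all(fetcher, cache_misses: dict) -> None:
        # cache_misses maps server_name -> iterable of key IDs, as in the servlet
        # above; each tuple becomes one fetcher.get_keys(server, key_ids, 0) call.
        await yieldable_gather_results(
            lambda t: fetcher.get_keys(*t),
            ((server_name, list(keys), 0) for server_name, keys in cache_misses.items()),
        )
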
@@ -14,13 +14,13 @@
 # limitations under the License.
 
 import logging
-from typing import IO, TYPE_CHECKING
+from typing import IO, TYPE_CHECKING, Dict, List, Optional
 
 from twisted.web.server import Request
 
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import DirectServeJsonResource, respond_with_json
-from synapse.http.servlet import parse_string
+from synapse.http.servlet import parse_bytes_from_args
 from synapse.http.site import SynapseRequest
 from synapse.rest.media.v1.media_storage import SpamMediaException
 
@@ -61,10 +61,11 @@ class UploadResource(DirectServeJsonResource):
                 errcode=Codes.TOO_LARGE,
             )
 
-        upload_name = parse_string(request, b"filename", encoding=None)
-        if upload_name:
+        args = request.args  # type: Dict[bytes, List[bytes]]  # type: ignore
+        upload_name_bytes = parse_bytes_from_args(args, "filename")
+        if upload_name_bytes:
             try:
-                upload_name = upload_name.decode("utf8")
+                upload_name = upload_name_bytes.decode("utf8")  # type: Optional[str]
             except UnicodeDecodeError:
                 raise SynapseError(
                     msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400
@@ -40,6 +40,7 @@ from twisted.enterprise import adbapi
 
 from synapse.api.errors import StoreError
 from synapse.config.database import DatabaseConnectionConfig
+from synapse.logging import opentracing
 from synapse.logging.context import (
     LoggingContext,
     current_context,
@@ -90,12 +91,18 @@ def make_pool(
     db_args = dict(db_config.config.get("args", {}))
     db_args.setdefault("cp_reconnect", True)
 
+    def _on_new_connection(conn):
+        # Ensure we have a logging context so we can correctly track queries,
+        # etc.
+        with LoggingContext("db.on_new_connection"):
+            engine.on_new_connection(
+                LoggingDatabaseConnection(conn, engine, "on_new_connection")
+            )
+
     return adbapi.ConnectionPool(
         db_config.config["name"],
         cp_reactor=reactor,
-        cp_openfun=lambda conn: engine.on_new_connection(
-            LoggingDatabaseConnection(conn, engine, "on_new_connection")
-        ),
+        cp_openfun=_on_new_connection,
         **db_args,
     )
 
@@ -313,6 +320,13 @@ class LoggingTransaction:
         start = time.time()
 
         try:
+            with opentracing.start_active_span(
+                "db.query",
+                tags={
+                    opentracing.tags.DATABASE_TYPE: "sql",
+                    opentracing.tags.DATABASE_STATEMENT: sql,
+                },
+            ):
                 return func(sql, *args)
         except Exception as e:
             sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e)
@@ -525,7 +539,15 @@ class DatabasePool:
             exception_callbacks=exception_callbacks,
         )
         try:
+            with opentracing.start_active_span(
+                "db.txn",
+                tags={
+                    opentracing.SynapseTags.DB_TXN_DESC: desc,
+                    opentracing.SynapseTags.DB_TXN_ID: name,
+                },
+            ):
                 r = func(cursor, *args, **kwargs)
+                opentracing.log_kv({"message": "commit"})
                 conn.commit()
                 return r
         except self.engine.module.OperationalError as e:
@@ -541,6 +563,7 @@ class DatabasePool:
                 if i < N:
                     i += 1
                     try:
+                        with opentracing.start_active_span("db.rollback"):
                             conn.rollback()
                     except self.engine.module.Error as e1:
                         transaction_logger.warning("[TXN EROLL] {%s} %s", name, e1)
@@ -554,6 +577,7 @@ class DatabasePool:
                 if i < N:
                     i += 1
                     try:
+                        with opentracing.start_active_span("db.rollback"):
                             conn.rollback()
                     except self.engine.module.Error as e1:
                         transaction_logger.warning(
@@ -653,6 +677,7 @@ class DatabasePool:
             logger.warning("Starting db txn '%s' from sentinel context", desc)
 
         try:
+            with opentracing.start_active_span(f"db.{desc}"):
                 result = await self.runWithConnection(
                     self.new_transaction,
                     desc,
@@ -718,6 +743,9 @@ class DatabasePool:
             with LoggingContext(
                 str(curr_context), parent_context=parent_context
             ) as context:
+                with opentracing.start_active_span(
+                    operation_name="db.connection",
+                ):
                     sched_duration_sec = monotonic_time() - start_time
                     sql_scheduling_timer.observe(sched_duration_sec)
                     context.add_database_scheduled(sched_duration_sec)
@@ -725,6 +753,7 @@ class DatabasePool:
                     if self.engine.is_connection_closed(conn):
                         logger.debug("Reconnecting closed database connection")
                         conn.reconnect()
+                        opentracing.log_kv({"message": "reconnected"})
 
                     try:
                         if db_autocommit:
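
The spans introduced here nest one inside the other: the per-interaction span (db.<desc>) wraps connection scheduling (db.connection), which wraps each transaction attempt (db.txn), which in turn wraps individual statements (db.query). A hedged illustration of that nesting using the module's own helper; the span names follow the code above, but the structure is simplified and the inner work is invented:

    from synapse.logging import opentracing

    def _sketch_of_span_nesting() -> None:
        with opentracing.start_active_span("db.some_desc"):          # runInteraction wrapper
            with opentracing.start_active_span("db.connection"):     # waiting for / holding a connection
                with opentracing.start_active_span("db.txn"):        # one transaction attempt
                    with opentracing.start_active_span("db.query"):  # one SQL statement
                        pass  # execute the statement here
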
@@ -168,10 +168,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         backfilled,
     ):
         self._invalidate_get_event_cache(event_id)
+        self.have_seen_event.invalidate((room_id, event_id))
 
         self.get_latest_event_ids_in_room.invalidate((room_id,))
 
-        self.get_unread_event_push_actions_by_room_for_user.invalidate_many((room_id,))
+        self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,))
 
         if not backfilled:
             self._events_stream_cache.entity_has_changed(room_id, stream_ordering)
@@ -184,8 +185,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             self.get_invited_rooms_for_local_user.invalidate((state_key,))
 
         if relates_to:
-            self.get_relations_for_event.invalidate_many((relates_to,))
-            self.get_aggregation_groups_for_event.invalidate_many((relates_to,))
+            self.get_relations_for_event.invalidate((relates_to,))
+            self.get_aggregation_groups_for_event.invalidate((relates_to,))
             self.get_applicable_edit.invalidate((relates_to,))
 
     async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]):
|
@@ -1282,7 +1282,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )

         txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
-        txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,))
+        txn.call_after(self._get_cached_user_device.invalidate, (user_id,))
         txn.call_after(
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
         )
@@ -860,7 +860,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
                 not be deleted.
         """
         txn.call_after(
-            self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
+            self.get_unread_event_push_actions_by_room_for_user.invalidate,
             (room_id, user_id),
         )

@@ -1748,9 +1748,9 @@ class PersistEventsStore:
             },
         )

-        txn.call_after(self.store.get_relations_for_event.invalidate_many, (parent_id,))
+        txn.call_after(self.store.get_relations_for_event.invalidate, (parent_id,))
         txn.call_after(
-            self.store.get_aggregation_groups_for_event.invalidate_many, (parent_id,)
+            self.store.get_aggregation_groups_for_event.invalidate, (parent_id,)
         )

         if rel_type == RelationTypes.REPLACE:
@@ -1903,7 +1903,7 @@ class PersistEventsStore:

         for user_id in user_ids:
             txn.call_after(
-                self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many,
+                self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
                 (room_id, user_id),
             )

@@ -1917,7 +1917,7 @@ class PersistEventsStore:
     def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
         # Sad that we have to blow away the cache for the whole room here
         txn.call_after(
-            self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many,
+            self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
             (room_id,),
         )
         txn.execute(
@@ -22,6 +22,7 @@ from typing import (
     Iterable,
     List,
     Optional,
+    Set,
     Tuple,
     overload,
 )
@@ -55,7 +56,7 @@ from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
 from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import JsonDict, get_domain_from_id
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.iterutils import batch_iter
 from synapse.util.metrics import Measure
@@ -1045,32 +1046,74 @@ class EventsWorkerStore(SQLBaseStore):

         return {r["event_id"] for r in rows}

-    async def have_seen_events(self, event_ids):
+    async def have_seen_events(
+        self, room_id: str, event_ids: Iterable[str]
+    ) -> Set[str]:
         """Given a list of event ids, check if we have already processed them.

+        The room_id is only used to structure the cache (so that it can later be
+        invalidated by room_id) - there is no guarantee that the events are actually
+        in the room in question.
+
         Args:
-            event_ids (iterable[str]):
+            room_id: Room we are polling
+            event_ids: events we are looking for

         Returns:
             set[str]: The events we have already seen.
         """
-        # if the event cache contains the event, obviously we've seen it.
-        results = {x for x in event_ids if self._get_event_cache.contains(x)}
+        res = await self._have_seen_events_dict(
+            (room_id, event_id) for event_id in event_ids
+        )
+        return {eid for ((_rid, eid), have_event) in res.items() if have_event}

-        def have_seen_events_txn(txn, chunk):
-            sql = "SELECT event_id FROM events as e WHERE "
+    @cachedList("have_seen_event", "keys")
+    async def _have_seen_events_dict(
+        self, keys: Iterable[Tuple[str, str]]
+    ) -> Dict[Tuple[str, str], bool]:
+        """Helper for have_seen_events
+
+        Returns:
+             a dict {(room_id, event_id)-> bool}
+        """
+        # if the event cache contains the event, obviously we've seen it.
+
+        cache_results = {
+            (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,))
+        }
+        results = {x: True for x in cache_results}
+
+        def have_seen_events_txn(txn, chunk: Tuple[Tuple[str, str], ...]):
+            # we deliberately do *not* query the database for room_id, to make the
+            # query an index-only lookup on `events_event_id_key`.
+            #
+            # We therefore pull the events from the database into a set...
+
+            sql = "SELECT event_id FROM events AS e WHERE "
             clause, args = make_in_list_sql_clause(
-                txn.database_engine, "e.event_id", chunk
+                txn.database_engine, "e.event_id", [eid for (_rid, eid) in chunk]
             )
             txn.execute(sql + clause, args)
-            results.update(row[0] for row in txn)
+            found_events = {eid for eid, in txn}

-        for chunk in batch_iter((x for x in event_ids if x not in results), 100):
+            # ... and then we can update the results for each row in the batch
+            results.update({(rid, eid): (eid in found_events) for (rid, eid) in chunk})
+
+        # each batch requires its own index scan, so we make the batches as big as
+        # possible.
+        for chunk in batch_iter((k for k in keys if k not in cache_results), 500):
             await self.db_pool.runInteraction(
                 "have_seen_events", have_seen_events_txn, chunk
             )

         return results

+    @cached(max_entries=100000, tree=True)
+    async def have_seen_event(self, room_id: str, event_id: str):
+        # this only exists for the benefit of the @cachedList descriptor on
+        # _have_seen_events_dict
+        raise NotImplementedError()
+
     def _get_current_state_event_counts_txn(self, txn, room_id):
         """
         See get_current_state_event_counts.
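The rewritten `have_seen_events` keys its cache on `(room_id, event_id)` pairs, answers what it can from the cache, and only queries the database for the misses, in large batches. The following is a hedged, self-contained sketch of that shape; the cache, the "database" set, and the helper names are stand-ins rather than Synapse's API.

```python
from itertools import islice
from typing import Dict, Iterable, Set, Tuple

# Illustrative stand-ins; Synapse's real event cache and database layers differ.
_event_cache: Set[str] = {"$a", "$b"}
_db_events: Set[str] = {"$a", "$b", "$c"}


def _batch_iter(items: Iterable, size: int):
    it = iter(items)
    while chunk := tuple(islice(it, size)):
        yield chunk


def have_seen_events(keys: Iterable[Tuple[str, str]]) -> Dict[Tuple[str, str], bool]:
    keys = list(keys)
    # 1. Anything already in the event cache has obviously been seen.
    results = {(rid, eid): True for rid, eid in keys if eid in _event_cache}
    # 2. Look the rest up in big batches, one "index scan" per batch.
    misses = (k for k in keys if k not in results)
    for chunk in _batch_iter(misses, 500):
        found = {eid for _rid, eid in chunk if eid in _db_events}  # stand-in for SQL
        results.update({(rid, eid): eid in found for rid, eid in chunk})
    return results


print(have_seen_events([("!room", "$a"), ("!room", "$zzz")]))
```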
@@ -143,6 +143,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "created_ts",
                 "quarantined_by",
                 "url_cache",
+                "safe_from_quarantine",
             ),
             allow_none=True,
             desc="get_local_media",
@@ -296,12 +297,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             desc="store_local_media",
         )

-    async def mark_local_media_as_safe(self, media_id: str) -> None:
-        """Mark a local media as safe from quarantining."""
+    async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> None:
+        """Mark a local media as safe or unsafe from quarantining."""
         await self.db_pool.simple_update_one(
             table="local_media_repository",
             keyvalues={"media_id": media_id},
-            updatevalues={"safe_from_quarantine": True},
+            updatevalues={"safe_from_quarantine": safe},
             desc="mark_local_media_as_safe",
         )

@@ -16,14 +16,14 @@ import logging
 from typing import Any, List, Set, Tuple

 from synapse.api.errors import SynapseError
-from synapse.storage._base import SQLBaseStore
+from synapse.storage.databases.main import CacheInvalidationWorkerStore
 from synapse.storage.databases.main.state import StateGroupWorkerStore
 from synapse.types import RoomStreamToken

 logger = logging.getLogger(__name__)


-class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
+class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
     async def purge_history(
         self, room_id: str, token: str, delete_local_events: bool
     ) -> Set[int]:
@@ -203,8 +203,6 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
             "DELETE FROM event_to_state_groups "
             "WHERE event_id IN (SELECT event_id from events_to_purge)"
         )
-        for event_id, _ in event_rows:
-            txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))

         # Delete all remote non-state events
         for table in (
@@ -283,6 +281,20 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
         # so make sure to keep this actually last.
         txn.execute("DROP TABLE events_to_purge")

+        for event_id, should_delete in event_rows:
+            self._invalidate_cache_and_stream(
+                txn, self._get_state_group_for_event, (event_id,)
+            )
+
+            # XXX: This is racy, since have_seen_events could be called between the
+            #   transaction completing and the invalidation running. On the other hand,
+            #   that's no different to calling `have_seen_events` just before the
+            #   event is deleted from the database.
+            if should_delete:
+                self._invalidate_cache_and_stream(
+                    txn, self.have_seen_event, (room_id, event_id)
+                )
+
         logger.info("[purge] done")

         return referenced_state_groups
@@ -422,7 +434,11 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
         # index on them. In any case we should be clearing out 'stream' tables
         # periodically anyway (#5888)

-        # TODO: we could probably usefully do a bunch of cache invalidation here
+        # TODO: we could probably usefully do a bunch more cache invalidation here
+
+        # XXX: as with purge_history, this is racy, but no worse than other races
+        #   that already exist.
+        self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))

         logger.info("[purge] done")

@@ -460,7 +460,7 @@ class ReceiptsWorkerStore(SQLBaseStore):

     def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
         self.get_receipts_for_user.invalidate((user_id, receipt_type))
-        self._get_linearized_receipts_for_room.invalidate_many((room_id,))
+        self._get_linearized_receipts_for_room.invalidate((room_id,))
         self.get_last_receipt_event_id_for_user.invalidate(
             (user_id, room_id, receipt_type)
         )
@@ -659,9 +659,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
         )
         txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type))
         # FIXME: This shouldn't invalidate the whole cache
-        txn.call_after(
-            self._get_linearized_receipts_for_room.invalidate_many, (room_id,)
-        )
+        txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))

         self.db_pool.simple_delete_txn(
             txn,
@@ -764,14 +764,15 @@ class RoomWorkerStore(SQLBaseStore):
         self,
         server_name: str,
         media_id: str,
-        quarantined_by: str,
+        quarantined_by: Optional[str],
     ) -> int:
-        """quarantines a single local or remote media id
+        """quarantines or unquarantines a single local or remote media id

         Args:
             server_name: The name of the server that holds this media
             media_id: The ID of the media to be quarantined
             quarantined_by: The user ID that initiated the quarantine request
+                If it is `None` media will be removed from quarantine
         """
         logger.info("Quarantining media: %s/%s", server_name, media_id)
         is_local = server_name == self.config.server_name
@@ -838,9 +839,9 @@ class RoomWorkerStore(SQLBaseStore):
         txn,
         local_mxcs: List[str],
         remote_mxcs: List[Tuple[str, str]],
-        quarantined_by: str,
+        quarantined_by: Optional[str],
     ) -> int:
-        """Quarantine local and remote media items
+        """Quarantine and unquarantine local and remote media items

         Args:
             txn (cursor)
@@ -848,18 +849,27 @@ class RoomWorkerStore(SQLBaseStore):
             remote_mxcs: A list of (remote server, media id) tuples representing
                 remote mxc URLs
             quarantined_by: The ID of the user who initiated the quarantine request
+                If it is `None` media will be removed from quarantine
         Returns:
             The total number of media items quarantined
         """

         # Update all the tables to set the quarantined_by flag
-        txn.executemany(
-            """
+        sql = """
             UPDATE local_media_repository
             SET quarantined_by = ?
-            WHERE media_id = ? AND safe_from_quarantine = ?
-            """,
-            ((quarantined_by, media_id, False) for media_id in local_mxcs),
-        )
+            WHERE media_id = ?
+        """
+
+        # set quarantine
+        if quarantined_by is not None:
+            sql += "AND safe_from_quarantine = ?"
+            rows = [(quarantined_by, media_id, False) for media_id in local_mxcs]
+        # remove from quarantine
+        else:
+            rows = [(quarantined_by, media_id) for media_id in local_mxcs]
+
+        txn.executemany(sql, rows)
         # Note that a rowcount of -1 can be used to indicate no rows were affected.
         total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0

@@ -1498,7 +1508,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         room_id: str,
         event_id: str,
         user_id: str,
-        reason: str,
+        reason: Optional[str],
         content: JsonDict,
         received_ts: int,
     ) -> None:
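The quarantine hunk builds the UPDATE statement conditionally: when quarantining, media flagged `safe_from_quarantine` is skipped; when `quarantined_by` is `None` (unquarantining), the safety flag is ignored. Below is a rough sketch of the same logic against an in-memory sqlite3 table; the schema and names are simplified assumptions, not Synapse's real tables.

```python
import sqlite3
from typing import List, Optional

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE local_media_repository "
    "(media_id TEXT PRIMARY KEY, quarantined_by TEXT, safe_from_quarantine BOOLEAN)"
)
conn.executemany(
    "INSERT INTO local_media_repository VALUES (?, NULL, ?)",
    [("m1", False), ("m2", True)],
)


def quarantine_media(quarantined_by: Optional[str], media_ids: List[str]) -> int:
    sql = "UPDATE local_media_repository SET quarantined_by = ? WHERE media_id = ?"
    if quarantined_by is not None:
        # Quarantining: leave media that was explicitly marked safe alone.
        sql += " AND safe_from_quarantine = ?"
        rows = [(quarantined_by, media_id, False) for media_id in media_ids]
    else:
        # Unquarantining: the safety flag is irrelevant.
        rows = [(quarantined_by, media_id) for media_id in media_ids]
    cur = conn.cursor()
    cur.executemany(sql, rows)
    # rowcount can be -1 if the driver could not determine the affected rows.
    return cur.rowcount if cur.rowcount > 0 else 0


print(quarantine_media("@admin:example.com", ["m1", "m2"]))  # -> 1 (m2 was marked safe)
print(quarantine_media(None, ["m1", "m2"]))                  # -> 2
```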
@@ -15,6 +15,7 @@

 import collections
 import inspect
+import itertools
 import logging
 from contextlib import contextmanager
 from typing import (
@@ -160,8 +161,11 @@ class ObservableDeferred:
     )


+T = TypeVar("T")
+
+
 def concurrently_execute(
-    func: Callable, args: Iterable[Any], limit: int
+    func: Callable[[T], Any], args: Iterable[T], limit: int
 ) -> defer.Deferred:
     """Executes the function with each argument concurrently while limiting
     the number of concurrent executions.
@@ -173,20 +177,27 @@ def concurrently_execute(
         limit: Maximum number of conccurent executions.

     Returns:
-        Deferred[list]: Resolved when all function invocations have finished.
+        Deferred: Resolved when all function invocations have finished.
     """
     it = iter(args)

-    async def _concurrently_execute_inner():
+    async def _concurrently_execute_inner(value: T) -> None:
         try:
             while True:
-                await maybe_awaitable(func(next(it)))
+                await maybe_awaitable(func(value))
+                value = next(it)
         except StopIteration:
             pass

+    # We use `itertools.islice` to handle the case where the number of args is
+    # less than the limit, avoiding needlessly spawning unnecessary background
+    # tasks.
     return make_deferred_yieldable(
         defer.gatherResults(
-            [run_in_background(_concurrently_execute_inner) for _ in range(limit)],
+            [
+                run_in_background(_concurrently_execute_inner, value)
+                for value in itertools.islice(it, limit)
+            ],
             consumeErrors=True,
         )
     ).addErrback(unwrapFirstError)
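`concurrently_execute` now hands each background worker an initial value and spawns at most `limit` workers via `itertools.islice`, so passing fewer arguments than the limit no longer spawns idle workers. Synapse's version is Twisted-based; the following is an asyncio sketch of the same worker-pool idea, with illustrative names.

```python
import asyncio
import itertools
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")


async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    it = iter(args)

    async def worker(value: T) -> None:
        try:
            while True:
                await func(value)
                value = next(it)  # pull the next job for this worker
        except StopIteration:
            pass

    # islice spawns at most `limit` workers, and fewer if there are fewer args.
    await asyncio.gather(*(worker(value) for value in itertools.islice(it, limit)))


async def main() -> None:
    async def show(n: int) -> None:
        await asyncio.sleep(0)
        print("processed", n)

    await concurrently_execute(show, range(5), limit=3)


asyncio.run(main())
```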
@@ -25,10 +25,11 @@ from typing import (
     TypeVar,
 )

+from prometheus_client import Gauge
+
 from twisted.internet import defer

 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
-from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util import Clock

@@ -38,6 +39,24 @@ logger = logging.getLogger(__name__)
 V = TypeVar("V")
 R = TypeVar("R")

+number_queued = Gauge(
+    "synapse_util_batching_queue_number_queued",
+    "The number of items waiting in the queue across all keys",
+    labelnames=("name",),
+)
+
+number_in_flight = Gauge(
+    "synapse_util_batching_queue_number_pending",
+    "The number of items across all keys either being processed or waiting in a queue",
+    labelnames=("name",),
+)
+
+number_of_keys = Gauge(
+    "synapse_util_batching_queue_number_of_keys",
+    "The number of distinct keys that have items queued",
+    labelnames=("name",),
+)
+

 class BatchingQueue(Generic[V, R]):
     """A queue that batches up work, calling the provided processing function
@@ -48,10 +67,20 @@ class BatchingQueue(Generic[V, R]):
     called, and will keep being called until the queue has been drained (for the
     given key).

+    If the processing function raises an exception then the exception is proxied
+    through to the callers waiting on that batch of work.
+
     Note that the return value of `add_to_queue` will be the return value of the
     processing function that processed the given item. This means that the
     returned value will likely include data for other items that were in the
     batch.

+    Args:
+        name: A name for the queue, used for logging contexts and metrics.
+            This must be unique, otherwise the metrics will be wrong.
+        clock: The clock to use to schedule work.
+        process_batch_callback: The callback to to be run to process a batch of
+            work.
     """

     def __init__(
@@ -73,19 +102,15 @@ class BatchingQueue(Generic[V, R]):
         # The function to call with batches of values.
         self._process_batch_callback = process_batch_callback

-        LaterGauge(
-            "synapse_util_batching_queue_number_queued",
-            "The number of items waiting in the queue across all keys",
-            labels=("name",),
-            caller=lambda: sum(len(v) for v in self._next_values.values()),
+        number_queued.labels(self._name).set_function(
+            lambda: sum(len(q) for q in self._next_values.values())
         )

-        LaterGauge(
-            "synapse_util_batching_queue_number_of_keys",
-            "The number of distinct keys that have items queued",
-            labels=("name",),
-            caller=lambda: len(self._next_values),
-        )
+        number_of_keys.labels(self._name).set_function(lambda: len(self._next_values))
+
+        self._number_in_flight_metric = number_in_flight.labels(
+            self._name
+        )  # type: Gauge

     async def add_to_queue(self, value: V, key: Hashable = ()) -> R:
         """Adds the value to the queue with the given key, returning the result
@@ -107,6 +132,7 @@ class BatchingQueue(Generic[V, R]):
         if key not in self._processing_keys:
             run_as_background_process(self._name, self._process_queue, key)

+        with self._number_in_flight_metric.track_inprogress():
             return await make_deferred_yieldable(d)

     async def _process_queue(self, key: Hashable) -> None:
@@ -114,10 +140,10 @@ class BatchingQueue(Generic[V, R]):
         given key and call the `self._process_batch_callback` with the values.
         """

-        try:
         if key in self._processing_keys:
             return

+        try:
             self._processing_keys.add(key)

             while True:
@@ -137,16 +163,16 @@ class BatchingQueue(Generic[V, R]):
                     values = [value for value, _ in next_values]
                     results = await self._process_batch_callback(values)

-                    for _, deferred in next_values:
                     with PreserveLoggingContext():
+                        for _, deferred in next_values:
                             deferred.callback(results)

                 except Exception as e:
+                    with PreserveLoggingContext():
                         for _, deferred in next_values:
                             if deferred.called:
                                 continue

-                        with PreserveLoggingContext():
                             deferred.errback(e)

         finally:
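The `BatchingQueue` hunks swap Synapse's `LaterGauge` for plain `prometheus_client.Gauge`s: `set_function` makes the queue depth be computed lazily at scrape time, and `track_inprogress()` counts callers currently waiting on a batch. A minimal sketch of those two prometheus_client APIs follows; the metric names and the `pending` dict here are demo values, not Synapse's.

```python
from prometheus_client import Gauge, generate_latest

number_queued = Gauge(
    "demo_batching_queue_number_queued",
    "The number of items waiting in the queue across all keys",
    labelnames=("name",),
)
number_in_flight = Gauge(
    "demo_batching_queue_number_pending",
    "The number of items being processed or waiting in a queue",
    labelnames=("name",),
)

pending = {"room1": [1, 2], "room2": [3]}

# set_function: the value is recomputed each time the metric is scraped.
number_queued.labels("demo_queue").set_function(
    lambda: sum(len(q) for q in pending.values())
)

# track_inprogress: increments the gauge on entry and decrements it on exit.
with number_in_flight.labels("demo_queue").track_inprogress():
    print(generate_latest().decode())
```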
@@ -16,16 +16,7 @@

 import enum
 import threading
-from typing import (
-    Callable,
-    Generic,
-    Iterable,
-    MutableMapping,
-    Optional,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union

 from prometheus_client import Gauge

@@ -91,7 +82,7 @@ class DeferredCache(Generic[KT, VT]):
         # _pending_deferred_cache maps from the key value to a `CacheEntry` object.
         self._pending_deferred_cache = (
             cache_type()
-        )  # type: MutableMapping[KT, CacheEntry]
+        )  # type: Union[TreeCache, MutableMapping[KT, CacheEntry]]

         def metrics_cb():
             cache_pending_metric.labels(name).set(len(self._pending_deferred_cache))
@@ -287,8 +278,17 @@ class DeferredCache(Generic[KT, VT]):
         self.cache.set(key, value, callbacks=callbacks)

     def invalidate(self, key):
+        """Delete a key, or tree of entries
+
+        If the cache is backed by a regular dict, then "key" must be of
+        the right type for this cache
+
+        If the cache is backed by a TreeCache, then "key" must be a tuple, but
+        may be of lower cardinality than the TreeCache - in which case the whole
+        subtree is deleted.
+        """
         self.check_thread()
-        self.cache.pop(key, None)
+        self.cache.del_multi(key)

         # if we have a pending lookup for this key, remove it from the
         # _pending_deferred_cache, which will (a) stop it being returned
@@ -299,20 +299,10 @@ class DeferredCache(Generic[KT, VT]):
         # run the invalidation callbacks now, rather than waiting for the
         # deferred to resolve.
         if entry:
-            entry.invalidate()
-
-    def invalidate_many(self, key: KT):
-        self.check_thread()
-        if not isinstance(key, tuple):
-            raise TypeError("The cache key must be a tuple not %r" % (type(key),))
-        key = cast(KT, key)
-        self.cache.del_multi(key)
-
-        # if we have a pending lookup for this key, remove it from the
-        # _pending_deferred_cache, as above
-        entry_dict = self._pending_deferred_cache.pop(key, None)
-        if entry_dict is not None:
-            for entry in iterate_tree_cache_entry(entry_dict):
+            # _pending_deferred_cache.pop should either return a CacheEntry, or, in the
+            # case of a TreeCache, a dict of keys to cache entries. Either way calling
+            # iterate_tree_cache_entry on it will do the right thing.
+            for entry in iterate_tree_cache_entry(entry):
                 entry.invalidate()

     def invalidate_all(self):
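`DeferredCache.invalidate` now always goes through `del_multi`, so with a tree-backed cache a full key removes one entry while a shorter tuple removes the whole subtree (for example, everything cached under a room). The following is a rough nested-dict sketch of that idea, not Synapse's `TreeCache` implementation.

```python
from typing import Any, Dict, Tuple


class TreeCacheSketch:
    """Nested-dict cache keyed by tuples; a shorter tuple names a whole subtree."""

    def __init__(self) -> None:
        self._root: Dict = {}

    def set(self, key: Tuple, value: Any) -> None:
        node = self._root
        for part in key[:-1]:
            node = node.setdefault(part, {})
        node[key[-1]] = value

    def del_multi(self, key: Tuple) -> None:
        # Deleting ("!room",) drops every ("!room", ...) entry at once.
        node = self._root
        for part in key[:-1]:
            node = node.get(part, {})
        node.pop(key[-1], None)


cache = TreeCacheSketch()
cache.set(("!room", "$event1"), True)
cache.set(("!room", "$event2"), True)
cache.del_multi(("!room",))        # prefix key: invalidates the whole room's subtree
cache.del_multi(("!other", "$x"))  # full key: invalidates a single entry (no-op here)
```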
@@ -48,7 +48,6 @@ F = TypeVar("F", bound=Callable[..., Any])
 class _CachedFunction(Generic[F]):
     invalidate = None  # type: Any
     invalidate_all = None  # type: Any
-    invalidate_many = None  # type: Any
     prefill = None  # type: Any
     cache = None  # type: Any
     num_args = None  # type: Any
@@ -262,6 +261,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
     ):
         super().__init__(orig, num_args=num_args, cache_context=cache_context)

+        if tree and self.num_args < 2:
+            raise RuntimeError(
+                "tree=True is nonsensical for cached functions with a single parameter"
+            )
+
         self.max_entries = max_entries
         self.tree = tree
         self.iterable = iterable
@@ -302,11 +306,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
         wrapped = cast(_CachedFunction, _wrapped)

         if self.num_args == 1:
+            assert not self.tree
             wrapped.invalidate = lambda key: cache.invalidate(key[0])
             wrapped.prefill = lambda key, val: cache.prefill(key[0], val)
         else:
             wrapped.invalidate = cache.invalidate
-            wrapped.invalidate_many = cache.invalidate_many
             wrapped.prefill = cache.prefill

         wrapped.invalidate_all = cache.invalidate_all
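The descriptor changes drop `invalidate_many` and reject `tree=True` on single-argument functions, since a one-argument key has no prefix to invalidate by. Below is a toy memoizing decorator illustrating how a wrapped function can expose `invalidate`, with the same guard; it is purely illustrative and not Synapse's `@cached` implementation.

```python
import functools
from typing import Any, Callable, Dict, Tuple


def cached(tree: bool = False) -> Callable:
    """Illustrative stand-in for a @cached descriptor."""

    def decorator(func: Callable) -> Callable:
        num_args = func.__code__.co_argcount
        if tree and num_args < 2:
            # Mirrors the new guard: a one-argument cache has no key prefix
            # to invalidate, so tree=True would be meaningless.
            raise RuntimeError(
                "tree=True is nonsensical for cached functions with a single parameter"
            )
        cache: Dict[Tuple, Any] = {}

        @functools.wraps(func)
        def wrapped(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

        # Callers invalidate through the wrapped function, e.g. square.invalidate((3,)).
        wrapped.invalidate = lambda key: cache.pop(key, None)
        return wrapped

    return decorator


@cached()
def square(x: int) -> int:
    return x * x


print(square(3))
square.invalidate((3,))
```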
Some files were not shown because too many files have changed in this diff.