Merge remote-tracking branch 'upstream/release-v1.46'

Tulir Asokan 2021-10-27 15:42:34 +03:00
commit cf45cfd314
172 changed files with 5549 additions and 2350 deletions


@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Test for the export-data admin command against sqlite and postgres
set -xe
cd `dirname $0`/../..
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
exit 1
fi
# Create the PostgreSQL database.
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to Postgres so we can check the command works against Postgres
echo "+++ Port SQLite3 database to postgres"
scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on the Postgres database
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."
exit 1
fi


@ -122,6 +122,8 @@ jobs:
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
@ -146,6 +148,8 @@ jobs:
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
@ -176,6 +180,8 @@ jobs:
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
@ -247,6 +253,35 @@ jobs:
/logs/results.tap
/logs/**/*.log*
  export-data:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: [linting-done, portdb]
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}
    services:
      postgres:
        image: postgres
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: actions/setup-python@v2
        with:
          python-version: "3.9"
      - run: .ci/scripts/test_export_data_command.sh
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done


@ -33,6 +33,8 @@ jobs:
TRIAL_FLAGS: "--jobs=2" TRIAL_FLAGS: "--jobs=2"
- name: Dump logs - name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair


@ -1,3 +1,80 @@
Synapse 1.46.0rc1 (2021-10-26)
==============================
The cause of the [performance regression affecting Synapse 1.44](https://github.com/matrix-org/synapse/issues/11049) has been identified and fixed. ([\#11177](https://github.com/matrix-org/synapse/issues/11177))
Features
--------
- Add support for Ubuntu 21.10 "Impish Indri". ([\#11024](https://github.com/matrix-org/synapse/issues/11024))
- Port the Password Auth Providers module interface to the new generic interface. ([\#10548](https://github.com/matrix-org/synapse/issues/10548), [\#11180](https://github.com/matrix-org/synapse/issues/11180))
- Experimental support for the thread relation defined in [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11088](https://github.com/matrix-org/synapse/issues/11088), [\#11181](https://github.com/matrix-org/synapse/issues/11181), [\#11192](https://github.com/matrix-org/synapse/issues/11192))
- Users admin API can now also modify user type in addition to allowing it to be set on user creation. ([\#11174](https://github.com/matrix-org/synapse/issues/11174))
Bugfixes
--------
- Newly-created public rooms are now only assigned an alias if the room's creation has not been blocked by permission settings. Contributed by @AndrewFerr. ([\#10930](https://github.com/matrix-org/synapse/issues/10930))
- Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. ([\#11001](https://github.com/matrix-org/synapse/issues/11001), [\#11009](https://github.com/matrix-org/synapse/issues/11009), [\#11012](https://github.com/matrix-org/synapse/issues/11012))
- Fix 500 error on `/messages` when the server accumulates more than 5 backwards extremities at a given depth for a room. ([\#11027](https://github.com/matrix-org/synapse/issues/11027))
- Fix a bug where setting a user's `external_id` via the admin API returns 500 and deletes user's existing external mappings if that external ID is already mapped. ([\#11051](https://github.com/matrix-org/synapse/issues/11051))
- Fix a long-standing bug where users excluded from the user directory were added into the directory if they belonged to a room which became public or private. ([\#11075](https://github.com/matrix-org/synapse/issues/11075))
- Fix a long-standing bug when attempting to preview URLs which are in the `windows-1252` character encoding. ([\#11077](https://github.com/matrix-org/synapse/issues/11077), [\#11089](https://github.com/matrix-org/synapse/issues/11089))
- Fix broken export-data admin command and add test script checking the command to CI. ([\#11078](https://github.com/matrix-org/synapse/issues/11078))
- Show an error when timestamp in seconds is provided to the `/purge_media_cache` Admin API. ([\#11101](https://github.com/matrix-org/synapse/issues/11101))
- Fix local users who left all their rooms being removed from the user directory, even if the `search_all_users` config option was enabled. ([\#11103](https://github.com/matrix-org/synapse/issues/11103))
- Fix a bug which caused the module API's `get_user_ip_and_agents` function to always fail on workers. `get_user_ip_and_agents` was introduced in 1.44.0 and did not function correctly on worker processes at the time. ([\#11112](https://github.com/matrix-org/synapse/issues/11112))
- Connections to the identity server no longer ignore `ip_range_whitelist`. ([\#11120](https://github.com/matrix-org/synapse/issues/11120))
- Fix a bug introduced in Synapse 1.45.0 breaking the configuration file parsing script. ([\#11145](https://github.com/matrix-org/synapse/issues/11145))
- Fix a performance regression introduced in 1.44.0 which could cause client requests to time out when making large numbers of outbound requests. ([\#11177](https://github.com/matrix-org/synapse/issues/11177), [\#11190](https://github.com/matrix-org/synapse/issues/11190))
- Resolve and share `state_groups` for all [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical events in batch. ([\#10975](https://github.com/matrix-org/synapse/issues/10975))
Improved Documentation
----------------------
- Fix broken links relating to module API deprecation in the upgrade notes. ([\#11069](https://github.com/matrix-org/synapse/issues/11069))
- Add more information about what happens when a user is deactivated. ([\#11083](https://github.com/matrix-org/synapse/issues/11083))
- Clarify that the sample log config can be copied from the documentation without issue. ([\#11092](https://github.com/matrix-org/synapse/issues/11092))
- Update the admin API documentation with an updated list of the characters allowed in registration tokens. ([\#11093](https://github.com/matrix-org/synapse/issues/11093))
- Document Synapse's behaviour when dealing with multiple modules registering the same callbacks and/or handlers for the same HTTP endpoints. ([\#11096](https://github.com/matrix-org/synapse/issues/11096))
- Fix instances of `[example]{.title-ref}` in the upgrade documentation as a result of prior RST to Markdown conversion. ([\#11118](https://github.com/matrix-org/synapse/issues/11118))
- Document the version of Synapse each module callback was introduced in. ([\#11132](https://github.com/matrix-org/synapse/issues/11132))
- Document the version of Synapse that introduced each module API method. ([\#11183](https://github.com/matrix-org/synapse/issues/11183))
Internal Changes
----------------
- Fix spurious warnings about losing the logging context on the `ReplicationCommandHandler` when losing the replication connection. ([\#10984](https://github.com/matrix-org/synapse/issues/10984))
- Include rejected status when we log events. ([\#11008](https://github.com/matrix-org/synapse/issues/11008))
- Add some extra logging to the event persistence code. ([\#11014](https://github.com/matrix-org/synapse/issues/11014))
- Rearrange the internal workings of the incremental user directory updates. ([\#11035](https://github.com/matrix-org/synapse/issues/11035))
- Fix a long-standing bug where users excluded from the directory could still be added to the `users_who_share_private_rooms` table after a regular user joins a private room. ([\#11143](https://github.com/matrix-org/synapse/issues/11143))
- Add and improve type hints. ([\#10972](https://github.com/matrix-org/synapse/issues/10972), [\#11055](https://github.com/matrix-org/synapse/issues/11055), [\#11066](https://github.com/matrix-org/synapse/issues/11066), [\#11076](https://github.com/matrix-org/synapse/issues/11076), [\#11095](https://github.com/matrix-org/synapse/issues/11095), [\#11109](https://github.com/matrix-org/synapse/issues/11109), [\#11121](https://github.com/matrix-org/synapse/issues/11121), [\#11146](https://github.com/matrix-org/synapse/issues/11146))
- Mark the Synapse package as containing type annotations and fix export declarations so that Synapse pluggable modules may be type checked against Synapse. ([\#11054](https://github.com/matrix-org/synapse/issues/11054))
- Remove dead code from `MediaFilePaths`. ([\#11056](https://github.com/matrix-org/synapse/issues/11056))
- Be more lenient when parsing oEmbed response versions. ([\#11065](https://github.com/matrix-org/synapse/issues/11065))
- Create a separate module for the retention configuration. ([\#11070](https://github.com/matrix-org/synapse/issues/11070))
- Clean up some of the federation event authentication code for clarity. ([\#11115](https://github.com/matrix-org/synapse/issues/11115), [\#11116](https://github.com/matrix-org/synapse/issues/11116), [\#11122](https://github.com/matrix-org/synapse/issues/11122))
- Add docstrings and comments to the application service ephemeral event sending code. ([\#11138](https://github.com/matrix-org/synapse/issues/11138))
- Update the `sign_json` script to support inline configuration of the signing key. ([\#11139](https://github.com/matrix-org/synapse/issues/11139))
- Fix broken link in the docker image README. ([\#11144](https://github.com/matrix-org/synapse/issues/11144))
- Always dump logs from unit tests during CI runs. ([\#11068](https://github.com/matrix-org/synapse/issues/11068))
- Add tests for `MediaFilePaths` class. ([\#11057](https://github.com/matrix-org/synapse/issues/11057))
- Simplify the user admin API tests. ([\#11048](https://github.com/matrix-org/synapse/issues/11048))
- Add a test for the workaround introduced in [\#11042](https://github.com/matrix-org/synapse/pull/11042) concerning the behaviour of third-party rule modules and `SynapseError`s. ([\#11071](https://github.com/matrix-org/synapse/issues/11071))
Synapse 1.45.1 (2021-10-20)
===========================
Bugfixes
--------
- Revert change to counting of deactivated users towards the monthly active users limit, introduced in 1.45.0rc1. ([\#11127](https://github.com/matrix-org/synapse/issues/11127))
Synapse 1.45.0 (2021-10-19)
===========================


@ -8,6 +8,7 @@ include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite

debian/changelog

@ -1,3 +1,15 @@
matrix-synapse-py3 (1.46.0~rc1) stable; urgency=medium
* New synapse release 1.46.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Oct 2021 14:04:04 +0100
matrix-synapse-py3 (1.45.1) stable; urgency=medium
* New synapse release 1.45.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Oct 2021 11:58:27 +0100
matrix-synapse-py3 (1.45.0) stable; urgency=medium
* New synapse release 1.45.0.


@ -226,4 +226,5 @@ healthcheck:
## Using jemalloc
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).


@ -43,6 +43,7 @@
- [Third-party rules callbacks](modules/third_party_rules_callbacks.md)
- [Presence router callbacks](modules/presence_router_callbacks.md)
- [Account validity callbacks](modules/account_validity_callbacks.md)
- [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)


@ -257,9 +257,9 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
Files that were last used before this timestamp will be deleted. It is the timestamp of
last access, not the timestamp when the file was created.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
@ -302,7 +302,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
URL Parameters
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
All cached media that was last accessed before this timestamp will be removed.
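As a rough illustration, a purge request could be driven from Python as in the sketch below (the homeserver URL and admin token are placeholders):

```python
import time

import requests

HOMESERVER = "https://matrix.example.com"  # placeholder
ADMIN_TOKEN = "syt_..."  # placeholder: an access token for a server admin

# Purge cached remote media that has not been accessed for 30 days.
before_ts = int(time.time() * 1000) - 30 * 24 * 60 * 60 * 1000  # milliseconds

resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/purge_media_cache",
    params={"before_ts": str(before_ts)},
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
print(resp.json())
```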
Response:


@ -50,7 +50,8 @@ It returns a JSON body like the following:
"auth_provider": "<provider2>", "auth_provider": "<provider2>",
"external_id": "<user_id_provider_2>" "external_id": "<user_id_provider_2>"
} }
] ],
"user_type": null
}
```
@ -97,7 +98,8 @@ with a body of:
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false,
"user_type": null
}
```
@ -135,6 +137,9 @@ Body parameters:
unchanged on existing accounts and set to `false` for new accounts.
A user cannot be erased by deactivating with this API. For details on
deactivating users see [Deactivate Account](#deactivate-account).
- `user_type` - string or null, optional. If provided, the user type will be
adjusted. If `null` is given, the user type will be cleared. Other
allowed options are: `bot` and `support`.
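For illustration, changing an existing account's type might look like the sketch below (assuming the `PUT /_synapse/admin/v2/users/<user_id>` endpoint described above; the URL and token are placeholders):

```python
import requests

HOMESERVER = "https://matrix.example.com"  # placeholder
ADMIN_TOKEN = "syt_..."  # placeholder admin access token
USER_ID = "@alice:example.com"

# Mark the account as a bot; send None (JSON null) instead to clear the type.
resp = requests.put(
    f"{HOMESERVER}/_synapse/admin/v2/users/{USER_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"user_type": "bot"},
)
resp.raise_for_status()
```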
If the user already exists then optional parameters default to the current value.
@ -341,6 +346,7 @@ The following actions are performed when deactivating a user:
- Remove all 3PIDs from the homeserver
- Delete all devices and E2EE keys
- Delete all access tokens
- Delete all pushers
- Delete the password hash
- Removal from all rooms the user is a member of
- Remove the user from the user directory
@ -354,6 +360,15 @@ is set to `true`:
- Remove the user's avatar URL
- Mark the user as erased
The following actions are **NOT** performed. The list may be incomplete.
- Remove mappings of SSO IDs
- [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (including avatar images)
- Delete sent and received messages
- Delete E2E cross-signing keys
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
## Reset password


@ -9,6 +9,8 @@ The available account validity callbacks are:
### `is_user_expired`
_First introduced in Synapse v1.39.0_
```python
async def is_user_expired(user: str) -> Optional[bool]
```
@ -22,8 +24,15 @@ If the module returns `True`, the current request will be denied with the error
`ORG_MATRIX_EXPIRED_ACCOUNT` and the HTTP status code 403. Note that this doesn't
invalidate the user's access token.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `None`, Synapse falls through to the next one. The value of the first
callback that does not return `None` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `on_user_registration`
_First introduced in Synapse v1.39.0_
```python
async def on_user_registration(user: str) -> None
```
@ -31,3 +40,5 @@ async def on_user_registration(user: str) -> None
Called after successfully registering a user, in case the module needs to perform extra
operations to keep track of them. (e.g. add them to a database table). The user is
represented by their Matrix user ID.
If multiple modules implement this callback, Synapse runs them all in order.
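As a minimal sketch, a module implementing both callbacks might look like the following (the 30-day validity window and the in-memory store are illustrative assumptions, not part of the API):

```python
import time
from typing import Optional

from synapse.module_api import ModuleApi


class MyAccountValidityModule:
    def __init__(self, config: dict, api: ModuleApi):
        self.api = api
        # Illustrative only: a real module would persist this somewhere durable.
        self.expiry_ms = {}  # user ID -> expiry timestamp in milliseconds
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
            on_user_registration=self.on_user_registration,
        )

    async def is_user_expired(self, user: str) -> Optional[bool]:
        expiry = self.expiry_ms.get(user)
        if expiry is None:
            return None  # not our user: fall through to the next module
        return time.time() * 1000 >= expiry

    async def on_user_registration(self, user: str) -> None:
        # Give newly registered accounts 30 days of validity.
        self.expiry_ms[user] = int(time.time() * 1000) + 30 * 24 * 3600 * 1000
```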


@ -2,6 +2,11 @@
Synapse supports extending its functionality by configuring external modules.
**Note**: When using third-party modules, you effectively allow someone else to run
custom code on your Synapse homeserver. Server admins are encouraged to verify the
provenance of the modules they use on their homeserver and make sure the modules aren't
running malicious code on their instance.
## Using modules
To use a module on Synapse, add it to the `modules` section of the configuration file:
@ -18,17 +23,31 @@ modules:
Each module is defined by a path to a Python class as well as a configuration. This
information for a given module should be available in the module's own documentation.
## Using multiple modules
The order in which modules are listed in this section is important. When processing an
action that can be handled by several modules, Synapse will always prioritise the module
that appears first (i.e. is the highest in the list). This means:
* If several modules register the same callback, the callback registered by the module
that appears first is used.
* If several modules try to register a handler for the same HTTP path, only the handler
registered by the module that appears first is used. Handlers registered by the other
module(s) are ignored and Synapse will log a warning message about them.
Note that Synapse doesn't allow multiple modules implementing authentication checkers via
the password auth provider feature for the same login type with different fields. If this
happens, Synapse will refuse to start.
## Current status
We are currently in the process of migrating module interfaces to this system. While some
interfaces might be compatible with it, others still require configuring modules in
another part of Synapse's configuration file.
Currently, only the following pre-existing interfaces are compatible with this new system:
* spam checker
* third-party rules
* presence router
* password auth providers


@ -0,0 +1,176 @@
# Password auth provider callbacks
Password auth providers offer a way for server administrators to integrate
their Synapse installation with an external authentication system. The callbacks can be
registered by using the Module API's `register_password_auth_provider_callbacks` method.
## Callbacks
### `auth_checkers`
_First introduced in Synapse v1.46.0_
```
auth_checkers: Dict[Tuple[str,Tuple], Callable]
```
A dict mapping from tuples of a login type identifier (such as `m.login.password`) and a
tuple of field names (such as `("password", "secret_thing")`) to authentication checking
callbacks, which should be of the following form:
```python
async def check_auth(
    user: str,
    login_type: str,
    login_dict: "synapse.module_api.JsonDict",
) -> Optional[
    Tuple[
        str,
        Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]]
    ]
]
```
The login type and field names should be provided by the user in the
request to the `/login` API. [The Matrix specification](https://matrix.org/docs/spec/client_server/latest#authentication-types)
defines some types, but user-defined ones are also allowed.
The callback is passed the `user` field provided by the client (which might not be in
`@username:server` form), the login type, and a dictionary of login secrets passed by
the client.
If the authentication is successful, the module must return the user's Matrix ID (e.g.
`@alice:example.com`) and optionally a callback to be called with the response to the
`/login` request. If the module doesn't wish to return a callback, it must return `None`
instead.
If the authentication is unsuccessful, the module must return `None`.
If multiple modules register an auth checker for the same login type but with different
fields, Synapse will refuse to start.
If multiple modules register an auth checker for the same login type with the same fields,
then the callbacks will be executed in order, until one returns a Matrix User ID (and
optionally a callback). In that case, the return value of that callback will be accepted
and subsequent callbacks will not be fired. If every callback returns `None`, then the
authentication fails.
### `check_3pid_auth`
_First introduced in Synapse v1.46.0_
```python
async def check_3pid_auth(
    medium: str,
    address: str,
    password: str,
) -> Optional[
    Tuple[
        str,
        Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]]
    ]
]
```
Called when a user attempts to register or log in with a third party identifier,
such as email. It is passed the medium (e.g. `email`), an address (e.g. `jdoe@example.com`)
and the user's password.
If the authentication is successful, the module must return the user's Matrix ID (e.g.
`@alice:example.com`) and optionally a callback to be called with the response to the `/login` request.
If the module doesn't wish to return a callback, it must return `None` instead.
If the authentication is unsuccessful, the module must return `None`.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `None`, Synapse falls through to the next one. The value of the first
callback that does not return `None` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback. If every callback returns `None`,
the authentication is denied.
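A minimal sketch of such a checker follows (the lookup table is a stand-in for a real external backend):

```python
from typing import Awaitable, Callable, Optional, Tuple

# Stand-in for a real external store of email -> (Matrix ID, password).
KNOWN_3PIDS = {"jdoe@example.com": ("@jdoe:example.com", "hunter2")}


async def check_3pid_auth(
    medium: str,
    address: str,
    password: str,
) -> Optional[Tuple[str, Optional[Callable[..., Awaitable[None]]]]]:
    if medium != "email":
        return None  # not handled here: fall through to the next module
    entry = KNOWN_3PIDS.get(address)
    if entry is None or entry[1] != password:
        return None
    return entry[0], None  # Matrix ID, and no post-login callback
```

The checker would be registered with `register_password_auth_provider_callbacks(check_3pid_auth=check_3pid_auth)`.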
### `on_logged_out`
_First introduced in Synapse v1.46.0_
```python
async def on_logged_out(
    user_id: str,
    device_id: Optional[str],
    access_token: str
) -> None
```
Called during a logout request for a user. It is passed the qualified user ID, the ID of the
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
If multiple modules implement this callback, Synapse runs them all in order.
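For instance, a module might use this hook to tell an external session store about the logout. A sketch (`my_session_store` is hypothetical):

```python
import logging
from typing import Optional

logger = logging.getLogger(__name__)


async def on_logged_out(
    user_id: str,
    device_id: Optional[str],
    access_token: str,
) -> None:
    # Best-effort notification; the token is already deactivated at this point.
    logger.info("User %s logged out of device %s", user_id, device_id)
    # await my_session_store.invalidate(user_id, device_id)  # hypothetical
```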
## Example
The example module below implements authentication checkers for two different login types:
- `my.login_type`
- Expects a `my_field` field to be sent to `/login`
- Is checked by the method: `self.check_my_login`
- `m.login.password` (defined in [the spec](https://matrix.org/docs/spec/client_server/latest#password-based))
- Expects a `password` field to be sent to `/login`
- Is checked by the method: `self.check_pass`
```python
from typing import Awaitable, Callable, Optional, Tuple

import synapse
from synapse import module_api


class MyAuthProvider:
    def __init__(self, config: dict, api: module_api):

        self.api = api

        self.credentials = {
            "bob": "building",
            "@scoop:matrix.org": "digging",
        }

        api.register_password_auth_provider_callbacks(
            auth_checkers={
                ("my.login_type", ("my_field",)): self.check_my_login,
                ("m.login.password", ("password",)): self.check_pass,
            },
        )

    async def check_my_login(
        self,
        username: str,
        login_type: str,
        login_dict: "synapse.module_api.JsonDict",
    ) -> Optional[
        Tuple[
            str,
            Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]],
        ]
    ]:
        if login_type != "my.login_type":
            return None

        if self.credentials.get(username) == login_dict.get("my_field"):
            # Return the qualified user ID, with no post-login callback.
            return (self.api.get_qualified_user_id(username), None)

        return None

    async def check_pass(
        self,
        username: str,
        login_type: str,
        login_dict: "synapse.module_api.JsonDict",
    ) -> Optional[
        Tuple[
            str,
            Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]],
        ]
    ]:
        if login_type != "m.login.password":
            return None

        if self.credentials.get(username) == login_dict.get("password"):
            return (self.api.get_qualified_user_id(username), None)

        return None
```


@ -12,6 +12,9 @@ should register this resource in its `__init__` method using the `register_web_r
method from the `ModuleApi` class (see [this section](writing_a_module.html#registering-a-web-resource) for
more info).
There is no longer a `get_db_schema_files` callback provided for password auth provider modules. Any
changes to the database should now be made by the module using the module API class.
The module's author should also update any example in the module's configuration to only
use the new `modules` section in Synapse's configuration file (see [this section](index.html#using-modules)
for more info).


@ -10,6 +10,8 @@ The available presence router callbacks are:
### `get_users_for_states`
_First introduced in Synapse v1.42.0_
```python
async def get_users_for_states(
    state_updates: Iterable["synapse.api.UserPresenceState"],
@ -24,8 +26,14 @@ must return a dictionary that maps from Matrix user IDs (which can be local or r
Synapse will then attempt to send the specified presence updates to each user when possible.
If multiple modules implement this callback, Synapse merges all the dictionaries returned
by the callbacks. If multiple callbacks return a dictionary containing the same key,
Synapse concatenates the sets associated with this key from each dictionary.
### `get_interested_users`
_First introduced in Synapse v1.42.0_
```python
async def get_interested_users(
    user_id: str
@ -44,6 +52,12 @@ query. The returned users can be local or remote.
Alternatively the callback can return `synapse.module_api.PRESENCE_ALL_USERS`
to indicate that the user should receive updates from all known users.
If multiple modules implement this callback, they will be considered in order. Synapse
calls each callback one by one, and uses the concatenation of all the `set`s returned by the
callbacks. If one callback returns `synapse.module_api.PRESENCE_ALL_USERS`, Synapse uses
this value instead. If this happens, Synapse does not call any of the subsequent
implementations of this callback.
## Example
The example below is a module that implements both presence router callbacks, and ensures


@ -10,6 +10,8 @@ The available spam checker callbacks are:
### `check_event_for_spam`
_First introduced in Synapse v1.37.0_
```python
async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
```
@ -19,8 +21,15 @@ either a `bool` to indicate whether the event must be rejected because of spam,
to indicate the event must be rejected because of spam and to give a rejection reason to
forward to clients.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
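As a minimal sketch (the banned phrase is illustrative), a module using this callback might look like:

```python
from typing import Union

from synapse.module_api import ModuleApi


class MyWordFilter:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam,
        )

    async def check_event_for_spam(self, event) -> Union[bool, str]:
        body = event.content.get("body", "")
        if "buy cheap doge" in body.lower():
            # A string is treated as a rejection, with the reason sent to the client.
            return "Advertising is not allowed here"
        return False  # not spam as far as this module is concerned
```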
### `user_may_join_room`
_First introduced in Synapse v1.37.0_
```python
async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
```
@ -34,8 +43,15 @@ currently has a pending invite in the room.
This callback isn't called if the join is performed by a server administrator, or in the
context of a room creation.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_invite`
_First introduced in Synapse v1.37.0_
```python
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
```
@ -44,8 +60,15 @@ Called when processing an invitation. The module must return a `bool` indicating
the inviter can invite the invitee to the given room. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_send_3pid_invite`
_First introduced in Synapse v1.45.0_
```python
async def user_may_send_3pid_invite(
    inviter: str,
@ -79,8 +102,15 @@ await user_may_send_3pid_invite(
**Note**: If the third-party identifier is already associated with a matrix user ID,
[`user_may_invite`](#user_may_invite) will be used instead.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_create_room`
_First introduced in Synapse v1.37.0_
```python
async def user_may_create_room(user: str) -> bool
```
@ -88,8 +118,15 @@ async def user_may_create_room(user: str) -> bool
Called when processing a room creation request. The module must return a `bool` indicating
whether the given user (represented by their Matrix user ID) is allowed to create a room.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_create_room_with_invites`
_First introduced in Synapse v1.44.0_
```python
async def user_may_create_room_with_invites(
    user: str,
@ -117,8 +154,15 @@ corresponding list(s) will be empty.
since no invites are sent when cloning a room. To cover this case, modules also need to
implement `user_may_create_room`.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_create_room_alias`
_First introduced in Synapse v1.37.0_
```python
async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
```
@ -127,8 +171,15 @@ Called when trying to associate an alias with an existing room. The module must
`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
to set the given alias.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_publish_room`
_First introduced in Synapse v1.37.0_
```python
async def user_may_publish_room(user: str, room_id: str) -> bool
```
@ -137,8 +188,15 @@ Called when trying to publish a room to the homeserver's public rooms directory.
module must return a `bool` indicating whether the given user (represented by their
Matrix user ID) is allowed to publish the given room.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `check_username_for_spam`
_First introduced in Synapse v1.37.0_
```python
async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
```
@ -154,8 +212,15 @@ is represented as a dictionary with the following keys:
The module is given a copy of the original dictionary, so modifying it from within the
module cannot modify a user's profile when included in user directory search results.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `check_registration_for_spam`
_First introduced in Synapse v1.37.0_
```python
async def check_registration_for_spam(
    email_threepid: Optional[dict],
@ -179,8 +244,16 @@ The arguments passed to this callback are:
used during the registration process.
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `RegistrationBehaviour.ALLOW`, Synapse falls through to the next one.
The value of the first callback that does not return `RegistrationBehaviour.ALLOW` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
### `check_media_file_for_spam`
_First introduced in Synapse v1.37.0_
```python
async def check_media_file_for_spam(
    file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
@ -191,6 +264,11 @@ async def check_media_file_for_spam(
Called when storing a local or remote file. The module must return a boolean indicating
whether the given file can be stored in the homeserver's media store.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
## Example
The example below is a module that implements the spam checker callback


@ -10,6 +10,8 @@ The available third party rules callbacks are:
### `check_event_allowed`
_First introduced in Synapse v1.39.0_
```python
async def check_event_allowed(
    event: "synapse.events.EventBase",
@ -44,8 +46,15 @@ dictionary, and modify the returned dictionary accordingly.
Note that replacing the event only works for events sent by local users, not for events
received over federation.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
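As a minimal sketch, a module might deny topic changes while letting everything else fall through (the policy itself is illustrative):

```python
from typing import Optional, Tuple

from synapse.module_api import ModuleApi


class NoTopicChanges:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_third_party_rules_callbacks(
            check_event_allowed=self.check_event_allowed,
        )

    async def check_event_allowed(
        self, event, state_events
    ) -> Tuple[bool, Optional[dict]]:
        if event.type == "m.room.topic":
            return False, None  # deny, and don't replace the event
        return True, None  # allow; Synapse may consult the next module
```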
### `on_create_room`
_First introduced in Synapse v1.39.0_
```python
async def on_create_room(
    requester: "synapse.types.Requester",
@ -63,8 +72,16 @@ the request is a server admin.
Modules can modify the `request_content` (by e.g. adding events to its `initial_state`),
or deny the room's creation by raising a `module_api.errors.SynapseError`.
If multiple modules implement this callback, they will be considered in order. If a
callback returns without raising an exception, Synapse falls through to the next one. The
room creation will be forbidden as soon as one of the callbacks raises an exception. If
this happens, Synapse will not call any of the subsequent implementations of this
callback.
### `check_threepid_can_be_invited`
_First introduced in Synapse v1.39.0_
```python
async def check_threepid_can_be_invited(
    medium: str,
@ -76,8 +93,15 @@ async def check_threepid_can_be_invited(
Called when processing an invite via a third-party identifier (i.e. email or phone number).
The module must return a boolean indicating whether the invite can go through.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `check_visibility_can_be_modified`
_First introduced in Synapse v1.39.0_
```python
async def check_visibility_can_be_modified(
    room_id: str,
@ -90,6 +114,11 @@ Called when changing the visibility of a room in the local public room directory
visibility is a string that's either "public" or "private". The module must return a
boolean indicating whether the change can go through.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
## Example
The example below is a module that implements the third-party rules callback


@ -12,6 +12,21 @@ configuration associated with the module in Synapse's configuration file.
See the documentation for the `ModuleApi` class
[here](https://github.com/matrix-org/synapse/blob/master/synapse/module_api/__init__.py).
## When Synapse runs with several modules configured
If Synapse is running with other modules configured, the order each module appears in
within the `modules` section of the Synapse configuration file might restrict what it can
or cannot register. See [this section](index.html#using-multiple-modules) for more
information.
On top of the rules listed in the link above, if a callback returns a value that should
cause the current operation to fail (e.g. if a callback checking an event returns with a
value that should cause the event to be denied), Synapse will fail the operation and
ignore any subsequent callbacks that should have been run after this one.
The documentation for each callback mentions how Synapse behaves when
multiple modules implement it.
## Handling the module's configuration
A module can implement the following static method:
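The static method in question is `parse_config`; a sketch is below (the `api_url` option is a made-up example of module-specific configuration):

```python
class MyModule:
    @staticmethod
    def parse_config(config: dict) -> dict:
        # Validate and normalise the module's section of the YAML config
        # before the module is instantiated.
        if "api_url" not in config:
            raise Exception("Required config option 'api_url' is missing")
        config.setdefault("timeout", 10)
        return config
```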


@ -1,3 +1,9 @@
<h2 style="color:red">
This page of the Synapse documentation is now deprecated. For up-to-date
documentation on setting up or writing a password auth provider module, please see
<a href="modules.md">this page</a>.
</h2>
# Password auth provider modules
Password auth providers offer a way for server administrators to


@ -472,6 +472,48 @@ limit_remote_rooms:
#
#user_ips_max_age: 14d
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.
# Note that for some endpoints the error situation is the e-mail already being
# used, and for others the error is entering the e-mail being unused.
# If this option is enabled, instead of returning an error, these endpoints will
# act as if no error happened and return a fake session ID ('sid') to clients.
#
#request_token_inhibit_3pid_errors: true
# A list of domains that the domain portion of 'next_link' parameters
# must match.
#
# This parameter is optionally provided by clients while requesting
# validation of an email or phone number, and maps to a link that
# users will be automatically redirected to after validation
# succeeds. Clients can make use of this parameter to aid the validation
# process.
#
# The whitelist is applied whether the homeserver or an
# identity server is handling validation.
#
# The default value is no whitelist functionality; all domains are
# allowed. Setting this value to an empty list will instead disallow
# all domains.
#
#next_link_domain_whitelist: ["matrix.org"]
# Templates to use when generating email or HTML page contents.
#
templates:
# Directory in which Synapse will try to find template files to use to generate
# email or HTML page contents.
# If not set, or a file is not found within the template directory, a default
# template from within the Synapse package will be used.
#
# See https://matrix-org.github.io/synapse/latest/templates.html for more
# information about using custom templates.
#
#custom_template_directory: /path/to/custom/templates/
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
@ -541,47 +583,6 @@ retention:
# - shortest_max_lifetime: 3d
#   interval: 1d
## TLS ##
@ -2260,34 +2261,6 @@ email:
#email_validation: "[%(server_name)s] Validate your email"
# Password providers allow homeserver administrators to integrate
# their Synapse installation with existing authentication methods
# ex. LDAP, external tokens, etc.
#
# For more information and known implementations, please see
# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
#
# Note: instances wishing to use SAML or CAS authentication should
# instead use the `saml2_config` or `cas_config` options,
# respectively.
#
password_providers:
# # Example config for an LDAP auth provider
# - module: "ldap_auth_provider.LdapAuthProvider"
# config:
# enabled: true
# uri: "ldap://ldap.example.com:389"
# start_tls: true
# base: "ou=users,dc=example,dc=com"
# attributes:
# uid: "cn"
# mail: "email"
# name: "givenName"
# #bind_dn:
# #bind_password:
# #filter: "(objectClass=posixAccount)"
## Push ##


@ -187,8 +187,8 @@ of this endpoint modifying the media store.
The current third-party rules module interface is deprecated in favour of the new generic
modules system introduced in Synapse v1.37.0. Authors of third-party rules modules can refer
to [this documentation](modules/porting_legacy_module.md)
to update their modules. Synapse administrators can refer to [this documentation](modules/index.md)
to update their configuration once the modules they are using have been updated.
We plan to remove support for the current third-party rules interface in September 2021.
@ -237,9 +237,9 @@ SQLite databases are unaffected by this change.
The current spam checker interface is deprecated in favour of a new generic modules system.
Authors of spam checker modules can refer to [this
documentation](modules/porting_legacy_module.md)
to update their modules. Synapse administrators can refer to [this
documentation](modules/index.md)
to update their configuration once the modules they are using have been updated.
We plan to remove support for the current spam checker interface in August 2021.
@ -348,24 +348,24 @@ Please ensure your Application Services are up to date.
## Requirement for X-Forwarded-Proto header ## Requirement for X-Forwarded-Proto header
When using Synapse with a reverse proxy (in particular, when using the When using Synapse with a reverse proxy (in particular, when using the
[x_forwarded]{.title-ref} option on an HTTP listener), Synapse now `x_forwarded` option on an HTTP listener), Synapse now
expects to receive an [X-Forwarded-Proto]{.title-ref} header on incoming expects to receive an `X-Forwarded-Proto` header on incoming
HTTP requests. If it is not set, Synapse will log a warning on each HTTP requests. If it is not set, Synapse will log a warning on each
received request. received request.
To avoid the warning, administrators using a reverse proxy should ensure To avoid the warning, administrators using a reverse proxy should ensure
that the reverse proxy sets [X-Forwarded-Proto]{.title-ref} header to that the reverse proxy sets the `X-Forwarded-Proto` header to
[https]{.title-ref} or [http]{.title-ref} to indicate the protocol used `https` or `http` to indicate the protocol used
by the client. by the client.
Synapse also requires the [Host]{.title-ref} header to be preserved. Synapse also requires the `Host` header to be preserved.
See the [reverse proxy documentation](reverse_proxy.md), where the See the [reverse proxy documentation](reverse_proxy.md), where the
example configurations have been updated to show how to set these example configurations have been updated to show how to set these
headers. headers.
(Users of [Caddy](https://caddyserver.com/) are unaffected, since we (Users of [Caddy](https://caddyserver.com/) are unaffected, since we
believe it sets [X-Forwarded-Proto]{.title-ref} by default.) believe it sets `X-Forwarded-Proto` by default.)
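As a toy illustration of why the header matters (this is not Synapse's implementation): a server behind a reverse proxy only sees plain HTTP on its own socket, so the client-facing protocol has to be recovered from the forwarded header.

```python
# Toy sketch (not Synapse's implementation) of recovering the client-facing
# protocol from the X-Forwarded-Proto header set by a reverse proxy.
import logging

def effective_scheme(headers: dict) -> str:
    proto = headers.get("X-Forwarded-Proto")
    if proto in ("http", "https"):
        return proto
    # Header missing or invalid: warn, mirroring the behaviour described above.
    logging.getLogger(__name__).warning(
        "X-Forwarded-Proto not set by the reverse proxy; assuming https"
    )
    return "https"

assert effective_scheme({"X-Forwarded-Proto": "https"}) == "https"
```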
# Upgrading to v1.27.0 # Upgrading to v1.27.0
@ -529,13 +529,13 @@ mapping provider to specify different algorithms, instead of the
way](<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>). way](<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>).
If your Synapse configuration uses a custom mapping provider If your Synapse configuration uses a custom mapping provider
([oidc_config.user_mapping_provider.module]{.title-ref} is specified and (`oidc_config.user_mapping_provider.module` is specified and
not equal to not equal to
[synapse.handlers.oidc_handler.JinjaOidcMappingProvider]{.title-ref}) `synapse.handlers.oidc_handler.JinjaOidcMappingProvider`)
then you *must* ensure that [map_user_attributes]{.title-ref} of the then you *must* ensure that `map_user_attributes` of the
mapping provider performs some normalisation of the mapping provider performs some normalisation of the
[localpart]{.title-ref} returned. To match previous behaviour you can `localpart` returned. To match previous behaviour you can
use the [map_username_to_mxid_localpart]{.title-ref} function provided use the `map_username_to_mxid_localpart` function provided
by Synapse. An example is shown below: by Synapse. An example is shown below:
```python ```python
@ -564,7 +564,7 @@ v1.24.0. The Admin API is now only accessible under:
- `/_synapse/admin/v1` - `/_synapse/admin/v1`
The only exception is the [/admin/whois]{.title-ref} endpoint, which is The only exception is the `/admin/whois` endpoint, which is
[also available via the client-server [also available via the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid). API](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
@ -639,7 +639,7 @@ This page will appear to the user after clicking a password reset link
that has been emailed to them. that has been emailed to them.
To complete password reset, the page must include a way to make a To complete password reset, the page must include a way to make a
[POST]{.title-ref} request to `POST` request to
`/_synapse/client/password_reset/{medium}/submit_token` with the query `/_synapse/client/password_reset/{medium}/submit_token` with the query
parameters from the original link, presented as a URL-encoded form. See parameters from the original link, presented as a URL-encoded form. See
the file itself for more details. the file itself for more details.
@ -660,18 +660,18 @@ but the parameters are slightly different:
# Upgrading to v1.18.0 # Upgrading to v1.18.0
## Docker [-py3]{.title-ref} suffix will be removed in future versions ## Docker `-py3` suffix will be removed in future versions
From 10th August 2020, we will no longer publish Docker images with the From 10th August 2020, we will no longer publish Docker images with the
[-py3]{.title-ref} tag suffix. The images tagged with the `-py3` tag suffix. The images tagged with the
[-py3]{.title-ref} suffix have been identical to the non-suffixed tags `-py3` suffix have been identical to the non-suffixed tags
since release 0.99.0, and the suffix is obsolete. since release 0.99.0, and the suffix is obsolete.
On 10th August, we will remove the [latest-py3]{.title-ref} tag. On 10th August, we will remove the `latest-py3` tag.
Existing per-release tags (such as [v1.18.0-py3]{.title-ref}) will not Existing per-release tags (such as `v1.18.0-py3`) will not
be removed, but no new [-py3]{.title-ref} tags will be added. be removed, but no new `-py3` tags will be added.
Scripts relying on the [-py3]{.title-ref} suffix will need to be Scripts relying on the `-py3` suffix will need to be
updated. updated.
## Redis replication is now recommended in lieu of TCP replication ## Redis replication is now recommended in lieu of TCP replication
@ -705,8 +705,8 @@ This will *not* be a problem for Synapse installations which were:
If completeness of the room directory is a concern, installations which If completeness of the room directory is a concern, installations which
are affected can be repaired as follows: are affected can be repaired as follows:
1. Run the following sql from a [psql]{.title-ref} or 1. Run the following sql from a `psql` or
[sqlite3]{.title-ref} console: `sqlite3` console:
```sql ```sql
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
@ -770,8 +770,8 @@ participating in many rooms.
of any problems. of any problems.
1. As an initial check to see if you will be affected, you can try 1. As an initial check to see if you will be affected, you can try
running the following query from the [psql]{.title-ref} or running the following query from the `psql` or
[sqlite3]{.title-ref} console. It is safe to run it while Synapse is `sqlite3` console. It is safe to run it while Synapse is
still running. still running.
```sql ```sql
@ -1353,9 +1353,9 @@ first need to upgrade the database by running:
python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key> python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
Where [<db>]{.title-ref} is the location of the database, Where `<db>` is the location of the database,
[<server_name>]{.title-ref} is the server name as specified in the `<server_name>` is the server name as specified in the
synapse configuration, and [<signing_key>]{.title-ref} is the location synapse configuration, and `<signing_key>` is the location
of the signing key as specified in the synapse configuration. of the signing key as specified in the synapse configuration.
This may take some time to complete. Failures of signatures and content This may take some time to complete. Failures of signatures and content
View File
@ -149,7 +149,7 @@ POST /_synapse/admin/v1/registration_tokens/new
The request body must be a JSON object and can contain the following fields: The request body must be a JSON object and can contain the following fields:
- `token`: The registration token. A string of no more than 64 characters that - `token`: The registration token. A string of no more than 64 characters that
consists only of characters matched by the regex `[A-Za-z0-9-_]`. consists only of characters matched by the regex `[A-Za-z0-9._~-]`.
Default: randomly generated. Default: randomly generated.
- `uses_allowed`: The integer number of times the token can be used to complete - `uses_allowed`: The integer number of times the token can be used to complete
a registration before it becomes invalid. a registration before it becomes invalid.
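The widened character set above can be checked locally; a quick sketch (the server performs its own validation regardless):

```python
# Quick local check of the documented token constraints: at most 64
# characters, drawn only from A-Z, a-z, 0-9, '.', '_', '~' and '-'.
import re

TOKEN_RE = re.compile(r"^[A-Za-z0-9._~-]{1,64}$")

assert TOKEN_RE.match("winter-promo_2021.a~b")
assert not TOKEN_RE.match("no spaces allowed")
assert not TOKEN_RE.match("x" * 65)
```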
View File
@ -2,13 +2,13 @@
Below is a sample logging configuration file. This file can be tweaked to control how your Below is a sample logging configuration file. This file can be tweaked to control how your
homeserver will output logs. A restart of the server is generally required to apply any homeserver will output logs. A restart of the server is generally required to apply any
changes made to this file. changes made to this file. The value of the `log_config` option in your homeserver
config should be the path to this file.
Note that the contents below are *not* intended to be copied and used as the basis for Note that a default logging configuration (shown below) is created automatically alongside
a real homeserver.yaml. Instead, if you are starting from scratch, please generate the homeserver config when following the [installation instructions](../../setup/installation.md).
a fresh config using Synapse by following the instructions in It should be named `<SERVERNAME>.log.config` by default.
[Installation](../../setup/installation.md).
```yaml ```yaml
{{#include ../../sample_log_config.yaml}} {{#include ../../sample_log_config.yaml}}
``` ```
View File
@ -22,8 +22,11 @@ files =
synapse/crypto, synapse/crypto,
synapse/event_auth.py, synapse/event_auth.py,
synapse/events/builder.py, synapse/events/builder.py,
synapse/events/presence_router.py,
synapse/events/snapshot.py,
synapse/events/spamcheck.py, synapse/events/spamcheck.py,
synapse/events/third_party_rules.py, synapse/events/third_party_rules.py,
synapse/events/utils.py,
synapse/events/validator.py, synapse/events/validator.py,
synapse/federation, synapse/federation,
synapse/groups, synapse/groups,
@ -53,6 +56,7 @@ files =
synapse/storage/_base.py, synapse/storage/_base.py,
synapse/storage/background_updates.py, synapse/storage/background_updates.py,
synapse/storage/databases/main/appservice.py, synapse/storage/databases/main/appservice.py,
synapse/storage/databases/main/client_ips.py,
synapse/storage/databases/main/events.py, synapse/storage/databases/main/events.py,
synapse/storage/databases/main/keys.py, synapse/storage/databases/main/keys.py,
synapse/storage/databases/main/pusher.py, synapse/storage/databases/main/pusher.py,
@ -88,11 +92,23 @@ files =
tests/handlers/test_user_directory.py, tests/handlers/test_user_directory.py,
tests/rest/client/test_login.py, tests/rest/client/test_login.py,
tests/rest/client/test_auth.py, tests/rest/client/test_auth.py,
tests/rest/client/test_relations.py,
tests/rest/media/v1/test_filepath.py,
tests/rest/media/v1/test_oembed.py,
tests/storage/test_state.py, tests/storage/test_state.py,
tests/storage/test_user_directory.py, tests/storage/test_user_directory.py,
tests/util/test_itertools.py, tests/util/test_itertools.py,
tests/util/test_stream_change_cache.py tests/util/test_stream_change_cache.py
[mypy-synapse.api.*]
disallow_untyped_defs = True
[mypy-synapse.crypto.*]
disallow_untyped_defs = True
[mypy-synapse.events.*]
disallow_untyped_defs = True
[mypy-synapse.handlers.*] [mypy-synapse.handlers.*]
disallow_untyped_defs = True disallow_untyped_defs = True
@ -108,6 +124,9 @@ disallow_untyped_defs = True
[mypy-synapse.state.*] [mypy-synapse.state.*]
disallow_untyped_defs = True disallow_untyped_defs = True
[mypy-synapse.storage.databases.main.client_ips]
disallow_untyped_defs = True
[mypy-synapse.storage.util.*] [mypy-synapse.storage.util.*]
disallow_untyped_defs = True disallow_untyped_defs = True
View File
@ -27,6 +27,7 @@ DISTS = (
"ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23) "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:hirsute", # 21.04 (EOL 2022-01-05) "ubuntu:hirsute", # 21.04 (EOL 2022-01-05)
"ubuntu:impish", # 21.10 (EOL 2022-07)
) )
DESC = """\ DESC = """\
View File
@ -51,13 +51,19 @@ Example usage:
"request with.", "request with.",
) )
parser.add_argument(
"-K",
"--signing-key",
help="The private ed25519 key to sign the request with.",
)
parser.add_argument( parser.add_argument(
"-c", "-c",
"--config", "--config",
default="homeserver.yaml", default="homeserver.yaml",
help=( help=(
"Path to synapse config file, from which the server name and/or signing " "Path to synapse config file, from which the server name and/or signing "
"key path will be read. Ignored if --server-name and --signing-key-path " "key path will be read. Ignored if --server-name and --signing-key(-path) "
"are both given." "are both given."
), ),
) )
@ -87,11 +93,14 @@ Example usage:
args = parser.parse_args() args = parser.parse_args()
if not args.server_name or not args.signing_key_path: if not args.server_name or not (args.signing_key_path or args.signing_key):
read_args_from_config(args) read_args_from_config(args)
with open(args.signing_key_path) as f: if args.signing_key:
key = read_signing_keys(f)[0] keys = read_signing_keys([args.signing_key])
else:
with open(args.signing_key_path) as f:
keys = read_signing_keys(f)
json_to_sign = args.input_data json_to_sign = args.input_data
if json_to_sign is None: if json_to_sign is None:
@ -107,7 +116,7 @@ Example usage:
print("Input json was not an object", file=sys.stderr) print("Input json was not an object", file=sys.stderr)
sys.exit(1) sys.exit(1)
sign_json(obj, args.server_name, key) sign_json(obj, args.server_name, keys[0])
for c in json_encoder.iterencode(obj): for c in json_encoder.iterencode(obj):
args.output.write(c) args.output.write(c)
args.output.write("\n") args.output.write("\n")
@ -118,8 +127,17 @@ def read_args_from_config(args: argparse.Namespace) -> None:
config = yaml.safe_load(fh) config = yaml.safe_load(fh)
if not args.server_name: if not args.server_name:
args.server_name = config["server_name"] args.server_name = config["server_name"]
if not args.signing_key_path: if not args.signing_key_path and not args.signing_key:
args.signing_key_path = config["signing_key_path"] if "signing_key" in config:
args.signing_key = config["signing_key"]
elif "signing_key_path" in config:
args.signing_key_path = config["signing_key_path"]
else:
print(
"A signing key must be given on the commandline or in the config file.",
file=sys.stderr,
)
sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
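For context, the heavy lifting in this script is done by the `signedjson` library; a minimal sketch of the same signing step, with a throwaway key instead of one read from a homeserver config:

```python
# Minimal sketch of the signing step performed by the script, using a
# throwaway ed25519 key rather than the homeserver's configured key.
from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

signing_key = generate_signing_key("key_version_1")
signed = sign_json({"foo": "bar"}, "example.com", signing_key)
# `signed` now carries signatures["example.com"]["ed25519:key_version_1"]
print(signed["signatures"])
```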
View File
@ -47,7 +47,7 @@ try:
except ImportError: except ImportError:
pass pass
__version__ = "1.45.0" __version__ = "1.46.0rc1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when # We import here so that we don't have to install a bunch of deps when
View File
@ -245,7 +245,7 @@ class Auth:
async def validate_appservice_can_control_user_id( async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str, also_allow_user: Optional[str] = None self, app_service: ApplicationService, user_id: str, also_allow_user: Optional[str] = None
): ) -> None:
"""Validates that the app service is allowed to control """Validates that the app service is allowed to control
the given user. the given user.
@ -619,5 +619,13 @@ class Auth:
% (user_id, room_id), % (user_id, room_id),
) )
async def check_auth_blocking(self, *args, **kwargs) -> None: async def check_auth_blocking(
await self._auth_blocking.check_auth_blocking(*args, **kwargs) self,
user_id: Optional[str] = None,
threepid: Optional[dict] = None,
user_type: Optional[str] = None,
requester: Optional[Requester] = None,
) -> None:
await self._auth_blocking.check_auth_blocking(
user_id=user_id, threepid=threepid, user_type=user_type, requester=requester
)
View File
@ -176,6 +176,7 @@ class RelationTypes:
ANNOTATION = "m.annotation" ANNOTATION = "m.annotation"
REPLACE = "m.replace" REPLACE = "m.replace"
REFERENCE = "m.reference" REFERENCE = "m.reference"
THREAD = "io.element.thread"
class LimitBlockingTypes: class LimitBlockingTypes:
View File
@ -18,7 +18,7 @@
import logging import logging
import typing import typing
from http import HTTPStatus from http import HTTPStatus
from typing import Dict, List, Optional, Union from typing import Any, Dict, List, Optional, Union
from twisted.web import http from twisted.web import http
@ -143,7 +143,7 @@ class SynapseError(CodeMessageException):
super().__init__(code, msg) super().__init__(code, msg)
self.errcode = errcode self.errcode = errcode
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode) return cs_error(self.msg, self.errcode)
@ -175,7 +175,7 @@ class ProxiedRequestError(SynapseError):
else: else:
self._additional_fields = dict(additional_fields) self._additional_fields = dict(additional_fields)
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, **self._additional_fields) return cs_error(self.msg, self.errcode, **self._additional_fields)
@ -196,7 +196,7 @@ class ConsentNotGivenError(SynapseError):
) )
self._consent_uri = consent_uri self._consent_uri = consent_uri
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri) return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
@ -262,14 +262,10 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError): class UnrecognizedRequestError(SynapseError):
"""An error indicating we don't understand the request you're trying to make""" """An error indicating we don't understand the request you're trying to make"""
def __init__(self, *args, **kwargs): def __init__(
if "errcode" not in kwargs: self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
kwargs["errcode"] = Codes.UNRECOGNIZED ):
if len(args) == 0: super().__init__(400, msg, errcode)
message = "Unrecognized request"
else:
message = args[0]
super().__init__(400, message, **kwargs)
class NotFoundError(SynapseError): class NotFoundError(SynapseError):
@ -284,10 +280,8 @@ class AuthError(SynapseError):
other poorly-defined times. other poorly-defined times.
""" """
def __init__(self, *args, **kwargs): def __init__(self, code: int, msg: str, errcode: str = Codes.FORBIDDEN):
if "errcode" not in kwargs: super().__init__(code, msg, errcode)
kwargs["errcode"] = Codes.FORBIDDEN
super().__init__(*args, **kwargs)
class InvalidClientCredentialsError(SynapseError): class InvalidClientCredentialsError(SynapseError):
@ -321,7 +315,7 @@ class InvalidClientTokenError(InvalidClientCredentialsError):
super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN") super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
self._soft_logout = soft_logout self._soft_logout = soft_logout
def error_dict(self): def error_dict(self) -> "JsonDict":
d = super().error_dict() d = super().error_dict()
d["soft_logout"] = self._soft_logout d["soft_logout"] = self._soft_logout
return d return d
@ -345,7 +339,7 @@ class ResourceLimitError(SynapseError):
self.limit_type = limit_type self.limit_type = limit_type
super().__init__(code, msg, errcode=errcode) super().__init__(code, msg, errcode=errcode)
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error( return cs_error(
self.msg, self.msg,
self.errcode, self.errcode,
@ -357,32 +351,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError): class EventSizeError(SynapseError):
"""An error raised when an event is too big.""" """An error raised when an event is too big."""
def __init__(self, *args, **kwargs): def __init__(self, msg: str):
if "errcode" not in kwargs: super().__init__(413, msg, Codes.TOO_LARGE)
kwargs["errcode"] = Codes.TOO_LARGE
super().__init__(413, *args, **kwargs)
class EventStreamError(SynapseError):
"""An error raised when there a problem with the event stream."""
def __init__(self, *args, **kwargs):
if "errcode" not in kwargs:
kwargs["errcode"] = Codes.BAD_PAGINATION
super().__init__(*args, **kwargs)
class LoginError(SynapseError): class LoginError(SynapseError):
"""An error raised when there was a problem logging in.""" """An error raised when there was a problem logging in."""
pass
class StoreError(SynapseError): class StoreError(SynapseError):
"""An error raised when there was a problem storing some data.""" """An error raised when there was a problem storing some data."""
pass
class InvalidCaptchaError(SynapseError): class InvalidCaptchaError(SynapseError):
def __init__( def __init__(
@ -395,7 +374,7 @@ class InvalidCaptchaError(SynapseError):
super().__init__(code, msg, errcode) super().__init__(code, msg, errcode)
self.error_url = error_url self.error_url = error_url
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, error_url=self.error_url) return cs_error(self.msg, self.errcode, error_url=self.error_url)
@ -412,7 +391,7 @@ class LimitExceededError(SynapseError):
super().__init__(code, msg, errcode) super().__init__(code, msg, errcode)
self.retry_after_ms = retry_after_ms self.retry_after_ms = retry_after_ms
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms) return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)
@ -443,10 +422,8 @@ class UnsupportedRoomVersionError(SynapseError):
class ThreepidValidationError(SynapseError): class ThreepidValidationError(SynapseError):
"""An error raised when there was a problem authorising an event.""" """An error raised when there was a problem authorising an event."""
def __init__(self, *args, **kwargs): def __init__(self, msg: str, errcode: str = Codes.FORBIDDEN):
if "errcode" not in kwargs: super().__init__(400, msg, errcode)
kwargs["errcode"] = Codes.FORBIDDEN
super().__init__(*args, **kwargs)
class IncompatibleRoomVersionError(SynapseError): class IncompatibleRoomVersionError(SynapseError):
@ -466,7 +443,7 @@ class IncompatibleRoomVersionError(SynapseError):
self._room_version = room_version self._room_version = room_version
def error_dict(self): def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, room_version=self._room_version) return cs_error(self.msg, self.errcode, room_version=self._room_version)
@ -494,7 +471,7 @@ class RequestSendFailed(RuntimeError):
errors (like programming errors). errors (like programming errors).
""" """
def __init__(self, inner_exception, can_retry): def __init__(self, inner_exception: BaseException, can_retry: bool):
super().__init__( super().__init__(
"Failed to send request: %s: %s" "Failed to send request: %s: %s"
% (type(inner_exception).__name__, inner_exception) % (type(inner_exception).__name__, inner_exception)
@ -503,7 +480,7 @@ class RequestSendFailed(RuntimeError):
self.can_retry = can_retry self.can_retry = can_retry
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs): def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server """Utility method for constructing an error response for client-server
interactions. interactions.
@ -551,7 +528,7 @@ class FederationError(RuntimeError):
msg = "%s %s: %s" % (level, code, reason) msg = "%s %s: %s" % (level, code, reason)
super().__init__(msg) super().__init__(msg)
def get_dict(self): def get_dict(self) -> "JsonDict":
return { return {
"level": self.level, "level": self.level,
"code": self.code, "code": self.code,
@ -580,7 +557,7 @@ class HttpResponseException(CodeMessageException):
super().__init__(code, msg) super().__init__(code, msg)
self.response = response self.response = response
def to_synapse_error(self): def to_synapse_error(self) -> SynapseError:
"""Make a SynapseError based on an HTTPResponseException """Make a SynapseError based on an HTTPResponseException
This is useful when a proxied request has failed, and we need to This is useful when a proxied request has failed, and we need to
View File
@ -231,24 +231,24 @@ class FilterCollection:
def include_redundant_members(self) -> bool: def include_redundant_members(self) -> bool:
return self._room_state_filter.include_redundant_members() return self._room_state_filter.include_redundant_members()
def filter_presence(self, events): def filter_presence(
self, events: Iterable[UserPresenceState]
) -> List[UserPresenceState]:
return self._presence_filter.filter(events) return self._presence_filter.filter(events)
def filter_account_data(self, events): def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return self._account_data.filter(events) return self._account_data.filter(events)
def filter_room_state(self, events): def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]:
return self._room_state_filter.filter(self._room_filter.filter(events)) return self._room_state_filter.filter(self._room_filter.filter(events))
def filter_room_timeline(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: def filter_room_timeline(self, events: Iterable[EventBase]) -> List[EventBase]:
return self._room_timeline_filter.filter(self._room_filter.filter(events)) return self._room_timeline_filter.filter(self._room_filter.filter(events))
def filter_room_ephemeral(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return self._room_ephemeral_filter.filter(self._room_filter.filter(events)) return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
def filter_room_account_data( def filter_room_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
self, events: Iterable[FilterEvent]
) -> List[FilterEvent]:
return self._room_account_data.filter(self._room_filter.filter(events)) return self._room_account_data.filter(self._room_filter.filter(events))
def blocks_all_presence(self) -> bool: def blocks_all_presence(self) -> bool:
@ -309,7 +309,7 @@ class Filter:
# except for presence which actually gets passed around as its own # except for presence which actually gets passed around as its own
# namedtuple type. # namedtuple type.
if isinstance(event, UserPresenceState): if isinstance(event, UserPresenceState):
sender = event.user_id sender: Optional[str] = event.user_id
room_id = None room_id = None
ev_type = "m.presence" ev_type = "m.presence"
contains_url = False contains_url = False
View File
@ -12,49 +12,48 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from collections import namedtuple from typing import Any, Optional
import attr
from synapse.api.constants import PresenceState from synapse.api.constants import PresenceState
from synapse.types import JsonDict
class UserPresenceState( @attr.s(slots=True, frozen=True, auto_attribs=True)
namedtuple( class UserPresenceState:
"UserPresenceState",
(
"user_id",
"state",
"last_active_ts",
"last_federation_update_ts",
"last_user_sync_ts",
"status_msg",
"currently_active",
),
)
):
"""Represents the current presence state of the user. """Represents the current presence state of the user.
user_id (str) user_id
last_active (int): Time in msec that the user last interacted with server. last_active: Time in msec that the user last interacted with the server.
last_federation_update (int): Time in msec since either a) we sent a presence last_federation_update: Time in msec since either a) we sent a presence
update to other servers or b) we received a presence update, depending update to other servers or b) we received a presence update, depending
on if it is a local user or not. on if it is a local user or not.
last_user_sync (int): Time in msec that the user last *completed* a sync last_user_sync: Time in msec that the user last *completed* a sync
(or event stream). (or event stream).
status_msg (str): User set status message. status_msg: User set status message.
""" """
def as_dict(self): user_id: str
return dict(self._asdict()) state: str
last_active_ts: int
last_federation_update_ts: int
last_user_sync_ts: int
status_msg: Optional[str]
currently_active: bool
def as_dict(self) -> JsonDict:
return attr.asdict(self)
@staticmethod @staticmethod
def from_dict(d): def from_dict(d: JsonDict) -> "UserPresenceState":
return UserPresenceState(**d) return UserPresenceState(**d)
def copy_and_replace(self, **kwargs): def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState":
return self._replace(**kwargs) return attr.evolve(self, **kwargs)
@classmethod @classmethod
def default(cls, user_id): def default(cls, user_id: str) -> "UserPresenceState":
"""Returns a default presence state.""" """Returns a default presence state."""
return cls( return cls(
user_id=user_id, user_id=user_id,
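The namedtuple-to-attrs conversion above preserves the old call sites (`as_dict`, `copy_and_replace`); a self-contained sketch of the equivalent attrs idioms, on a toy class rather than `UserPresenceState`:

```python
# Toy demonstration of the attrs idioms used above: attr.evolve replaces
# namedtuple._replace, and attr.asdict replaces dict(namedtuple._asdict()).
import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class Point:
    x: int
    y: int

p = Point(x=1, y=2)
assert attr.evolve(p, y=5) == Point(x=1, y=5)
assert attr.asdict(p) == {"x": 1, "y": 2}
```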
View File
@ -161,7 +161,7 @@ class Ratelimiter:
return allowed, time_allowed return allowed, time_allowed
def _prune_message_counts(self, time_now_s: float): def _prune_message_counts(self, time_now_s: float) -> None:
"""Remove message count entries that have not exceeded their defined """Remove message count entries that have not exceeded their defined
rate_hz limit rate_hz limit
@ -190,7 +190,7 @@ class Ratelimiter:
update: bool = True, update: bool = True,
n_actions: int = 1, n_actions: int = 1,
_time_now_s: Optional[float] = None, _time_now_s: Optional[float] = None,
): ) -> None:
"""Checks if an action can be performed. If not, raises a LimitExceededError """Checks if an action can be performed. If not, raises a LimitExceededError
Checks if the user has ratelimiting disabled in the database by looking Checks if the user has ratelimiting disabled in the database by looking
View File
@ -19,6 +19,7 @@ from hashlib import sha256
from urllib.parse import urlencode from urllib.parse import urlencode
from synapse.config import ConfigError from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig
SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client" SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client"
CLIENT_API_PREFIX = "/_matrix/client" CLIENT_API_PREFIX = "/_matrix/client"
@ -34,11 +35,7 @@ LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
class ConsentURIBuilder: class ConsentURIBuilder:
def __init__(self, hs_config): def __init__(self, hs_config: HomeServerConfig):
"""
Args:
hs_config (synapse.config.homeserver.HomeServerConfig):
"""
if hs_config.key.form_secret is None: if hs_config.key.form_secret is None:
raise ConfigError("form_secret not set in config") raise ConfigError("form_secret not set in config")
if hs_config.server.public_baseurl is None: if hs_config.server.public_baseurl is None:
@ -47,15 +44,15 @@ class ConsentURIBuilder:
self._hmac_secret = hs_config.key.form_secret.encode("utf-8") self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
self._public_baseurl = hs_config.server.public_baseurl self._public_baseurl = hs_config.server.public_baseurl
def build_user_consent_uri(self, user_id): def build_user_consent_uri(self, user_id: str) -> str:
"""Build a URI which we can give to the user to do their privacy """Build a URI which we can give to the user to do their privacy
policy consent policy consent
Args: Args:
user_id (str): mxid or username of user user_id: mxid or username of user
Returns Returns
(str) the URI where the user can do consent The URI where the user can do consent
""" """
mac = hmac.new( mac = hmac.new(
key=self._hmac_secret, msg=user_id.encode("ascii"), digestmod=sha256 key=self._hmac_secret, msg=user_id.encode("ascii"), digestmod=sha256
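The URI construction cut off above boils down to an HMAC-SHA256 of the user id, keyed with `form_secret`. An illustrative standalone version follows; the query parameter names here are assumptions for the sketch, not Synapse's guaranteed API:

```python
# Illustrative standalone version of the consent-URI MAC; the query
# parameter names ("u", "h") are assumptions, not Synapse's actual API.
import hmac
from hashlib import sha256
from urllib.parse import urlencode

form_secret = b"form_secret_from_config"
user_id = "@alice:example.com"
mac = hmac.new(key=form_secret, msg=user_id.encode("ascii"), digestmod=sha256).hexdigest()
print("https://example.com/_matrix/consent?" + urlencode({"u": user_id, "h": mac}))
```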
View File
@ -31,6 +31,7 @@ import twisted
from twisted.internet import defer, error, reactor from twisted.internet import defer, error, reactor
from twisted.logger import LoggingFile, LogLevel from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
import synapse import synapse
from synapse.api.constants import MAX_PDU_SIZE from synapse.api.constants import MAX_PDU_SIZE
@ -42,11 +43,13 @@ from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.logging.context import PreserveLoggingContext from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process from synapse.util.daemonize import daemonize_process
from synapse.util.gai_resolver import GAIResolver
from synapse.util.rlimit import change_resource_limit from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string from synapse.util.versionstring import get_version_string
@ -293,7 +296,7 @@ def listen_ssl(
return r return r
def refresh_certificate(hs): def refresh_certificate(hs: "HomeServer"):
""" """
Refresh the TLS certificates that Synapse is using by re-reading them from Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them. disk and updating the TLS context factories to use them.
@ -337,9 +340,19 @@ async def start(hs: "HomeServer"):
Args: Args:
hs: homeserver instance hs: homeserver instance
""" """
reactor = hs.get_reactor()
# We want to use a separate thread pool for the resolver so that large
# numbers of DNS requests don't starve out other users of the threadpool.
resolver_threadpool = ThreadPool(name="gai_resolver")
resolver_threadpool.start()
reactor.addSystemEventTrigger("during", "shutdown", resolver_threadpool.stop)
reactor.installNameResolver(
GAIResolver(reactor, getThreadPool=lambda: resolver_threadpool)
)
# Set up the SIGHUP machinery. # Set up the SIGHUP machinery.
if hasattr(signal, "SIGHUP"): if hasattr(signal, "SIGHUP"):
reactor = hs.get_reactor()
@wrap_as_background_process("sighup") @wrap_as_background_process("sighup")
def handle_sighup(*args, **kwargs): def handle_sighup(*args, **kwargs):
@ -379,6 +392,7 @@ async def start(hs: "HomeServer"):
load_legacy_spam_checkers(hs) load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs) load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs) load_legacy_presence_router(hs)
load_legacy_password_auth_providers(hs)
# If we've configured an expiry time for caches, start the background job now. # If we've configured an expiry time for caches, start the background job now.
setup_expire_lru_cache_entries(hs) setup_expire_lru_cache_entries(hs)
@ -417,11 +431,11 @@ async def start(hs: "HomeServer"):
atexit.register(gc.freeze) atexit.register(gc.freeze)
def setup_sentry(hs): def setup_sentry(hs: "HomeServer"):
"""Enable sentry integration, if enabled in configuration """Enable sentry integration, if enabled in configuration
Args: Args:
hs (synapse.server.HomeServer) hs
""" """
if not hs.config.metrics.sentry_enabled: if not hs.config.metrics.sentry_enabled:
@ -447,7 +461,7 @@ def setup_sentry(hs):
scope.set_tag("worker_name", name) scope.set_tag("worker_name", name)
def setup_sdnotify(hs): def setup_sdnotify(hs: "HomeServer"):
"""Adds process state hooks to tell systemd what we are up to.""" """Adds process state hooks to tell systemd what we are up to."""
# Tell systemd our state, if we're using it. This will silently fail if # Tell systemd our state, if we're using it. This will silently fail if
View File
@ -39,6 +39,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.server import HomeServer from synapse.server import HomeServer
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.util.logcontext import LoggingContext from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string from synapse.util.versionstring import get_version_string
@ -58,6 +59,7 @@ class AdminCmdSlavedStore(
SlavedEventStore, SlavedEventStore,
SlavedClientIpStore, SlavedClientIpStore,
BaseSlavedStore, BaseSlavedStore,
RoomWorkerStore,
): ):
pass pass
@ -66,11 +68,11 @@ class AdminCmdServer(HomeServer):
DATASTORE_CLASS = AdminCmdSlavedStore DATASTORE_CLASS = AdminCmdSlavedStore
async def export_data_command(hs, args): async def export_data_command(hs: HomeServer, args):
"""Export data for a user. """Export data for a user.
Args: Args:
hs (HomeServer) hs
args (argparse.Namespace) args (argparse.Namespace)
""" """
@ -185,11 +187,7 @@ def start(config_options):
# a full worker config. # a full worker config.
config.worker.worker_app = "synapse.app.admin_cmd" config.worker.worker_app = "synapse.app.admin_cmd"
if ( if not config.worker.worker_daemonize and not config.worker.worker_log_config:
not config.worker.worker_daemonize
and not config.worker.worker_log_file
and not config.worker.worker_log_config
):
# Since we're meant to be run as a "command" let's not redirect stdio # Since we're meant to be run as a "command" let's not redirect stdio
# unless we've actually set log config. # unless we've actually set log config.
config.logging.no_redirect_stdio = True config.logging.no_redirect_stdio = True
@ -198,9 +196,9 @@ def start(config_options):
config.server.update_user_directory = False config.server.update_user_directory = False
config.worker.run_background_tasks = False config.worker.run_background_tasks = False
config.worker.start_pushers = False config.worker.start_pushers = False
config.pusher_shard_config.instances = [] config.worker.pusher_shard_config.instances = []
config.worker.send_federation = False config.worker.send_federation = False
config.federation_shard_config.instances = [] config.worker.federation_shard_config.instances = []
synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
@ -221,7 +219,7 @@ def start(config_options):
async def run(): async def run():
with LoggingContext("command"): with LoggingContext("command"):
_base.start(ss) await _base.start(ss)
await args.func(ss, args) await args.func(ss, args)
_base.start_worker_reactor( _base.start_worker_reactor(
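The switch to `await _base.start(ss)` above fixes a classic pitfall: calling an async function without awaiting it only creates a coroutine object, it never runs the body. A toy illustration:

```python
# Calling a coroutine function without `await` creates (and discards) a
# coroutine object; only the awaited call actually executes the body.
import asyncio

async def start() -> None:
    print("started")

async def run() -> None:
    start()        # never runs; Python emits a "coroutine was never awaited" warning
    await start()  # prints "started"

asyncio.run(run())
```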
View File
@ -131,10 +131,10 @@ class KeyUploadServlet(RestServlet):
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
def __init__(self, hs): def __init__(self, hs: HomeServer):
""" """
Args: Args:
hs (synapse.server.HomeServer): server hs: server
""" """
super().__init__() super().__init__()
self.auth = hs.get_auth() self.auth = hs.get_auth()
View File
@ -412,7 +412,7 @@ def format_config_error(e: ConfigError) -> Iterator[str]:
e = e.__cause__ e = e.__cause__
def run(hs): def run(hs: HomeServer):
PROFILE_SYNAPSE = False PROFILE_SYNAPSE = False
if PROFILE_SYNAPSE: if PROFILE_SYNAPSE:
View File
@ -15,11 +15,15 @@ import logging
import math import math
import resource import resource
import sys import sys
from typing import TYPE_CHECKING
from prometheus_client import Gauge from prometheus_client import Gauge
from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.background_process_metrics import wrap_as_background_process
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger("synapse.app.homeserver") logger = logging.getLogger("synapse.app.homeserver")
# Contains the list of processes we will be monitoring # Contains the list of processes we will be monitoring
@ -41,7 +45,7 @@ registered_reserved_users_mau_gauge = Gauge(
@wrap_as_background_process("phone_stats_home") @wrap_as_background_process("phone_stats_home")
async def phone_stats_home(hs, stats, stats_process=_stats_process): async def phone_stats_home(hs: "HomeServer", stats, stats_process=_stats_process):
logger.info("Gathering stats for reporting") logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time()) now = int(hs.get_clock().time())
uptime = int(now - hs.start_time) uptime = int(now - hs.start_time)
@ -142,7 +146,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
logger.warning("Error reporting stats: %s", e) logger.warning("Error reporting stats: %s", e)
def start_phone_stats_home(hs): def start_phone_stats_home(hs: "HomeServer"):
""" """
Start the background tasks which report phone home stats. Start the background tasks which report phone home stats.
""" """
View File
@ -27,6 +27,7 @@ from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.appservice import ApplicationService from synapse.appservice import ApplicationService
from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -84,7 +85,7 @@ class ApplicationServiceApi(SimpleHttpClient):
pushing. pushing.
""" """
def __init__(self, hs): def __init__(self, hs: "HomeServer"):
super().__init__(hs) super().__init__(hs)
self.clock = hs.get_clock() self.clock = hs.get_clock()
View File
@ -1,4 +1,5 @@
# Copyright 2015, 2016 OpenMarket Ltd # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -11,25 +12,44 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import sys
from synapse.config._base import ConfigError from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
if __name__ == "__main__":
import sys
from synapse.config.homeserver import HomeServerConfig def main(args):
action = args[1] if len(args) > 1 and args[1] == "read" else None
# If we're reading a key in the config file, then `args[1]` will be `read` and `args[2]`
# will be the key to read.
# We'll want to rework this code if we want to support more actions than just `read`.
load_config_args = args[3:] if action else args[1:]
action = sys.argv[1] try:
config = HomeServerConfig.load_config("", load_config_args)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
print("Config parses OK!")
if action == "read": if action == "read":
key = sys.argv[2] key = args[2]
key_parts = key.split(".")
value = config
try: try:
config = HomeServerConfig.load_config("", sys.argv[3:]) while len(key_parts):
except ConfigError as e: value = getattr(value, key_parts[0])
sys.stderr.write("\n" + str(e) + "\n") key_parts.pop(0)
print(f"\n{key}: {value}")
except AttributeError:
print(
f"\nNo '{key}' key could be found in the provided configuration file."
)
sys.exit(1) sys.exit(1)
print(getattr(config, key))
sys.exit(0) if __name__ == "__main__":
else: main(sys.argv)
sys.stderr.write("Unknown command %r\n" % (action,))
sys.exit(1)
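The rewritten `read` action walks dotted keys attribute by attribute, so an invocation along the lines of `python -m synapse.config read server.server_name -c homeserver.yaml` prints a single value. The traversal is equivalent to this standalone sketch (stand-in classes, not the real config objects):

```python
# Standalone equivalent of the dotted-key walk, using stand-in classes
# rather than the real HomeServerConfig objects.
from functools import reduce

class ServerSection:
    server_name = "example.com"

class FakeConfig:
    server = ServerSection()

key = "server.server_name"
value = reduce(getattr, key.split("."), FakeConfig())
assert value == "example.com"
```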
View File
@ -26,6 +26,7 @@ from synapse.config import (
redis, redis,
registration, registration,
repository, repository,
retention,
room_directory, room_directory,
saml2, saml2,
server, server,
@ -91,6 +92,7 @@ class RootConfig:
modules: modules.ModulesConfig modules: modules.ModulesConfig
caches: cache.CacheConfig caches: cache.CacheConfig
federation: federation.FederationConfig federation: federation.FederationConfig
retention: retention.RetentionConfig
config_classes: List = ... config_classes: List = ...
def __init__(self) -> None: ... def __init__(self) -> None: ...
View File
@ -24,6 +24,11 @@ class ExperimentalConfig(Config):
def read_config(self, config: JsonDict, **kwargs): def read_config(self, config: JsonDict, **kwargs):
experimental = config.get("experimental_features") or {} experimental = config.get("experimental_features") or {}
# Whether to enable experimental MSC1849 (aka relations) support
self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
# MSC3440 (thread relation)
self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)
# MSC3026 (busy presence state) # MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
View File
@ -39,6 +39,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig from .redis import RedisConfig
from .registration import RegistrationConfig from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig from .repository import ContentRepositoryConfig
from .retention import RetentionConfig
from .room import RoomConfig from .room import RoomConfig
from .room_directory import RoomDirectoryConfig from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config from .saml2 import SAML2Config
@ -61,6 +62,7 @@ class HomeServerConfig(RootConfig):
MeowConfig, MeowConfig,
ModulesConfig, ModulesConfig,
ServerConfig, ServerConfig,
RetentionConfig,
TlsConfig, TlsConfig,
FederationConfig, FederationConfig,
CacheConfig, CacheConfig,
View File
@ -18,6 +18,7 @@ import os
import sys import sys
import threading import threading
from string import Template from string import Template
from typing import TYPE_CHECKING
import yaml import yaml
from zope.interface import implementer from zope.interface import implementer
@ -38,6 +39,9 @@ from synapse.util.versionstring import get_version_string
from ._base import Config, ConfigError from ._base import Config, ConfigError
if TYPE_CHECKING:
from synapse.server import HomeServer
DEFAULT_LOG_CONFIG = Template( DEFAULT_LOG_CONFIG = Template(
"""\ """\
# Log configuration for Synapse. # Log configuration for Synapse.
@ -306,7 +310,10 @@ def _reload_logging_config(log_config_path):
def setup_logging( def setup_logging(
hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner hs: "HomeServer",
config,
use_worker_options=False,
logBeginner: LogBeginner = globalLogBeginner,
) -> None: ) -> None:
""" """
Set up the logging subsystem. Set up the logging subsystem.
View File
@ -25,6 +25,29 @@ class PasswordAuthProviderConfig(Config):
section = "authproviders" section = "authproviders"
def read_config(self, config, **kwargs): def read_config(self, config, **kwargs):
"""Parses the old password auth providers config. The config format looks like this:
password_providers:
# Example config for an LDAP auth provider
- module: "ldap_auth_provider.LdapAuthProvider"
config:
enabled: true
uri: "ldap://ldap.example.com:389"
start_tls: true
base: "ou=users,dc=example,dc=com"
attributes:
uid: "cn"
mail: "email"
name: "givenName"
#bind_dn:
#bind_password:
#filter: "(objectClass=posixAccount)"
We expect admins to use modules for this feature (which is why it doesn't appear
in the sample config file), but we want to keep support for it around for a bit
for backwards compatibility.
"""
self.password_providers: List[Tuple[Type, Any]] = [] self.password_providers: List[Tuple[Type, Any]] = []
providers = [] providers = []
@ -49,33 +72,3 @@ class PasswordAuthProviderConfig(Config):
) )
self.password_providers.append((provider_class, provider_config)) self.password_providers.append((provider_class, provider_config))
def generate_config_section(self, **kwargs):
return """\
# Password providers allow homeserver administrators to integrate
# their Synapse installation with existing authentication methods
# ex. LDAP, external tokens, etc.
#
# For more information and known implementations, please see
# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
#
# Note: instances wishing to use SAML or CAS authentication should
# instead use the `saml2_config` or `cas_config` options,
# respectively.
#
password_providers:
# # Example config for an LDAP auth provider
# - module: "ldap_auth_provider.LdapAuthProvider"
# config:
# enabled: true
# uri: "ldap://ldap.example.com:389"
# start_tls: true
# base: "ou=users,dc=example,dc=com"
# attributes:
# uid: "cn"
# mail: "email"
# name: "givenName"
# #bind_dn:
# #bind_password:
# #filter: "(objectClass=posixAccount)"
"""
226 synapse/config/retention.py Normal file
View File
@ -0,0 +1,226 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional
import attr
from synapse.config._base import Config, ConfigError
logger = logging.getLogger(__name__)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RetentionPurgeJob:
"""Object describing the configuration of the manhole"""
interval: int
shortest_max_lifetime: Optional[int]
longest_max_lifetime: Optional[int]
class RetentionConfig(Config):
section = "retention"
def read_config(self, config, **kwargs):
retention_config = config.get("retention")
if retention_config is None:
retention_config = {}
self.retention_enabled = retention_config.get("enabled", False)
retention_default_policy = retention_config.get("default_policy")
if retention_default_policy is not None:
self.retention_default_min_lifetime = retention_default_policy.get(
"min_lifetime"
)
if self.retention_default_min_lifetime is not None:
self.retention_default_min_lifetime = self.parse_duration(
self.retention_default_min_lifetime
)
self.retention_default_max_lifetime = retention_default_policy.get(
"max_lifetime"
)
if self.retention_default_max_lifetime is not None:
self.retention_default_max_lifetime = self.parse_duration(
self.retention_default_max_lifetime
)
if (
self.retention_default_min_lifetime is not None
and self.retention_default_max_lifetime is not None
and (
self.retention_default_min_lifetime
> self.retention_default_max_lifetime
)
):
raise ConfigError(
"The default retention policy's 'min_lifetime' can not be greater"
" than its 'max_lifetime'"
)
else:
self.retention_default_min_lifetime = None
self.retention_default_max_lifetime = None
if self.retention_enabled:
logger.info(
"Message retention policies support enabled with the following default"
" policy: min_lifetime = %s ; max_lifetime = %s",
self.retention_default_min_lifetime,
self.retention_default_max_lifetime,
)
self.retention_allowed_lifetime_min = retention_config.get(
"allowed_lifetime_min"
)
if self.retention_allowed_lifetime_min is not None:
self.retention_allowed_lifetime_min = self.parse_duration(
self.retention_allowed_lifetime_min
)
self.retention_allowed_lifetime_max = retention_config.get(
"allowed_lifetime_max"
)
if self.retention_allowed_lifetime_max is not None:
self.retention_allowed_lifetime_max = self.parse_duration(
self.retention_allowed_lifetime_max
)
if (
self.retention_allowed_lifetime_min is not None
and self.retention_allowed_lifetime_max is not None
and self.retention_allowed_lifetime_min
> self.retention_allowed_lifetime_max
):
raise ConfigError(
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs: List[RetentionPurgeJob] = []
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
if interval_config is None:
raise ConfigError(
"A retention policy's purge jobs configuration must have the"
" 'interval' key set."
)
interval = self.parse_duration(interval_config)
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
if shortest_max_lifetime is not None:
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
if longest_max_lifetime is not None:
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
if (
shortest_max_lifetime is not None
and longest_max_lifetime is not None
and shortest_max_lifetime > longest_max_lifetime
):
raise ConfigError(
"A retention policy's purge jobs configuration's"
" 'shortest_max_lifetime' value can not be greater than its"
" 'longest_max_lifetime' value."
)
self.retention_purge_jobs.append(
RetentionPurgeJob(interval, shortest_max_lifetime, longest_max_lifetime)
)
if not self.retention_purge_jobs:
self.retention_purge_jobs = [
RetentionPurgeJob(self.parse_duration("1d"), None, None)
]
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
# The message retention policies feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true
# Default retention policy. If set, Synapse will apply it to rooms that lack the
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
# matter much because Synapse doesn't take it into account yet.
#
#default_policy:
# min_lifetime: 1d
# max_lifetime: 1y
# Retention policy limits. If set, and a room's 'm.room.retention' state event
# defines a 'min_lifetime' or a 'max_lifetime' that's out of these bounds,
# Synapse will cap the room's policy to these limits when running purge jobs.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
# Server admins can define the settings of the background jobs purging the
# events whose lifetime has expired under the 'purge_jobs' section.
#
# If no configuration is provided, a single job will be set up to delete expired
# events in every room daily.
#
# Each job's configuration defines which range of message lifetimes the job
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
# lower than or equal to 3 days. Both the minimum and the maximum value of a
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
# which 'max_lifetime' is lower than or equal to three days.
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a more frequent basis than for the rest of the rooms
# (e.g. every 12h), but not want that purge to be performed by a job that's
# iterating over every room it knows, which could be heavy on the server.
#
# If any purge job is configured, it is strongly recommended to have at least
# a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
# set, or one job without 'shortest_max_lifetime' and one job without
# 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
# 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
# room's policy to these values is done after the policies are retrieved from
# Synapse's database (which is done using the range specified in a purge job's
# configuration).
#
#purge_jobs:
# - longest_max_lifetime: 3d
# interval: 12h
# - shortest_max_lifetime: 3d
# interval: 1d
"""

View File

@@ -225,15 +225,6 @@ class ManholeConfig:
     pub_key: Optional[Key]


-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class RetentionConfig:
-    """Object describing the configuration of the manhole"""
-
-    interval: int
-    shortest_max_lifetime: Optional[int]
-    longest_max_lifetime: Optional[int]
-
-
 @attr.s(frozen=True)
 class LimitRemoteRoomsConfig:
     enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@@ -376,11 +367,6 @@ class ServerConfig(Config):
         # (other than those sent by local server admins)
         self.block_non_admin_invites = config.get("block_non_admin_invites", False)

-        # Whether to enable experimental MSC1849 (aka relations) support
-        self.experimental_msc1849_support_enabled = config.get(
-            "experimental_msc1849_support_enabled", True
-        )
-
         # Options to control access by tracking MAU
         self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
         self.max_mau_value = 0
@@ -466,124 +452,6 @@ class ServerConfig(Config):
         # events with profile information that differ from the target's global profile.
         self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

-        retention_config = config.get("retention")
-        if retention_config is None:
-            retention_config = {}
-
-        self.retention_enabled = retention_config.get("enabled", False)
-
-        retention_default_policy = retention_config.get("default_policy")
-
-        if retention_default_policy is not None:
-            self.retention_default_min_lifetime = retention_default_policy.get(
-                "min_lifetime"
-            )
-            if self.retention_default_min_lifetime is not None:
-                self.retention_default_min_lifetime = self.parse_duration(
-                    self.retention_default_min_lifetime
-                )
-
-            self.retention_default_max_lifetime = retention_default_policy.get(
-                "max_lifetime"
-            )
-            if self.retention_default_max_lifetime is not None:
-                self.retention_default_max_lifetime = self.parse_duration(
-                    self.retention_default_max_lifetime
-                )
-
-            if (
-                self.retention_default_min_lifetime is not None
-                and self.retention_default_max_lifetime is not None
-                and (
-                    self.retention_default_min_lifetime
-                    > self.retention_default_max_lifetime
-                )
-            ):
-                raise ConfigError(
-                    "The default retention policy's 'min_lifetime' can not be greater"
-                    " than its 'max_lifetime'"
-                )
-        else:
-            self.retention_default_min_lifetime = None
-            self.retention_default_max_lifetime = None
-
-        if self.retention_enabled:
-            logger.info(
-                "Message retention policies support enabled with the following default"
-                " policy: min_lifetime = %s ; max_lifetime = %s",
-                self.retention_default_min_lifetime,
-                self.retention_default_max_lifetime,
-            )
-
-        self.retention_allowed_lifetime_min = retention_config.get(
-            "allowed_lifetime_min"
-        )
-        if self.retention_allowed_lifetime_min is not None:
-            self.retention_allowed_lifetime_min = self.parse_duration(
-                self.retention_allowed_lifetime_min
-            )
-
-        self.retention_allowed_lifetime_max = retention_config.get(
-            "allowed_lifetime_max"
-        )
-        if self.retention_allowed_lifetime_max is not None:
-            self.retention_allowed_lifetime_max = self.parse_duration(
-                self.retention_allowed_lifetime_max
-            )
-
-        if (
-            self.retention_allowed_lifetime_min is not None
-            and self.retention_allowed_lifetime_max is not None
-            and self.retention_allowed_lifetime_min
-            > self.retention_allowed_lifetime_max
-        ):
-            raise ConfigError(
-                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
-                " greater than 'allowed_lifetime_max'"
-            )
-
-        self.retention_purge_jobs: List[RetentionConfig] = []
-        for purge_job_config in retention_config.get("purge_jobs", []):
-            interval_config = purge_job_config.get("interval")
-
-            if interval_config is None:
-                raise ConfigError(
-                    "A retention policy's purge jobs configuration must have the"
-                    " 'interval' key set."
-                )
-
-            interval = self.parse_duration(interval_config)
-
-            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
-
-            if shortest_max_lifetime is not None:
-                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
-
-            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
-
-            if longest_max_lifetime is not None:
-                longest_max_lifetime = self.parse_duration(longest_max_lifetime)
-
-            if (
-                shortest_max_lifetime is not None
-                and longest_max_lifetime is not None
-                and shortest_max_lifetime > longest_max_lifetime
-            ):
-                raise ConfigError(
-                    "A retention policy's purge jobs configuration's"
-                    " 'shortest_max_lifetime' value can not be greater than its"
-                    " 'longest_max_lifetime' value."
-                )
-
-            self.retention_purge_jobs.append(
-                RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
-            )
-
-        if not self.retention_purge_jobs:
-            self.retention_purge_jobs = [
-                RetentionConfig(self.parse_duration("1d"), None, None)
-            ]
-
         self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]

         # no_tls is not really supported any more, but let's grandfather it in
@@ -1255,75 +1123,6 @@
         #
         #user_ips_max_age: 14d

-        # Message retention policy at the server level.
-        #
-        # Room admins and mods can define a retention period for their rooms using the
-        # 'm.room.retention' state event, and server admins can cap this period by setting
-        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
-        #
-        # If this feature is enabled, Synapse will regularly look for and purge events
-        # which are older than the room's maximum retention period. Synapse will also
-        # filter events received over federation so that events that should have been
-        # purged are ignored and not stored again.
-        #
-        retention:
-          # The message retention policies feature is disabled by default. Uncomment the
-          # following line to enable it.
-          #
-          #enabled: true
-
-          # Default retention policy. If set, Synapse will apply it to rooms that lack the
-          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
-          # matter much because Synapse doesn't take it into account yet.
-          #
-          #default_policy:
-          #  min_lifetime: 1d
-          #  max_lifetime: 1y
-
-          # Retention policy limits. If set, and the state of a room contains a
-          # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
-          # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
-          # to these limits when running purge jobs.
-          #
-          #allowed_lifetime_min: 1d
-          #allowed_lifetime_max: 1y
-
-          # Server admins can define the settings of the background jobs purging the
-          # events which lifetime has expired under the 'purge_jobs' section.
-          #
-          # If no configuration is provided, a single job will be set up to delete expired
-          # events in every room daily.
-          #
-          # Each job's configuration defines which range of message lifetimes the job
-          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
-          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
-          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
-          # lower than or equal to 3 days. Both the minimum and the maximum value of a
-          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
-          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
-          # which 'max_lifetime' is lower than or equal to three days.
-          #
-          # The rationale for this per-job configuration is that some rooms might have a
-          # retention policy with a low 'max_lifetime', where history needs to be purged
-          # of outdated messages on a more frequent basis than for the rest of the rooms
-          # (e.g. every 12h), but not want that purge to be performed by a job that's
-          # iterating over every room it knows, which could be heavy on the server.
-          #
-          # If any purge job is configured, it is strongly recommended to have at least
-          # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
-          # set, or one job without 'shortest_max_lifetime' and one job without
-          # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
-          # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
-          # room's policy to these values is done after the policies are retrieved from
-          # Synapse's database (which is done using the range specified in a purge job's
-          # configuration).
-          #
-          #purge_jobs:
-          #  - longest_max_lifetime: 3d
-          #    interval: 12h
-          #  - shortest_max_lifetime: 3d
-          #    interval: 1d
-
         # Inhibits the /requestToken endpoints from returning an error that might leak
         # information about whether an e-mail address is in use or not on this
         # homeserver.
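
The two blocks removed above are relocations rather than deletions: the retention options now live in the dedicated retention config shown earlier in this diff, and the MSC1849 flag moves into the experimental config group. A rough sketch of the new access paths (attribute locations are inferred from the surrounding hunks, not spelled out by this commit):

    # Sketch only; attribute homes inferred from other hunks in this diff.
    enabled = hs.config.retention.retention_enabled
    jobs = hs.config.retention.retention_purge_jobs
    msc1849 = hs.config.experimental.msc1849_enabled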

View File

@@ -29,9 +29,12 @@ from twisted.internet.ssl import (
     TLSVersion,
     platformTrust,
 )
+from twisted.protocols.tls import TLSMemoryBIOProtocol
 from twisted.python.failure import Failure
 from twisted.web.iweb import IPolicyForHTTPS

+from synapse.config.homeserver import HomeServerConfig
+
 logger = logging.getLogger(__name__)
@@ -51,7 +54,7 @@ class ServerContextFactory(ContextFactory):
     per https://github.com/matrix-org/synapse/issues/1691
     """

-    def __init__(self, config):
+    def __init__(self, config: HomeServerConfig):
         # TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version,
         # switch to those (see https://github.com/pyca/cryptography/issues/5379).
         #
@@ -64,7 +67,7 @@ class ServerContextFactory(ContextFactory):
         self.configure_context(self._context, config)

     @staticmethod
-    def configure_context(context, config):
+    def configure_context(context: SSL.Context, config: HomeServerConfig) -> None:
         try:
             _ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
             context.set_tmp_ecdh(_ecCurve)
@@ -75,14 +78,15 @@ class ServerContextFactory(ContextFactory):
             SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
         )
         context.use_certificate_chain_file(config.tls.tls_certificate_file)
+        assert config.tls.tls_private_key is not None
         context.use_privatekey(config.tls.tls_private_key)

         # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
         context.set_cipher_list(
-            "ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
+            b"ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
        )

-    def getContext(self):
+    def getContext(self) -> SSL.Context:
         return self._context
@@ -98,7 +102,7 @@ class FederationPolicyForHTTPS:
     constructs an SSLClientConnectionCreator factory accordingly.
     """

-    def __init__(self, config):
+    def __init__(self, config: HomeServerConfig):
         self._config = config

         # Check if we're using a custom list of a CA certificates
@@ -131,7 +135,7 @@ class FederationPolicyForHTTPS:
             self._config.tls.federation_certificate_verification_whitelist
         )

-    def get_options(self, host: bytes):
+    def get_options(self, host: bytes) -> IOpenSSLClientConnectionCreator:
         # IPolicyForHTTPS.get_options takes bytes, but we want to compare
         # against the str whitelist. The hostnames in the whitelist are already
         # IDNA-encoded like the hosts will be here.
@@ -153,7 +157,9 @@ class FederationPolicyForHTTPS:
         return SSLClientConnectionCreator(host, ssl_context, should_verify)

-    def creatorForNetloc(self, hostname, port):
+    def creatorForNetloc(
+        self, hostname: bytes, port: int
+    ) -> IOpenSSLClientConnectionCreator:
         """Implements the IPolicyForHTTPS interface so that this can be passed
         directly to agents.
         """
@@ -169,16 +175,18 @@ class RegularPolicyForHTTPS:
     trust root.
     """

-    def __init__(self):
+    def __init__(self) -> None:
         trust_root = platformTrust()
         self._ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
         self._ssl_context.set_info_callback(_context_info_cb)

-    def creatorForNetloc(self, hostname, port):
+    def creatorForNetloc(
+        self, hostname: bytes, port: int
+    ) -> IOpenSSLClientConnectionCreator:
         return SSLClientConnectionCreator(hostname, self._ssl_context, True)


-def _context_info_cb(ssl_connection, where, ret):
+def _context_info_cb(ssl_connection: SSL.Connection, where: int, ret: int) -> None:
     """The 'information callback' for our openssl context objects.

     Note: Once this is set as the info callback on a Context object, the Context should
@@ -204,11 +212,13 @@ class SSLClientConnectionCreator:
     Replaces twisted.internet.ssl.ClientTLSOptions
     """

-    def __init__(self, hostname: bytes, ctx, verify_certs: bool):
+    def __init__(self, hostname: bytes, ctx: SSL.Context, verify_certs: bool):
         self._ctx = ctx
         self._verifier = ConnectionVerifier(hostname, verify_certs)

-    def clientConnectionForTLS(self, tls_protocol):
+    def clientConnectionForTLS(
+        self, tls_protocol: TLSMemoryBIOProtocol
+    ) -> SSL.Connection:
         context = self._ctx
         connection = SSL.Connection(context, None)
@@ -219,7 +229,7 @@ class SSLClientConnectionCreator:
         # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
         # tls_protocol so that the SSL context's info callback has something to
         # call to do the cert verification.
-        tls_protocol._synapse_tls_verifier = self._verifier
+        tls_protocol._synapse_tls_verifier = self._verifier  # type: ignore[attr-defined]
         return connection
@@ -244,7 +254,9 @@ class ConnectionVerifier:
         self._hostnameBytes = hostname
         self._hostnameASCII = self._hostnameBytes.decode("ascii")

-    def verify_context_info_cb(self, ssl_connection, where):
+    def verify_context_info_cb(
+        self, ssl_connection: SSL.Connection, where: int
+    ) -> None:
         if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
             ssl_connection.set_tlsext_host_name(self._hostnameBytes)
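
One note on the set_cipher_list() change above: pyOpenSSL documents the cipher string as bytes, and a str argument is only accepted through an implicit, deprecation-warned conversion, so the b-prefix both avoids the warning and matches the stricter annotations being added here. A minimal standalone sketch:

    from OpenSSL import SSL

    ctx = SSL.Context(SSL.SSLv23_METHOD)
    # The cipher string is bytes, per the pyOpenSSL API.
    ctx.set_cipher_list(b"ECDH+AESGCM:ECDH+CHACHA20:!aNULL")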

View File

@@ -100,7 +100,7 @@ def compute_content_hash(

 def compute_event_reference_hash(
-    event, hash_algorithm: Hasher = hashlib.sha256
+    event: EventBase, hash_algorithm: Hasher = hashlib.sha256
 ) -> Tuple[str, bytes]:
     """Computes the event reference hash. This is the hash of the redacted
     event.

View File

@@ -87,7 +87,7 @@ class VerifyJsonRequest:
         server_name: str,
         json_object: JsonDict,
         minimum_valid_until_ms: int,
-    ):
+    ) -> "VerifyJsonRequest":
         """Create a VerifyJsonRequest to verify all signatures on a signed JSON
         object for the given server.
         """
@@ -104,7 +104,7 @@ class VerifyJsonRequest:
         server_name: str,
         event: EventBase,
         minimum_valid_until_ms: int,
-    ):
+    ) -> "VerifyJsonRequest":
         """Create a VerifyJsonRequest to verify all signatures on an event
         object for the given server.
         """
@@ -449,7 +449,9 @@ class StoreKeyFetcher(KeyFetcher):
         self.store = hs.get_datastore()

-    async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]):
+    async def _fetch_keys(
+        self, keys_to_fetch: List[_FetchKeyRequest]
+    ) -> Dict[str, Dict[str, FetchKeyResult]]:
         key_ids_to_fetch = (
             (queue_value.server_name, key_id)
             for queue_value in keys_to_fetch
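
The return annotation added to _fetch_keys makes the nesting of the result explicit; illustratively (identifiers invented):

    # Shape of the Dict[str, Dict[str, FetchKeyResult]] result:
    #     {server_name: {key_id: FetchKeyResult(...)}}
    # e.g. result["example.org"]["ed25519:abc123"].valid_until_ts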

View File

@@ -14,7 +14,7 @@
 # limitations under the License.
 import logging
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union

 from canonicaljson import encode_canonical_json
 from signedjson.key import decode_verify_key_bytes
@@ -113,7 +113,7 @@ def validate_event_for_room_version(

 def check_auth_rules_for_event(
-    room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase]
+    room_version_obj: RoomVersion, event: EventBase, auth_events: Iterable[EventBase]
 ) -> None:
     """Check that an event complies with the auth rules
@@ -137,8 +137,6 @@ def check_auth_rules_for_event(
     Raises:
         AuthError if the checks fail
     """
-    assert isinstance(auth_events, dict)
-
     # We need to ensure that the auth events are actually for the same room, to
     # stop people from using powers they've been granted in other rooms for
     # example.
@@ -147,7 +145,7 @@ def check_auth_rules_for_event(
     # the state res algorithm isn't silly enough to give us events from different rooms.
     # Still, it's easier to do it anyway.
     room_id = event.room_id
-    for auth_event in auth_events.values():
+    for auth_event in auth_events:
         if auth_event.room_id != room_id:
             raise AuthError(
                 403,
@@ -186,8 +184,10 @@ def check_auth_rules_for_event(
         logger.debug("Allowing! %s", event)
         return

+    auth_dict = {(e.type, e.state_key): e for e in auth_events}
+
     # 3. If event does not have a m.room.create in its auth_events, reject.
-    creation_event = auth_events.get((EventTypes.Create, ""), None)
+    creation_event = auth_dict.get((EventTypes.Create, ""), None)
     if not creation_event:
         raise AuthError(403, "No create event in auth events")
@@ -195,7 +195,7 @@ def check_auth_rules_for_event(
         creating_domain = get_domain_from_id(event.room_id)
         originating_domain = get_domain_from_id(event.sender)
         if creating_domain != originating_domain:
-            if not _can_federate(event, auth_events):
+            if not _can_federate(event, auth_dict):
                 raise AuthError(403, "This room has been marked as unfederatable.")

     # 4. If type is m.room.aliases
@@ -217,23 +217,20 @@ def check_auth_rules_for_event(
         logger.debug("Allowing! %s", event)
         return

-    if logger.isEnabledFor(logging.DEBUG):
-        logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
-
     # 5. If type is m.room.membership
     if event.type == EventTypes.Member:
-        _is_membership_change_allowed(room_version_obj, event, auth_events)
+        _is_membership_change_allowed(room_version_obj, event, auth_dict)
         logger.debug("Allowing! %s", event)
         return

-    _check_event_sender_in_room(event, auth_events)
+    _check_event_sender_in_room(event, auth_dict)

     # Special case to allow m.room.third_party_invite events wherever
     # a user is allowed to issue invites. Fixes
     # https://github.com/vector-im/vector-web/issues/1208 hopefully
     if event.type == EventTypes.ThirdPartyInvite:
-        user_level = get_user_power_level(event.user_id, auth_events)
-        invite_level = get_named_level(auth_events, "invite", 0)
+        user_level = get_user_power_level(event.user_id, auth_dict)
+        invite_level = get_named_level(auth_dict, "invite", 0)

         if user_level < invite_level:
             raise AuthError(403, "You don't have permission to invite users")
@@ -241,20 +238,20 @@ def check_auth_rules_for_event(
             logger.debug("Allowing! %s", event)
             return

-    _can_send_event(event, auth_events)
+    _can_send_event(event, auth_dict)

     if event.type == EventTypes.PowerLevels:
-        _check_power_levels(room_version_obj, event, auth_events)
+        _check_power_levels(room_version_obj, event, auth_dict)

     if event.type == EventTypes.Redaction:
-        check_redaction(room_version_obj, event, auth_events)
+        check_redaction(room_version_obj, event, auth_dict)

     if (
         event.type == EventTypes.MSC2716_INSERTION
         or event.type == EventTypes.MSC2716_BATCH
         or event.type == EventTypes.MSC2716_MARKER
     ):
-        check_historical(room_version_obj, event, auth_events)
+        check_historical(room_version_obj, event, auth_dict)

     logger.debug("Allowing! %s", event)

View File

@@ -348,12 +348,16 @@ class EventBase(metaclass=abc.ABCMeta):
         return self.__repr__()

     def __repr__(self):
-        return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
-            self.__class__.__name__,
-            self.event_id,
-            self.get("type", None),
-            self.get("state_key", None),
-            self.internal_metadata.is_outlier(),
+        rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
+
+        return (
+            f"<{self.__class__.__name__} "
+            f"{rejection}"
+            f"event_id={self.event_id}, "
+            f"type={self.get('type')}, "
+            f"state_key={self.get('state_key')}, "
+            f"outlier={self.internal_metadata.is_outlier()}"
+            ">"
         )
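
For reference, the rewritten __repr__ produces strings along these lines (values invented):

    # <FrozenEvent event_id=$abc:example.org, type=m.room.message, state_key=None, outlier=False>
    # and, when rejected_reason is set:
    # <FrozenEvent REJECTED=auth_error, event_id=$abc:example.org, type=m.room.message, state_key=None, outlier=False>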

View File

@@ -90,13 +90,13 @@ class EventBuilder:
         )

     @property
-    def state_key(self):
+    def state_key(self) -> str:
         if self._state_key is not None:
             return self._state_key

         raise AttributeError("state_key")

-    def is_state(self):
+    def is_state(self) -> bool:
         return self._state_key is not None

     async def build(

View File

@@ -14,6 +14,7 @@
 import logging
 from typing import (
     TYPE_CHECKING,
+    Any,
     Awaitable,
     Callable,
     Dict,
@@ -33,14 +34,13 @@ if TYPE_CHECKING:
 GET_USERS_FOR_STATES_CALLBACK = Callable[
     [Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
 ]
-GET_INTERESTED_USERS_CALLBACK = Callable[
-    [str], Awaitable[Union[Set[str], "PresenceRouter.ALL_USERS"]]
-]
+# This must either return a set of strings or the constant PresenceRouter.ALL_USERS.
+GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]

 logger = logging.getLogger(__name__)


-def load_legacy_presence_router(hs: "HomeServer"):
+def load_legacy_presence_router(hs: "HomeServer") -> None:
     """Wrapper that loads a presence router module configured using the old
     configuration, and registers the hooks they implement.
     """
@@ -69,9 +69,10 @@ def load_legacy_presence_router(hs: "HomeServer"):
         if f is None:
             return None

-        def run(*args, **kwargs):
-            # mypy doesn't do well across function boundaries so we need to tell it
-            # f is definitely not None.
+        def run(*args: Any, **kwargs: Any) -> Awaitable:
+            # Assertion required because mypy can't prove we won't change `f`
+            # back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             return maybe_awaitable(f(*args, **kwargs))
@@ -104,7 +105,7 @@ class PresenceRouter:
         self,
         get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
         get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
-    ):
+    ) -> None:
         # PresenceRouter modules are required to implement both of these methods
         # or neither of them as they are assumed to act in a complementary manner
         paired_methods = [get_users_for_states, get_interested_users]
@@ -142,7 +143,7 @@ class PresenceRouter:
             # Don't include any extra destinations for presence updates
             return {}

-        users_for_states = {}
+        users_for_states: Dict[str, Set[UserPresenceState]] = {}
         # run all the callbacks for get_users_for_states and combine the results
         for callback in self._get_users_for_states_callbacks:
             try:
@@ -171,7 +172,7 @@ class PresenceRouter:

         return users_for_states

-    async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]:
+    async def get_interested_users(self, user_id: str) -> Union[Set[str], str]:
         """
         Retrieve a list of users that `user_id` is interested in receiving the
         presence of. This will be in addition to those they share a room with.
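
The loosened Union[Set[str], str] reflects that PresenceRouter.ALL_USERS is itself a string sentinel, which the previous (invalid) string-literal annotation tried to express. A minimal sketch of a module callback matching the new alias (the policy and identifiers are invented):

    from typing import Set, Union

    from synapse.events.presence_router import PresenceRouter

    async def get_interested_users(user_id: str) -> Union[Set[str], str]:
        # A module may opt a user into receiving all presence...
        if user_id.startswith("@monitor:"):
            return PresenceRouter.ALL_USERS  # the string sentinel noted above
        # ...or return an explicit set of interesting user IDs.
        return {"@alice:example.org"}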

View File

@@ -11,17 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union

 import attr
 from frozendict import frozendict

+from twisted.internet.defer import Deferred
+
 from synapse.appservice import ApplicationService
 from synapse.events import EventBase
 from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.types import StateMap
+from synapse.types import JsonDict, StateMap

 if TYPE_CHECKING:
+    from synapse.storage import Storage
     from synapse.storage.databases.main import DataStore
@@ -112,13 +115,13 @@ class EventContext:
     @staticmethod
     def with_state(
-        state_group,
-        state_group_before_event,
-        current_state_ids,
-        prev_state_ids,
-        prev_group=None,
-        delta_ids=None,
-    ):
+        state_group: Optional[int],
+        state_group_before_event: Optional[int],
+        current_state_ids: Optional[StateMap[str]],
+        prev_state_ids: Optional[StateMap[str]],
+        prev_group: Optional[int] = None,
+        delta_ids: Optional[StateMap[str]] = None,
+    ) -> "EventContext":
         return EventContext(
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
@@ -129,22 +132,22 @@
         )

     @staticmethod
-    def for_outlier():
+    def for_outlier() -> "EventContext":
         """Return an EventContext instance suitable for persisting an outlier event"""
         return EventContext(
             current_state_ids={},
             prev_state_ids={},
         )

-    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
+    async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
         """Converts self to a type that can be serialized as JSON, and then
         deserialized by `deserialize`

         Args:
-            event (FrozenEvent): The event that this context relates to
+            event: The event that this context relates to

         Returns:
-            dict
+            The serialized event.
         """

         # We don't serialize the full state dicts, instead they get pulled out
@@ -170,17 +173,16 @@ class EventContext:
         }

     @staticmethod
-    def deserialize(storage, input):
+    def deserialize(storage: "Storage", input: JsonDict) -> "EventContext":
         """Converts a dict that was produced by `serialize` back into a
         EventContext.

         Args:
-            storage (Storage): Used to convert AS ID to AS object and fetch
-                state.
-            input (dict): A dict produced by `serialize`
+            storage: Used to convert AS ID to AS object and fetch state.
+            input: A dict produced by `serialize`

         Returns:
-            EventContext
+            The event context.
         """
         context = _AsyncEventContextImpl(
             # We use the state_group and prev_state_id stuff to pull the
@@ -241,22 +243,25 @@ class EventContext:
         await self._ensure_fetched()
         return self._current_state_ids

-    async def get_prev_state_ids(self):
+    async def get_prev_state_ids(self) -> StateMap[str]:
         """
         Gets the room state map, excluding this event.

         For a non-state event, this will be the same as get_current_state_ids().

         Returns:
-            dict[(str, str), str]|None: Returns None if state_group
-                is None, which happens when the associated event is an outlier.
-                Maps a (type, state_key) to the event ID of the state event matching
-                this tuple.
+            Returns {} if state_group is None, which happens when the associated
+            event is an outlier.
+
+            Maps a (type, state_key) to the event ID of the state event matching
+            this tuple.
         """
         await self._ensure_fetched()
+        # There *should* be previous state IDs now.
+        assert self._prev_state_ids is not None
         return self._prev_state_ids

-    def get_cached_current_state_ids(self):
+    def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
         """Gets the current state IDs if we have them already cached.

         It is an error to access this for a rejected event, since rejected state should
@@ -264,16 +269,17 @@ class EventContext:
         ``rejected`` is set.

         Returns:
-            dict[(str, str), str]|None: Returns None if we haven't cached the
-                state or if state_group is None, which happens when the associated
-                event is an outlier.
+            Returns None if we haven't cached the state or if state_group is None
+            (which happens when the associated event is an outlier).
+
+            Otherwise, returns the current state IDs.
         """
         if self.rejected:
             raise RuntimeError("Attempt to access state_ids of rejected event")

         return self._current_state_ids

-    async def _ensure_fetched(self):
+    async def _ensure_fetched(self) -> None:
         return None
@@ -285,46 +291,46 @@ class _AsyncEventContextImpl(EventContext):
     Attributes:
-        _storage (Storage)
+        _storage

-        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
-            been calculated. None if we haven't started calculating yet
+        _fetching_state_deferred: Resolves when *_state_ids have been calculated.
+            None if we haven't started calculating yet

-        _event_type (str): The type of the event the context is associated with.
+        _event_type: The type of the event the context is associated with.

-        _event_state_key (str): The state_key of the event the context is
-            associated with.
+        _event_state_key: The state_key of the event the context is associated with.

-        _prev_state_id (str|None): If the event associated with the context is
-            a state event, then `_prev_state_id` is the event_id of the state
-            that was replaced.
+        _prev_state_id: If the event associated with the context is a state event,
+            then `_prev_state_id` is the event_id of the state that was replaced.
     """

     # This needs to have a default as we're inheriting
-    _storage = attr.ib(default=None)
-    _prev_state_id = attr.ib(default=None)
-    _event_type = attr.ib(default=None)
-    _event_state_key = attr.ib(default=None)
-    _fetching_state_deferred = attr.ib(default=None)
+    _storage: "Storage" = attr.ib(default=None)
+    _prev_state_id: Optional[str] = attr.ib(default=None)
+    _event_type: str = attr.ib(default=None)
+    _event_state_key: Optional[str] = attr.ib(default=None)
+    _fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)

-    async def _ensure_fetched(self):
+    async def _ensure_fetched(self) -> None:
         if not self._fetching_state_deferred:
             self._fetching_state_deferred = run_in_background(self._fill_out_state)

-        return await make_deferred_yieldable(self._fetching_state_deferred)
+        await make_deferred_yieldable(self._fetching_state_deferred)

-    async def _fill_out_state(self):
+    async def _fill_out_state(self) -> None:
         """Called to populate the _current_state_ids and _prev_state_ids
         attributes by loading from the database.
         """
         if self.state_group is None:
             return

-        self._current_state_ids = await self._storage.state.get_state_ids_for_group(
+        current_state_ids = await self._storage.state.get_state_ids_for_group(
             self.state_group
         )
+        # Set this separately so mypy knows current_state_ids is not None.
+        self._current_state_ids = current_state_ids
         if self._event_state_key is not None:
-            self._prev_state_ids = dict(self._current_state_ids)
+            self._prev_state_ids = dict(current_state_ids)

             key = (self._event_type, self._event_state_key)
             if self._prev_state_id:
@@ -332,10 +338,12 @@ class _AsyncEventContextImpl(EventContext):
             else:
                 self._prev_state_ids.pop(key, None)
         else:
-            self._prev_state_ids = self._current_state_ids
+            self._prev_state_ids = current_state_ids


-def _encode_state_dict(state_dict):
+def _encode_state_dict(
+    state_dict: Optional[StateMap[str]],
+) -> Optional[List[Tuple[str, str, str]]]:
     """Since dicts of (type, state_key) -> event_id cannot be serialized in
     JSON we need to convert them to a form that can.
     """
@@ -345,7 +353,9 @@ def _encode_state_dict(state_dict):
     return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]


-def _decode_state_dict(input):
+def _decode_state_dict(
+    input: Optional[List[Tuple[str, str, str]]]
+) -> Optional[StateMap[str]]:
     """Decodes a state dict encoded using `_encode_state_dict` above"""
     if input is None:
         return None
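
A worked example of the round trip implemented by the two helpers above (values invented):

    # _encode_state_dict flattens the (type, state_key) tuple keys so that the
    # mapping survives JSON serialization; _decode_state_dict reverses it.
    #
    #   {("m.room.member", "@alice:example.org"): "$event1"}
    #       -> [("m.room.member", "@alice:example.org", "$event1")]
    #       -> back to the original (type, state_key) -> event_id mapping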

View File

@@ -77,7 +77,7 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
 ]


-def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
+def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
     """Wrapper that loads spam checkers configured using the old configuration, and
     registers the spam checker hooks they implement.
     """
@@ -129,9 +129,9 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
             request_info: Collection[Tuple[str, str]],
             auth_provider_id: Optional[str],
         ) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
-            # We've already made sure f is not None above, but mypy doesn't
-            # do well across function boundaries so we need to tell it f is
-            # definitely not None.
+            # Assertion required because mypy can't prove we won't
+            # change `f` back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             return f(
@@ -146,9 +146,10 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
                 "Bad signature for callback check_registration_for_spam",
             )

-        def run(*args, **kwargs):
-            # mypy doesn't do well across function boundaries so we need to tell it
-            # wrapped_func is definitely not None.
+        def run(*args: Any, **kwargs: Any) -> Awaitable:
+            # Assertion required because mypy can't prove we won't change `f`
+            # back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert wrapped_func is not None

             return maybe_awaitable(wrapped_func(*args, **kwargs))
@@ -165,7 +166,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):

 class SpamChecker:
-    def __init__(self):
+    def __init__(self) -> None:
         self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
         self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
         self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
@@ -209,7 +210,7 @@ class SpamChecker:
             CHECK_REGISTRATION_FOR_SPAM_CALLBACK
         ] = None,
         check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
-    ):
+    ) -> None:
         """Register callbacks from module for each hook."""
         if check_event_for_spam is not None:
             self._check_event_for_spam_callbacks.append(check_event_for_spam)
@@ -275,7 +276,9 @@ class SpamChecker:
         return False

-    async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool):
+    async def user_may_join_room(
+        self, user_id: str, room_id: str, is_invited: bool
+    ) -> bool:
         """Checks if a given users is allowed to join a room.

         Not called when a user creates a room.
@@ -285,7 +288,7 @@ class SpamChecker:
             is_invited: Whether the user is invited into the room

         Returns:
-            bool: Whether the user may join the room
+            Whether the user may join the room
         """
         for callback in self._user_may_join_room_callbacks:
             if await callback(user_id, room_id, is_invited) is False:
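
Under the sharpened -> bool signature, a module hook registered via register_callbacks() returns a plain boolean, and any callback returning False denies the join. A minimal sketch of such a callback (the policy and domain are invented):

    async def user_may_join_room(user_id: str, room_id: str, is_invited: bool) -> bool:
        # Example policy: rooms on this domain are invite-only.
        if room_id.endswith(":invite-only.example.org"):
            return is_invited
        return True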

View File

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

 from synapse.api.errors import SynapseError
 from synapse.events import EventBase
@@ -38,7 +38,7 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
 ]


-def load_legacy_third_party_event_rules(hs: "HomeServer"):
+def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
     """Wrapper that loads a third party event rules module configured using the old
     configuration, and registers the hooks they implement.
     """
@@ -77,9 +77,9 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
             event: EventBase,
             state_events: StateMap[EventBase],
         ) -> Tuple[bool, Optional[dict]]:
-            # We've already made sure f is not None above, but mypy doesn't do well
-            # across function boundaries so we need to tell it f is definitely not
-            # None.
+            # Assertion required because mypy can't prove we won't change
+            # `f` back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             res = await f(event, state_events)
@@ -98,9 +98,9 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
         async def wrap_on_create_room(
             requester: Requester, config: dict, is_requester_admin: bool
         ) -> None:
-            # We've already made sure f is not None above, but mypy doesn't do well
-            # across function boundaries so we need to tell it f is definitely not
-            # None.
+            # Assertion required because mypy can't prove we won't change
+            # `f` back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             res = await f(requester, config, is_requester_admin)
@@ -112,9 +112,10 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
             return wrap_on_create_room

-        def run(*args, **kwargs):
-            # mypy doesn't do well across function boundaries so we need to tell it
-            # f is definitely not None.
+        def run(*args: Any, **kwargs: Any) -> Awaitable:
+            # Assertion required because mypy can't prove we won't change `f`
+            # back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             return maybe_awaitable(f(*args, **kwargs))
@@ -162,7 +163,7 @@ class ThirdPartyEventRules:
         check_visibility_can_be_modified: Optional[
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
-    ):
+    ) -> None:
         """Register callbacks from modules for each hook."""
         if check_event_allowed is not None:
             self._check_event_allowed_callbacks.append(check_event_allowed)
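
The replacement comments in this file (and the matching ones in presence_router.py and spam_checker.py above) all describe the same mypy limitation: a None-check on a captured variable does not propagate into an inner function, because the variable could in principle be rebound before the inner function runs. A minimal self-contained reproduction:

    from typing import Callable, Optional

    def outer(f: Optional[Callable[[], int]]) -> Optional[Callable[[], int]]:
        if f is None:
            return None

        def run() -> int:
            # Without this assertion mypy still treats `f` as Optional here.
            assert f is not None
            return f()

        return run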

View File

@ -13,18 +13,32 @@
# limitations under the License. # limitations under the License.
import collections.abc import collections.abc
import re import re
from typing import Any, Mapping, Union from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Union,
)
from frozendict import frozendict from frozendict import frozendict
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict
from synapse.util.async_helpers import yieldable_gather_results from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.frozenutils import unfreeze from synapse.util.frozenutils import unfreeze
from . import EventBase from . import EventBase
if TYPE_CHECKING:
from synapse.server import HomeServer
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded # (?<!stuff) matches if the current position in the string is not preceded
# by a match for 'stuff'. # by a match for 'stuff'.
@ -65,7 +79,7 @@ def prune_event(event: EventBase) -> EventBase:
return pruned_event return pruned_event
def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict: def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
"""Redacts the event_dict in the same way as `prune_event`, except it """Redacts the event_dict in the same way as `prune_event`, except it
operates on dicts rather than event objects operates on dicts rather than event objects
@ -97,7 +111,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
new_content = {} new_content = {}
def add_fields(*fields): def add_fields(*fields: str) -> None:
for field in fields: for field in fields:
if field in event_dict["content"]: if field in event_dict["content"]:
new_content[field] = event_dict["content"][field] new_content[field] = event_dict["content"][field]
@ -151,7 +165,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
allowed_fields["content"] = new_content allowed_fields["content"] = new_content
unsigned = {} unsigned: JsonDict = {}
allowed_fields["unsigned"] = unsigned allowed_fields["unsigned"] = unsigned
event_unsigned = event_dict.get("unsigned", {}) event_unsigned = event_dict.get("unsigned", {})
@ -164,16 +178,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
return allowed_fields return allowed_fields
def _copy_field(src, dst, field): def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None:
"""Copy the field in 'src' to 'dst'. """Copy the field in 'src' to 'dst'.
For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"] For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
then dst={"foo":{"bar":5}}. then dst={"foo":{"bar":5}}.
Args: Args:
src(dict): The dict to read from. src: The dict to read from.
dst(dict): The dict to modify. dst: The dict to modify.
field(list<str>): List of keys to drill down to in 'src'. field: List of keys to drill down to in 'src'.
""" """
if len(field) == 0: # this should be impossible if len(field) == 0: # this should be impossible
return return
@ -205,7 +219,7 @@ def _copy_field(src, dst, field):
sub_out_dict[key_to_move] = sub_dict[key_to_move] sub_out_dict[key_to_move] = sub_dict[key_to_move]
def only_fields(dictionary, fields): def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
"""Return a new dict with only the fields in 'dictionary' which are present """Return a new dict with only the fields in 'dictionary' which are present
in 'fields'. in 'fields'.
@ -215,11 +229,11 @@ def only_fields(dictionary, fields):
A literal '.' character in a field name may be escaped using a '\'. A literal '.' character in a field name may be escaped using a '\'.
Args: Args:
dictionary(dict): The dictionary to read from. dictionary: The dictionary to read from.
fields(list<str>): A list of fields to copy over. Only shallow refs are fields: A list of fields to copy over. Only shallow refs are
taken. taken.
Returns: Returns:
dict: A new dictionary with only the given fields. If fields was empty, A new dictionary with only the given fields. If fields was empty,
the same dictionary is returned. the same dictionary is returned.
""" """
if len(fields) == 0: if len(fields) == 0:
@ -235,17 +249,17 @@ def only_fields(dictionary, fields):
[f.replace(r"\.", r".") for f in field_array] for field_array in split_fields [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
] ]
output = {} output: JsonDict = {}
for field_array in split_fields: for field_array in split_fields:
_copy_field(dictionary, output, field_array) _copy_field(dictionary, output, field_array)
return output return output
def format_event_raw(d): def format_event_raw(d: JsonDict) -> JsonDict:
return d return d
def format_event_for_client_v1(d): def format_event_for_client_v1(d: JsonDict) -> JsonDict:
d = format_event_for_client_v2(d) d = format_event_for_client_v2(d)
sender = d.get("sender") sender = d.get("sender")
@ -267,7 +281,7 @@ def format_event_for_client_v1(d):
return d return d
def format_event_for_client_v2(d): def format_event_for_client_v2(d: JsonDict) -> JsonDict:
drop_keys = ( drop_keys = (
"auth_events", "auth_events",
"prev_events", "prev_events",
@ -282,37 +296,37 @@ def format_event_for_client_v2(d):
return d return d
def format_event_for_client_v2_without_room_id(d): def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
d = format_event_for_client_v2(d) d = format_event_for_client_v2(d)
d.pop("room_id", None) d.pop("room_id", None)
return d return d
def serialize_event( def serialize_event(
e, e: Union[JsonDict, EventBase],
time_now_ms, time_now_ms: int,
as_client_event=True, as_client_event: bool = True,
event_format=format_event_for_client_v1, event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
token_id=None, token_id: Optional[str] = None,
only_event_fields=None, only_event_fields: Optional[List[str]] = None,
include_stripped_room_state=False, include_stripped_room_state: bool = False,
): ) -> JsonDict:
"""Serialize event for clients """Serialize event for clients
Args: Args:
e (EventBase) e
time_now_ms (int) time_now_ms
as_client_event (bool) as_client_event
event_format event_format
token_id token_id
only_event_fields only_event_fields
include_stripped_room_state (bool): Some events can have stripped room state include_stripped_room_state: Some events can have stripped room state
stored in the `unsigned` field. This is required for invite and knock stored in the `unsigned` field. This is required for invite and knock
functionality. If this option is False, that state will be removed from the functionality. If this option is False, that state will be removed from the
event before it is returned. Otherwise, it will be kept. event before it is returned. Otherwise, it will be kept.
Returns: Returns:
dict The serialized event dictionary.
""" """
# FIXME(erikj): To handle the case of presence events and the like # FIXME(erikj): To handle the case of presence events and the like
@ -369,25 +383,28 @@ class EventClientSerializer:
clients. clients.
""" """
def __init__(self, hs): def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore() self.store = hs.get_datastore()
self.experimental_msc1849_support_enabled = ( self._msc1849_enabled = hs.config.experimental.msc1849_enabled
hs.config.server.experimental_msc1849_support_enabled self._msc3440_enabled = hs.config.experimental.msc3440_enabled
)
async def serialize_event( async def serialize_event(
self, event, time_now, bundle_aggregations=True, **kwargs self,
): event: Union[JsonDict, EventBase],
time_now: int,
bundle_aggregations: bool = True,
**kwargs: Any,
) -> JsonDict:
"""Serializes a single event. """Serializes a single event.
Args: Args:
event (EventBase) event
time_now (int): The current time in milliseconds time_now: The current time in milliseconds
bundle_aggregations (bool): Whether to bundle in related events bundle_aggregations: Whether to bundle in related events
**kwargs: Arguments to pass to `serialize_event` **kwargs: Arguments to pass to `serialize_event`
Returns: Returns:
dict: The serialized event The serialized event
""" """
# To handle the case of presence events and the like # To handle the case of presence events and the like
if not isinstance(event, EventBase): if not isinstance(event, EventBase):
@ -400,7 +417,7 @@ class EventClientSerializer:
# we need to bundle in with the event. # we need to bundle in with the event.
# Do not bundle relations if the event has been redacted # Do not bundle relations if the event has been redacted
if not event.internal_metadata.is_redacted() and ( if not event.internal_metadata.is_redacted() and (
self.experimental_msc1849_support_enabled and bundle_aggregations self._msc1849_enabled and bundle_aggregations
): ):
annotations = await self.store.get_aggregation_groups_for_event(event_id) annotations = await self.store.get_aggregation_groups_for_event(event_id)
references = await self.store.get_relations_for_event( references = await self.store.get_relations_for_event(
@ -446,27 +463,45 @@ class EventClientSerializer:
"sender": edit.sender, "sender": edit.sender,
} }
# If this event is the start of a thread, include a summary of the replies.
if self._msc3440_enabled:
(
thread_count,
latest_thread_event,
) = await self.store.get_thread_summary(event_id)
if latest_thread_event:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.THREAD] = {
# Don't bundle aggregations as this could recurse forever.
"latest_event": await self.serialize_event(
latest_thread_event, time_now, bundle_aggregations=False
),
"count": thread_count,
}
return serialized_event return serialized_event
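For orientation, a hedged sketch of the payload the thread-summary block produces (all event IDs and the count invented; the relation-type string was still unstable-prefixed at the time, so a placeholder stands in for RelationTypes.THREAD):

# Hypothetical shape of what gets added under "unsigned" on a thread root.
RELATION_TYPE = "m.thread"  # placeholder for RelationTypes.THREAD
serialized_event = {
    "event_id": "$thread_root:example.org",
    "unsigned": {
        "m.relations": {
            RELATION_TYPE: {
                # The latest reply is serialized with bundle_aggregations=False,
                # which is what prevents infinite recursion.
                "latest_event": {"event_id": "$latest_reply:example.org"},
                "count": 7,
            },
        },
    },
}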
def serialize_events(self, events, time_now, **kwargs): async def serialize_events(
self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
) -> List[JsonDict]:
"""Serializes multiple events. """Serializes multiple events.
Args: Args:
event (iter[EventBase]) event
time_now (int): The current time in milliseconds time_now: The current time in milliseconds
**kwargs: Arguments to pass to `serialize_event` **kwargs: Arguments to pass to `serialize_event`
Returns: Returns:
Deferred[list[dict]]: The list of serialized events The list of serialized events
""" """
return yieldable_gather_results( return await yieldable_gather_results(
self.serialize_event, events, time_now=time_now, **kwargs self.serialize_event, events, time_now=time_now, **kwargs
) )
def copy_power_levels_contents( def copy_power_levels_contents(
old_power_levels: Mapping[str, Union[int, Mapping[str, int]]] old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
): ) -> Dict[str, Union[int, Dict[str, int]]]:
"""Copy the content of a power_levels event, unfreezing frozendicts along the way """Copy the content of a power_levels event, unfreezing frozendicts along the way
Raises: Raises:
@ -475,7 +510,7 @@ def copy_power_levels_contents(
if not isinstance(old_power_levels, collections.abc.Mapping): if not isinstance(old_power_levels, collections.abc.Mapping):
raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,)) raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))
power_levels = {} power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
for k, v in old_power_levels.items(): for k, v in old_power_levels.items():
if isinstance(v, int): if isinstance(v, int):
@ -483,7 +518,8 @@ def copy_power_levels_contents(
continue continue
if isinstance(v, collections.abc.Mapping): if isinstance(v, collections.abc.Mapping):
power_levels[k] = h = {} h: Dict[str, int] = {}
power_levels[k] = h
for k1, v1 in v.items(): for k1, v1 in v.items():
# we should only have one level of nesting # we should only have one level of nesting
if not isinstance(v1, int): if not isinstance(v1, int):
@ -498,7 +534,7 @@ def copy_power_levels_contents(
return power_levels return power_levels
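As a usage sketch (user IDs invented), the function turns a frozen power-levels content into plain mutable dicts, one level of nesting deep:

from frozendict import frozendict

from synapse.events.utils import copy_power_levels_contents

# A frozen power-levels content, as it might come out of an event.
frozen = frozendict({"ban": 50, "users": frozendict({"@admin:example.org": 100})})
copied = copy_power_levels_contents(frozen)

# The copy is built from plain dicts, so it can be mutated safely.
copied["users"]["@mod:example.org"] = 50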
def validate_canonicaljson(value: Any): def validate_canonicaljson(value: Any) -> None:
""" """
Ensure that the JSON object is valid according to the rules of canonical JSON. Ensure that the JSON object is valid according to the rules of canonical JSON.

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import collections.abc import collections.abc
from typing import Union from typing import Iterable, Union
import jsonschema import jsonschema
@ -28,11 +28,11 @@ from synapse.events.utils import (
validate_canonicaljson, validate_canonicaljson,
) )
from synapse.federation.federation_server import server_matches_acl_event from synapse.federation.federation_server import server_matches_acl_event
from synapse.types import EventID, RoomID, UserID from synapse.types import EventID, JsonDict, RoomID, UserID
class EventValidator: class EventValidator:
def validate_new(self, event: EventBase, config: HomeServerConfig): def validate_new(self, event: EventBase, config: HomeServerConfig) -> None:
"""Validates the event has roughly the right format """Validates the event has roughly the right format
Args: Args:
@ -122,7 +122,7 @@ class EventValidator:
errcode=Codes.BAD_JSON, errcode=Codes.BAD_JSON,
) )
def _validate_retention(self, event: EventBase): def _validate_retention(self, event: EventBase) -> None:
"""Checks that an event that defines the retention policy for a room respects the """Checks that an event that defines the retention policy for a room respects the
format enforced by the spec. format enforced by the spec.
@ -162,7 +162,7 @@ class EventValidator:
errcode=Codes.BAD_JSON, errcode=Codes.BAD_JSON,
) )
def validate_builder(self, event: Union[EventBase, EventBuilder], config: HomeServerConfig): def validate_builder(self, event: Union[EventBase, EventBuilder], config: HomeServerConfig) -> None:
"""Validates that the builder/event has roughly the right format. Only """Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the checks values that we expect a proto event to have, rather than all the
fields an event would have fields an event would have
@ -214,14 +214,14 @@ class EventValidator:
self._ensure_state_event(event) self._ensure_state_event(event)
def _ensure_strings(self, d, keys): def _ensure_strings(self, d: JsonDict, keys: Iterable[str]) -> None:
for s in keys: for s in keys:
if s not in d: if s not in d:
raise SynapseError(400, "'%s' not in content" % (s,)) raise SynapseError(400, "'%s' not in content" % (s,))
if not isinstance(d[s], str): if not isinstance(d[s], str):
raise SynapseError(400, "'%s' not a string type" % (s,)) raise SynapseError(400, "'%s' not a string type" % (s,))
def _ensure_state_event(self, event): def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None:
if not event.is_state(): if not event.is_state():
raise SynapseError(400, "'%s' must be state events" % (event.type,)) raise SynapseError(400, "'%s' must be state events" % (event.type,))
@ -254,7 +254,9 @@ POWER_LEVELS_SCHEMA = {
} }
def _create_power_level_validator(): # This could return something newer than Draft 7, but that's the current "latest"
# validator.
def _create_power_level_validator() -> jsonschema.Draft7Validator:
validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA) validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
# by default jsonschema does not consider a frozendict to be an object so # by default jsonschema does not consider a frozendict to be an object so
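For context, a minimal sketch (toy schema) of how jsonschema.validators.validator_for resolves a schema to a validator class; the hunk above then extends the returned class's type checker so frozendicts count as objects:

import jsonschema

# validator_for inspects the "$schema" key to pick the matching class.
schema = {"$schema": "http://json-schema.org/draft-07/schema#", "type": "object"}
validator_cls = jsonschema.validators.validator_for(schema)
assert validator_cls is jsonschema.Draft7Validator

# Instances of the class validate documents against the schema.
validator_cls(schema).validate({"ban": 50})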

View File

@ -14,6 +14,7 @@
# limitations under the License. # limitations under the License.
import logging import logging
from collections import namedtuple from collections import namedtuple
from typing import TYPE_CHECKING
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError from synapse.api.errors import Codes, SynapseError
@ -25,11 +26,15 @@ from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict from synapse.http.servlet import assert_params_in_dict
from synapse.types import JsonDict, get_domain_from_id from synapse.types import JsonDict, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class FederationBase: class FederationBase:
def __init__(self, hs): def __init__(self, hs: "HomeServer"):
self.hs = hs self.hs = hs
self.server_name = hs.hostname self.server_name = hs.hostname

View File

@ -467,7 +467,7 @@ class FederationServer(FederationBase):
async def on_room_state_request( async def on_room_state_request(
self, origin: str, room_id: str, event_id: Optional[str] self, origin: str, room_id: str, event_id: Optional[str]
) -> Tuple[int, Dict[str, Any]]: ) -> Tuple[int, JsonDict]:
origin_host, _ = parse_server_name(origin) origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id) await self.check_server_matches_acl(origin_host, room_id)
@ -481,7 +481,7 @@ class FederationServer(FederationBase):
# - but that's non-trivial to get right, and anyway somewhat defeats # - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer. # the point of the linearizer.
with (await self._server_linearizer.queue((origin, room_id))): with (await self._server_linearizer.queue((origin, room_id))):
resp = dict( resp: JsonDict = dict(
await self._state_resp_cache.wrap( await self._state_resp_cache.wrap(
(room_id, event_id), (room_id, event_id),
self._on_context_state_request_compute, self._on_context_state_request_compute,
@ -1061,11 +1061,12 @@ class FederationServer(FederationBase):
origin, event = next origin, event = next
lock = await self.store.try_acquire_lock( new_lock = await self.store.try_acquire_lock(
_INBOUND_EVENT_HANDLING_LOCK_NAME, room_id _INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
) )
if not lock: if not new_lock:
return return
lock = new_lock
def __str__(self) -> str: def __str__(self) -> str:
return "<ReplicationLayer(%s)>" % self.server_name return "<ReplicationLayer(%s)>" % self.server_name

View File

@ -185,19 +185,26 @@ class ApplicationServicesHandler:
new_token: Optional[int], new_token: Optional[int],
users: Optional[Collection[Union[str, UserID]]] = None, users: Optional[Collection[Union[str, UserID]]] = None,
) -> None: ) -> None:
"""This is called by the notifier in the background """
when a ephemeral event handled by the homeserver. This is called by the notifier in the background when an ephemeral event is handled
by the homeserver.
This will determine which appservices This will determine which appservices are interested in the event, and submit them.
are interested in the event, and submit them.
Events will only be pushed to appservices
that have opted into ephemeral events
Args: Args:
stream_key: The stream the event came from. stream_key: The stream the event came from.
new_token: The latest stream token
users: The user(s) involved with the event. `stream_key` can be "typing_key", "receipt_key" or "presence_key". Any other
value for `stream_key` will cause this function to return early.
Ephemeral events will only be pushed to appservices that have opted into
them.
Appservices will only receive ephemeral events that fall within their
registered user and room namespaces.
new_token: The latest stream token.
users: The users that should be informed of the new event, if any.
""" """
if not self.notify_appservices: if not self.notify_appservices:
return return
@ -232,21 +239,32 @@ class ApplicationServicesHandler:
for service in services: for service in services:
# Only handle typing if we have the latest token # Only handle typing if we have the latest token
if stream_key == "typing_key" and new_token is not None: if stream_key == "typing_key" and new_token is not None:
# Note that we don't persist the token (via set_type_stream_id_for_appservice)
# for typing_key due to performance reasons and due to their highly
# ephemeral nature.
#
# Instead we simply grab the latest typing updates in _handle_typing
# and, if they apply to this application service, send it off.
events = await self._handle_typing(service, new_token) events = await self._handle_typing(service, new_token)
if events: if events:
self.scheduler.submit_ephemeral_events_for_as(service, events) self.scheduler.submit_ephemeral_events_for_as(service, events)
# We don't persist the token for typing_key for performance reasons
elif stream_key == "receipt_key": elif stream_key == "receipt_key":
events = await self._handle_receipts(service) events = await self._handle_receipts(service)
if events: if events:
self.scheduler.submit_ephemeral_events_for_as(service, events) self.scheduler.submit_ephemeral_events_for_as(service, events)
# Persist the latest handled stream token for this appservice
await self.store.set_type_stream_id_for_appservice( await self.store.set_type_stream_id_for_appservice(
service, "read_receipt", new_token service, "read_receipt", new_token
) )
elif stream_key == "presence_key": elif stream_key == "presence_key":
events = await self._handle_presence(service, users) events = await self._handle_presence(service, users)
if events: if events:
self.scheduler.submit_ephemeral_events_for_as(service, events) self.scheduler.submit_ephemeral_events_for_as(service, events)
# Persist the latest handled stream token for this appservice
await self.store.set_type_stream_id_for_appservice( await self.store.set_type_stream_id_for_appservice(
service, "presence", new_token service, "presence", new_token
) )
@ -254,18 +272,54 @@ class ApplicationServicesHandler:
async def _handle_typing( async def _handle_typing(
self, service: ApplicationService, new_token: int self, service: ApplicationService, new_token: int
) -> List[JsonDict]: ) -> List[JsonDict]:
"""
Return the typing events since the given stream token that the given application
service should receive.
First fetch all typing events between the given typing stream token (non-inclusive)
and the latest typing event stream token (inclusive). Then return only those typing
events that the given application service may be interested in.
Args:
service: The application service to check for which events it should receive.
new_token: A typing event stream token.
Returns:
A list of JSON dictionaries containing data derived from the typing events that
should be sent to the given application service.
"""
typing_source = self.event_sources.sources.typing typing_source = self.event_sources.sources.typing
# Get the typing events from just before current # Get the typing events from just before current
typing, _ = await typing_source.get_new_events_as( typing, _ = await typing_source.get_new_events_as(
service=service, service=service,
# For performance reasons, we don't persist the previous # For performance reasons, we don't persist the previous
# token in the DB and instead fetch the latest typing information # token in the DB and instead fetch the latest typing event
# for appservices. # for appservices.
# TODO: It'd likely be more efficient to simply fetch the
# typing event with the given 'new_token' stream token and
# check if the given service was interested, rather than
# iterating over all typing events and only grabbing the
# latest few.
from_key=new_token - 1, from_key=new_token - 1,
) )
return typing return typing
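A toy model (tokens invented) of the from_key arithmetic above: the typing source returns events strictly after from_key, so passing new_token - 1 yields exactly the update at new_token:

# Simulate a stream of typing updates keyed by stream token.
events_by_token = {7: "older typing update", 8: "latest typing update"}
new_token = 8
returned = [ev for tok, ev in sorted(events_by_token.items()) if tok > new_token - 1]
assert returned == ["latest typing update"]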
async def _handle_receipts(self, service: ApplicationService) -> List[JsonDict]: async def _handle_receipts(self, service: ApplicationService) -> List[JsonDict]:
"""
Return the latest read receipts that the given application service should receive.
First fetch all read receipts between the last receipt stream token that this
application service should have previously received (non-inclusive) and the
latest read receipt stream token (inclusive). Then from that set, return only
those read receipts that the given application service may be interested in.
Args:
service: The application service to check for which events it should receive.
Returns:
A list of JSON dictionaries containing data derived from the read receipts that
should be sent to the given application service.
"""
from_key = await self.store.get_type_stream_id_for_appservice( from_key = await self.store.get_type_stream_id_for_appservice(
service, "read_receipt" service, "read_receipt"
) )
@ -278,6 +332,22 @@ class ApplicationServicesHandler:
async def _handle_presence( async def _handle_presence(
self, service: ApplicationService, users: Collection[Union[str, UserID]] self, service: ApplicationService, users: Collection[Union[str, UserID]]
) -> List[JsonDict]: ) -> List[JsonDict]:
"""
Return the latest presence updates that the given application service should receive.
First, filter the given users list to those that the application service is
interested in. Then retrieve the latest presence updates since the
last-known previously received presence stream token for the given

application service. Return those presence updates.
Args:
service: The application service that ephemeral events are being sent to.
users: The users that should receive the presence update.
Returns:
A list of JSON dictionaries containing data derived from the presence events
that should be sent to the given application service.
"""
events: List[JsonDict] = [] events: List[JsonDict] = []
presence_source = self.event_sources.sources.presence presence_source = self.event_sources.sources.presence
from_key = await self.store.get_type_stream_id_for_appservice( from_key = await self.store.get_type_stream_id_for_appservice(
@ -290,9 +360,9 @@ class ApplicationServicesHandler:
interested = await service.is_interested_in_presence(user, self.store) interested = await service.is_interested_in_presence(user, self.store)
if not interested: if not interested:
continue continue
presence_events, _ = await presence_source.get_new_events( presence_events, _ = await presence_source.get_new_events(
user=user, user=user,
service=service,
from_key=from_key, from_key=from_key,
) )
time_now = self.clock.time_msec() time_now = self.clock.time_msec()

View File

@ -62,7 +62,6 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import ModuleApi
from synapse.storage.roommember import ProfileInfo from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils from synapse.util import stringutils as stringutils
@ -73,6 +72,7 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email from synapse.util.threepids import canonicalise_email
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.module_api import ModuleApi
from synapse.rest.client.login import LoginResponse from synapse.rest.client.login import LoginResponse
from synapse.server import HomeServer from synapse.server import HomeServer
@ -200,46 +200,13 @@ class AuthHandler:
self.bcrypt_rounds = hs.config.registration.bcrypt_rounds self.bcrypt_rounds = hs.config.registration.bcrypt_rounds
# we can't use hs.get_module_api() here, because to do so will create an self.password_auth_provider = hs.get_password_auth_provider()
# import loop.
#
# TODO: refactor this class to separate the lower-level stuff that
# ModuleApi can use from the higher-level stuff that uses ModuleApi, as
# better way to break the loop
account_handler = ModuleApi(hs, self)
self.password_providers = [
PasswordProvider.load(module, config, account_handler)
for module, config in hs.config.authproviders.password_providers
]
logger.info("Extra password_providers: %s", self.password_providers)
self.hs = hs # FIXME better possibility to access registrationHandler later? self.hs = hs # FIXME better possibility to access registrationHandler later?
self.macaroon_gen = hs.get_macaroon_generator() self.macaroon_gen = hs.get_macaroon_generator()
self._password_enabled = hs.config.auth.password_enabled self._password_enabled = hs.config.auth.password_enabled
self._password_localdb_enabled = hs.config.auth.password_localdb_enabled self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
# start out by assuming PASSWORD is enabled; we will remove it later if not.
login_types = set()
if self._password_localdb_enabled:
login_types.add(LoginType.PASSWORD)
for provider in self.password_providers:
login_types.update(provider.get_supported_login_types().keys())
if not self._password_enabled:
login_types.discard(LoginType.PASSWORD)
# Some clients just pick the first type in the list. In this case, we want
# them to use PASSWORD (rather than token or whatever), so we want to make sure
# that comes first, where it's present.
self._supported_login_types = []
if LoginType.PASSWORD in login_types:
self._supported_login_types.append(LoginType.PASSWORD)
login_types.remove(LoginType.PASSWORD)
self._supported_login_types.extend(login_types)
# Ratelimiter for failed auth during UIA. Uses same ratelimit config # Ratelimiter for failed auth during UIA. Uses same ratelimit config
# as per `rc_login.failed_attempts`. # as per `rc_login.failed_attempts`.
self._failed_uia_attempts_ratelimiter = Ratelimiter( self._failed_uia_attempts_ratelimiter = Ratelimiter(
@ -427,11 +394,10 @@ class AuthHandler:
ui_auth_types.add(LoginType.PASSWORD) ui_auth_types.add(LoginType.PASSWORD)
# also allow auth from password providers # also allow auth from password providers
for provider in self.password_providers: for t in self.password_auth_provider.get_supported_login_types().keys():
for t in provider.get_supported_login_types().keys(): if t == LoginType.PASSWORD and not self._password_enabled:
if t == LoginType.PASSWORD and not self._password_enabled: continue
continue ui_auth_types.add(t)
ui_auth_types.add(t)
# if sso is enabled, allow the user to log in via SSO iff they have a mapping # if sso is enabled, allow the user to log in via SSO iff they have a mapping
# from sso to mxid. # from sso to mxid.
@ -1038,7 +1004,25 @@ class AuthHandler:
Returns: Returns:
login types login types
""" """
return self._supported_login_types # Load any login types registered by modules
# This is stored in the password_auth_provider so this doesn't trigger
# any callbacks
types = list(self.password_auth_provider.get_supported_login_types().keys())
# This list should include PASSWORD if (either _password_localdb_enabled is
# true or if one of the modules registered it) AND _password_enabled is true
# Also:
# Some clients just pick the first type in the list. In this case, we want
# them to use PASSWORD (rather than token or whatever), so we want to make sure
# that comes first, where it's present.
if LoginType.PASSWORD in types:
types.remove(LoginType.PASSWORD)
if self._password_enabled:
types.insert(0, LoginType.PASSWORD)
elif self._password_localdb_enabled and self._password_enabled:
types.insert(0, LoginType.PASSWORD)
return types
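A toy check (advertised types invented) of the ordering rule: m.login.password is hoisted to the front so clients that blindly pick the first advertised type end up using password login:

# The same hoisting logic as above, on an invented list of login types.
types = ["m.login.sso", "m.login.password", "m.login.token"]
if "m.login.password" in types:
    types.remove("m.login.password")
    types.insert(0, "m.login.password")
assert types == ["m.login.password", "m.login.sso", "m.login.token"]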
async def validate_login( async def validate_login(
self, self,
@ -1217,15 +1201,20 @@ class AuthHandler:
known_login_type = False known_login_type = False
for provider in self.password_providers: # Check if login_type matches a type registered by one of the modules
supported_login_types = provider.get_supported_login_types() # We don't need to remove LoginType.PASSWORD from the list if password login is
if login_type not in supported_login_types: # disabled, since if that were the case then by this point we know that the
# this password provider doesn't understand this login type # login_type is not LoginType.PASSWORD
continue supported_login_types = self.password_auth_provider.get_supported_login_types()
# check if the login type being used is supported by a module
if login_type in supported_login_types:
# Make a note that this login type is supported by the server
known_login_type = True known_login_type = True
# Get all the fields expected for this login types
login_fields = supported_login_types[login_type] login_fields = supported_login_types[login_type]
# go through the login submission and keep track of which required fields are
# provided/not provided
missing_fields = [] missing_fields = []
login_dict = {} login_dict = {}
for f in login_fields: for f in login_fields:
@ -1233,6 +1222,7 @@ class AuthHandler:
missing_fields.append(f) missing_fields.append(f)
else: else:
login_dict[f] = login_submission[f] login_dict[f] = login_submission[f]
# raise an error if any of the expected fields for that login type weren't provided
if missing_fields: if missing_fields:
raise SynapseError( raise SynapseError(
400, 400,
@ -1240,10 +1230,15 @@ class AuthHandler:
% (login_type, missing_fields), % (login_type, missing_fields),
) )
result = await provider.check_auth(username, login_type, login_dict) # call all of the check_auth hooks for that login_type
# it will return a result once the first success is found (or None otherwise)
result = await self.password_auth_provider.check_auth(
username, login_type, login_dict
)
if result: if result:
return result return result
# if no module managed to authenticate the user, then fallback to built in password based auth
if login_type == LoginType.PASSWORD and self._password_localdb_enabled: if login_type == LoginType.PASSWORD and self._password_localdb_enabled:
known_login_type = True known_login_type = True
@ -1282,11 +1277,16 @@ class AuthHandler:
completed login/registration, or `None`. If authentication was completed login/registration, or `None`. If authentication was
unsuccessful, `user_id` and `callback` are both `None`. unsuccessful, `user_id` and `callback` are both `None`.
""" """
for provider in self.password_providers: # call all of the check_3pid_auth callbacks
result = await provider.check_3pid_auth(medium, address, password) # Result will be from the first callback that returns something other than None
if result: # If all the callbacks return None, then result is also set to None
return result result = await self.password_auth_provider.check_3pid_auth(
medium, address, password
)
if result:
return result
# if result is None then return (None, None)
return None, None return None, None
async def _check_local_password(self, user_id: str, password: str) -> Optional[str]: async def _check_local_password(self, user_id: str, password: str) -> Optional[str]:
@ -1365,13 +1365,12 @@ class AuthHandler:
user_info = await self.auth.get_user_by_access_token(access_token) user_info = await self.auth.get_user_by_access_token(access_token)
await self.store.delete_access_token(access_token) await self.store.delete_access_token(access_token)
# see if any of our auth providers want to know about this # see if any modules want to know about this
for provider in self.password_providers: await self.password_auth_provider.on_logged_out(
await provider.on_logged_out( user_id=user_info.user_id,
user_id=user_info.user_id, device_id=user_info.device_id,
device_id=user_info.device_id, access_token=access_token,
access_token=access_token, )
)
# delete pushers associated with this access token # delete pushers associated with this access token
if user_info.token_id is not None: if user_info.token_id is not None:
@ -1398,12 +1397,11 @@ class AuthHandler:
user_id, except_token_id=except_token_id, device_id=device_id user_id, except_token_id=except_token_id, device_id=device_id
) )
# see if any of our auth providers want to know about this # see if any modules want to know about this
for provider in self.password_providers: for token, _, device_id in tokens_and_devices:
for token, _, device_id in tokens_and_devices: await self.password_auth_provider.on_logged_out(
await provider.on_logged_out( user_id=user_id, device_id=device_id, access_token=token
user_id=user_id, device_id=device_id, access_token=token )
)
# delete pushers associated with the access tokens # delete pushers associated with the access tokens
await self.hs.get_pusherpool().remove_pushers_by_access_token( await self.hs.get_pusherpool().remove_pushers_by_access_token(
@ -1811,40 +1809,230 @@ class MacaroonGenerator:
return macaroon return macaroon
class PasswordProvider: def load_legacy_password_auth_providers(hs: "HomeServer") -> None:
"""Wrapper for a password auth provider module module_api = hs.get_module_api()
for module, config in hs.config.authproviders.password_providers:
load_single_legacy_password_auth_provider(
module=module, config=config, api=module_api
)
This class abstracts out all of the backwards-compatibility hacks for
password providers, to provide a consistent interface. def load_single_legacy_password_auth_provider(
module: Type,
config: JsonDict,
api: "ModuleApi",
) -> None:
try:
provider = module(config=config, account_handler=api)
except Exception as e:
logger.error("Error while initializing %r: %s", module, e)
raise
# The known hooks. If a module implements a method whose name appears in this set
# we'll want to register it
password_auth_provider_methods = {
"check_3pid_auth",
"on_logged_out",
}
# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
# We need to wrap check_password because its old form would return a boolean
# but we now want it to behave just like check_auth() and return the matrix id of
# the user if authentication succeeded, or None otherwise
if f.__name__ == "check_password":
async def wrapped_check_password(
username: str, login_type: str, login_dict: JsonDict
) -> Optional[Tuple[str, Optional[Callable]]]:
# We've already made sure f is not None above, but mypy doesn't do well
# across function boundaries so we need to tell it f is definitely not
# None.
assert f is not None
matrix_user_id = api.get_qualified_user_id(username)
password = login_dict["password"]
is_valid = await f(matrix_user_id, password)
if is_valid:
return matrix_user_id, None
return None
return wrapped_check_password
# We need to wrap check_auth as in the old form it could return
# just a str, but now it must return Optional[Tuple[str, Optional[Callable]]]
if f.__name__ == "check_auth":
async def wrapped_check_auth(
username: str, login_type: str, login_dict: JsonDict
) -> Optional[Tuple[str, Optional[Callable]]]:
# We've already made sure f is not None above, but mypy doesn't do well
# across function boundaries so we need to tell it f is definitely not
# None.
assert f is not None
result = await f(username, login_type, login_dict)
if isinstance(result, str):
return result, None
return result
return wrapped_check_auth
# We need to wrap check_3pid_auth as in the old form it could return
# just a str, but now it must return Optional[Tuple[str, Optional[Callable]]]
if f.__name__ == "check_3pid_auth":
async def wrapped_check_3pid_auth(
medium: str, address: str, password: str
) -> Optional[Tuple[str, Optional[Callable]]]:
# We've already made sure f is not None above, but mypy doesn't do well
# across function boundaries so we need to tell it f is definitely not
# None.
assert f is not None
result = await f(medium, address, password)
if isinstance(result, str):
return result, None
return result
return wrapped_check_3pid_auth
def run(*args: Tuple, **kwargs: Dict) -> Awaitable:
# mypy doesn't do well across function boundaries so we need to tell it
# f is definitely not None.
assert f is not None
return maybe_awaitable(f(*args, **kwargs))
return run
# populate hooks with the implemented methods, wrapped with async_wrapper
hooks = {
hook: async_wrapper(getattr(provider, hook, None))
for hook in password_auth_provider_methods
}
supported_login_types = {}
# call get_supported_login_types and add that to the dict
g = getattr(provider, "get_supported_login_types", None)
if g is not None:
# Note the old module style also called get_supported_login_types at loading time
# and it is synchronous
supported_login_types.update(g())
auth_checkers = {}
# Legacy modules have a check_auth method which expects to be called with one of
# the keys returned by get_supported_login_types. New style modules register a
# dictionary of login_type->check_auth_method mappings
check_auth = async_wrapper(getattr(provider, "check_auth", None))
if check_auth is not None:
for login_type, fields in supported_login_types.items():
# need tuple(fields) since fields can be any Iterable type (so may not be hashable)
auth_checkers[(login_type, tuple(fields))] = check_auth
# if it has a "check_password" method then it should handle all auth checks
# with login type of LoginType.PASSWORD
check_password = async_wrapper(getattr(provider, "check_password", None))
if check_password is not None:
# need to use a tuple here for ("password",) not a list since lists aren't hashable
auth_checkers[(LoginType.PASSWORD, ("password",))] = check_password
api.register_password_auth_provider_callbacks(hooks, auth_checkers=auth_checkers)
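As a hedged illustration, a minimal legacy-style provider (class name, config handling and password invented) that this loader would accept: get_supported_login_types is read synchronously at load time, and check_password is wrapped by async_wrapper into the new (user_id, callback) contract:

class ExampleLegacyProvider:
    """A hypothetical old-style password provider, for illustration only."""

    def __init__(self, config, account_handler):
        self._api = account_handler  # a ModuleApi instance

    @staticmethod
    def get_supported_login_types():
        # Read synchronously at load time, as noted above.
        return {"m.login.password": ("password",)}

    async def check_password(self, user_id, password):
        # Old-style boolean result; async_wrapper turns a truthy return into
        # (user_id, None) for the new callback interface.
        return password == "correct horse battery staple"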
CHECK_3PID_AUTH_CALLBACK = Callable[
[str, str, str],
Awaitable[
Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
],
]
ON_LOGGED_OUT_CALLBACK = Callable[[str, Optional[str], str], Awaitable]
CHECK_AUTH_CALLBACK = Callable[
[str, str, JsonDict],
Awaitable[
Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
],
]
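For comparison, a hedged sketch of a new-style module registering an auth checker directly against these callback shapes (login type, token and user ID all invented; `api` is assumed to be a synapse.module_api.ModuleApi instance):

from typing import Any, Callable, Dict, Optional, Tuple

async def check_token_auth(
    username: str, login_type: str, login_dict: Dict[str, Any]
) -> Optional[Tuple[str, Optional[Callable]]]:
    # Invented check: accept a fixed token and return a qualified user ID.
    if login_dict.get("token") == "s3cret":
        return "@demo:example.org", None
    return None

def register(api) -> None:
    # Maps (login_type, expected_fields) to the checker, as described above.
    api.register_password_auth_provider_callbacks(
        auth_checkers={("com.example.token_login", ("token",)): check_token_auth},
    )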
class PasswordAuthProvider:
"""
A class that the AuthHandler calls when authenticating users
It allows modules to provide alternative methods for authentication
""" """
@classmethod def __init__(self) -> None:
def load( # lists of callbacks
cls, module: Type, config: JsonDict, module_api: ModuleApi self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = []
) -> "PasswordProvider": self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = []
try:
pp = module(config=config, account_handler=module_api)
except Exception as e:
logger.error("Error while initializing %r: %s", module, e)
raise
return cls(pp, module_api)
def __init__(self, pp: "PasswordProvider", module_api: ModuleApi): # Mapping from login type to login parameters
self._pp = pp self._supported_login_types: Dict[str, Iterable[str]] = {}
self._module_api = module_api
self._supported_login_types = {} # Mapping from login type to auth checker callbacks
self.auth_checker_callbacks: Dict[str, List[CHECK_AUTH_CALLBACK]] = {}
# grandfather in check_password support def register_password_auth_provider_callbacks(
if hasattr(self._pp, "check_password"): self,
self._supported_login_types[LoginType.PASSWORD] = ("password",) check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None,
on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None,
auth_checkers: Optional[Dict[Tuple[str, Tuple], CHECK_AUTH_CALLBACK]] = None,
) -> None:
# Register check_3pid_auth callback
if check_3pid_auth is not None:
self.check_3pid_auth_callbacks.append(check_3pid_auth)
g = getattr(self._pp, "get_supported_login_types", None) # register on_logged_out callback
if g: if on_logged_out is not None:
self._supported_login_types.update(g()) self.on_logged_out_callbacks.append(on_logged_out)
def __str__(self) -> str: if auth_checkers is not None:
return str(self._pp) # register a new supported login_type
# Iterate through all of the types being registered
for (login_type, fields), callback in auth_checkers.items():
# Note: fields may be empty here. This would allow a module's auth checker to
# be called with just 'login_type' and no password or other secrets
# Need to check that all the field names are strings or may get nasty errors later
for f in fields:
if not isinstance(f, str):
raise RuntimeError(
"A module tried to register support for login type: %s with parameters %s"
" but all parameter names must be strings"
% (login_type, fields)
)
# Two modules supporting the same login type must expect the same fields,
# e.g. one can't expect "pass" if the other expects "password",
# so throw an exception if that happens.
if login_type not in self._supported_login_types:
self._supported_login_types[login_type] = fields
else:
fields_currently_supported = self._supported_login_types.get(
login_type
)
if fields_currently_supported != fields:
raise RuntimeError(
"A module tried to register support for login type: %s with parameters %s"
" but another module had already registered support for that type with parameters %s"
% (login_type, fields, fields_currently_supported)
)
# Add the new method to the list of auth_checker_callbacks for this login type
self.auth_checker_callbacks.setdefault(login_type, []).append(callback)
def get_supported_login_types(self) -> Mapping[str, Iterable[str]]: def get_supported_login_types(self) -> Mapping[str, Iterable[str]]:
"""Get the login types supported by this password provider """Get the login types supported by this password provider
@ -1852,20 +2040,15 @@ class PasswordProvider:
Returns a map from a login type identifier (such as m.login.password) to an Returns a map from a login type identifier (such as m.login.password) to an
iterable giving the fields which must be provided by the user in the submission iterable giving the fields which must be provided by the user in the submission
to the /login API. to the /login API.
This wrapper adds m.login.password to the list if the underlying password
provider supports the check_password() api.
""" """
return self._supported_login_types return self._supported_login_types
async def check_auth( async def check_auth(
self, username: str, login_type: str, login_dict: JsonDict self, username: str, login_type: str, login_dict: JsonDict
) -> Optional[Tuple[str, Optional[Callable]]]: ) -> Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]:
"""Check if the user has presented valid login credentials """Check if the user has presented valid login credentials
This wrapper also calls check_password() if the underlying password provider
supports the check_password() api and the login type is m.login.password.
Args: Args:
username: user id presented by the client. Either an MXID or an unqualified username: user id presented by the client. Either an MXID or an unqualified
username. username.
@ -1879,63 +2062,130 @@ class PasswordProvider:
user, and `callback` is an optional callback which will be called with the user, and `callback` is an optional callback which will be called with the
result from the /login call (including access_token, device_id, etc.) result from the /login call (including access_token, device_id, etc.)
""" """
# first grandfather in a call to check_password
if login_type == LoginType.PASSWORD:
check_password = getattr(self._pp, "check_password", None)
if check_password:
qualified_user_id = self._module_api.get_qualified_user_id(username)
is_valid = await check_password(
qualified_user_id, login_dict["password"]
)
if is_valid:
return qualified_user_id, None
check_auth = getattr(self._pp, "check_auth", None) # Go through all callbacks for the login type until one returns with a value
if not check_auth: # other than None (i.e. until a callback returns a success)
return None for callback in self.auth_checker_callbacks[login_type]:
result = await check_auth(username, login_type, login_dict) try:
result = await callback(username, login_type, login_dict)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
# Check if the return value is a str or a tuple if result is not None:
if isinstance(result, str): # Check that the callback returned a Tuple[str, Optional[Callable]]
# If it's a str, set callback function to None # "type: ignore[unreachable]" is used after some isinstance checks because mypy thinks
return result, None # result is always the right type, but as it is 3rd party code it might not be
return result if not isinstance(result, tuple) or len(result) != 2:
logger.warning(
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
result,
)
continue
# pull out the two parts of the tuple so we can do type checking
str_result, callback_result = result
# the 1st item in the tuple should be a str
if not isinstance(str_result, str):
logger.warning( # type: ignore[unreachable]
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
result,
)
continue
# the second should be Optional[Callable]
if callback_result is not None:
if not callable(callback_result):
logger.warning( # type: ignore[unreachable]
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
result,
)
continue
# The result is a (str, Optional[callback]) tuple so return the successful result
return result
# If this point has been reached then none of the callbacks successfully authenticated
# the user so return None
return None
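In short, only results shaped like (str, Optional[Callable]) survive the checks above; a toy predicate (inputs invented) capturing the rule:

def looks_valid(result) -> bool:
    # Mirrors the validation cascade: a 2-tuple of a str user ID and an
    # optional callable; anything else would be logged and skipped.
    if not isinstance(result, tuple) or len(result) != 2:
        return False
    user_id, callback = result
    return isinstance(user_id, str) and (callback is None or callable(callback))

assert looks_valid(("@alice:example.org", None))
assert not looks_valid("@alice:example.org")  # a bare str is no longer accepted
assert not looks_valid((42, None))            # the user ID must be a str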
async def check_3pid_auth( async def check_3pid_auth(
self, medium: str, address: str, password: str self, medium: str, address: str, password: str
) -> Optional[Tuple[str, Optional[Callable]]]: ) -> Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]:
g = getattr(self._pp, "check_3pid_auth", None)
if not g:
return None
# This function is able to return a deferred that either # This function is able to return a deferred that either
# resolves None, meaning authentication failure, or upon # resolves None, meaning authentication failure, or upon
# success, to a str (which is the user_id) or a tuple of # success, to a str (which is the user_id) or a tuple of
# (user_id, callback_func), where callback_func should be run # (user_id, callback_func), where callback_func should be run
# after we've finished everything else # after we've finished everything else
result = await g(medium, address, password)
# Check if the return value is a str or a tuple for callback in self.check_3pid_auth_callbacks:
if isinstance(result, str): try:
# If it's a str, set callback function to None result = await callback(medium, address, password)
return result, None except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
return result if result is not None:
# Check that the callback returned a Tuple[str, Optional[Callable]]
# "type: ignore[unreachable]" is used after some isinstance checks because mypy thinks
# result is always the right type, but as it is 3rd party code it might not be
if not isinstance(result, tuple) or len(result) != 2:
logger.warning(
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
result,
)
continue
# pull out the two parts of the tuple so we can do type checking
str_result, callback_result = result
# the 1st item in the tuple should be a str
if not isinstance(str_result, str):
logger.warning( # type: ignore[unreachable]
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
result,
)
continue
# the second should be Optional[Callable]
if callback_result is not None:
if not callable(callback_result):
logger.warning( # type: ignore[unreachable]
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
result,
)
continue
# The result is a (str, Optional[callback]) tuple so return the successful result
return result
# If this point has been reached then none of the callbacks successfully authenticated
# the user so return None
return None
async def on_logged_out( async def on_logged_out(
self, user_id: str, device_id: Optional[str], access_token: str self, user_id: str, device_id: Optional[str], access_token: str
) -> None: ) -> None:
g = getattr(self._pp, "on_logged_out", None)
if not g:
return
# This might return an awaitable, if it does block the log out # call all of the on_logged_out callbacks
# until it completes. for callback in self.on_logged_out_callbacks:
await maybe_awaitable( try:
g( await callback(user_id, device_id, access_token)
user_id=user_id, except Exception as e:
device_id=device_id, logger.warning("Failed to run module API callback %s: %s", callback, e)
access_token=access_token, continue
)
)

View File

@ -131,10 +131,6 @@ class DeactivateAccountHandler:
# delete from user directory # delete from user directory
await self.user_directory_handler.handle_local_user_deactivated(user_id) await self.user_directory_handler.handle_local_user_deactivated(user_id)
# If the user is present in the monthly active users table
# remove them
await self.store.remove_deactivated_user_from_mau_table(user_id)
# Mark the user as erased, if they asked for that # Mark the user as erased, if they asked for that
if erase_data: if erase_data:
user = UserID.from_string(user_id) user = UserID.from_string(user_id)

View File

@ -14,7 +14,18 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import logging import logging
from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
)
from synapse.api import errors from synapse.api import errors
from synapse.api.constants import EventTypes from synapse.api.constants import EventTypes
@ -443,6 +454,10 @@ class DeviceHandler(DeviceWorkerHandler):
) -> None: ) -> None:
"""Notify that a user's device(s) has changed. Pokes the notifier, and """Notify that a user's device(s) has changed. Pokes the notifier, and
remote servers if the user is local. remote servers if the user is local.
Args:
user_id: The Matrix ID of the user whose device list has been updated.
device_ids: The device IDs that have changed.
""" """
if not device_ids: if not device_ids:
# No changes to notify about, so this is a no-op. # No changes to notify about, so this is a no-op.
@ -595,7 +610,7 @@ class DeviceHandler(DeviceWorkerHandler):
def _update_device_from_client_ips( def _update_device_from_client_ips(
device: JsonDict, client_ips: Dict[Tuple[str, str], JsonDict] device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
) -> None: ) -> None:
ip = client_ips.get((device["user_id"], device["device_id"]), {}) ip = client_ips.get((device["user_id"], device["device_id"]), {})
device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")}) device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
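A toy run (device invented) of this merge: a device with no recorded client IP degrades to explicit None fields instead of raising:

# The same lookup-and-update as above, on an empty client_ips mapping.
device = {"user_id": "@u:example.org", "device_id": "DEV"}
client_ips = {}
ip = client_ips.get((device["user_id"], device["device_id"]), {})
device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
assert device["last_seen_ts"] is None and device["last_seen_ip"] is None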

View File

@ -147,7 +147,7 @@ class DirectoryHandler:
if not self.config.roomdirectory.is_alias_creation_allowed( if not self.config.roomdirectory.is_alias_creation_allowed(
user_id, room_id, room_alias_str user_id, room_id, room_alias_str
): ):
# Lets just return a generic message, as there may be all sorts of # Let's just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages # reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule? # per alias creation rule?
raise SynapseError(403, "Not allowed to create alias") raise SynapseError(403, "Not allowed to create alias")
@ -463,7 +463,7 @@ class DirectoryHandler:
if not self.config.roomdirectory.is_publishing_room_allowed( if not self.config.roomdirectory.is_publishing_room_allowed(
user_id, room_id, room_aliases user_id, room_id, room_aliases
): ):
# Lets just return a generic message, as there may be all sorts of # Let's just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages # reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule? # per alias creation rule?
raise SynapseError(403, "Not allowed to publish room") raise SynapseError(403, "Not allowed to publish room")

View File

@ -55,8 +55,7 @@ class EventAuthHandler:
"""Check an event passes the auth rules at its own auth events""" """Check an event passes the auth rules at its own auth events"""
auth_event_ids = event.auth_event_ids() auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids) auth_events_by_id = await self._store.get_events(auth_event_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()} check_auth_rules_for_event(room_version_obj, event, auth_events_by_id.values())
check_auth_rules_for_event(room_version_obj, event, auth_events)
def compute_auth_events( def compute_auth_events(
self, self,

View File

@ -15,7 +15,6 @@
"""Contains handlers for federation events.""" """Contains handlers for federation events."""
import itertools
import logging import logging
from http import HTTPStatus from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
@ -27,12 +26,7 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer from twisted.internet import defer
from synapse import event_auth from synapse import event_auth
from synapse.api.constants import ( from synapse.api.constants import EventContentFields, EventTypes, Membership
EventContentFields,
EventTypes,
Membership,
RejectedReason,
)
from synapse.api.errors import ( from synapse.api.errors import (
AuthError, AuthError,
CodeMessageException, CodeMessageException,
@ -43,12 +37,9 @@ from synapse.api.errors import (
RequestSendFailed, RequestSendFailed,
SynapseError, SynapseError,
) )
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.crypto.event_signing import compute_event_signature from synapse.crypto.event_signing import compute_event_signature
from synapse.event_auth import ( from synapse.event_auth import validate_event_for_room_version
check_auth_rules_for_event,
validate_event_for_room_version,
)
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator from synapse.events.validator import EventValidator
@ -238,18 +229,10 @@ class FederationHandler:
) )
return False return False
logger.debug(
"room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
room_id,
current_depth,
max_depth,
sorted_extremeties_tuple,
)
# We ignore extremities that have a greater depth than our current depth # We ignore extremities that have a greater depth than our current depth
# as: # as:
# 1. we don't really care about getting events that have happened # 1. we don't really care about getting events that have happened
# before our current position; and # after our current position; and
# 2. we have likely previously tried and failed to backfill from that # 2. we have likely previously tried and failed to backfill from that
# extremity, so to avoid getting "stuck" requesting the same # extremity, so to avoid getting "stuck" requesting the same
# backfill repeatedly we drop those extremities. # backfill repeatedly we drop those extremities.
@ -257,9 +240,19 @@ class FederationHandler:
t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
] ]
logger.debug(
"room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems: %s filtered_sorted_extremeties_tuple: %s",
room_id,
current_depth,
limit,
max_depth,
sorted_extremeties_tuple,
filtered_sorted_extremeties_tuple,
)
# However, we need to check that the filtered extremities are non-empty. # However, we need to check that the filtered extremities are non-empty.
# If they are empty then either we can a) bail or b) still attempt to # If they are empty then either we can a) bail or b) still attempt to
# backill. We opt to try backfilling anyway just in case we do get # backfill. We opt to try backfilling anyway just in case we do get
# relevant events. # relevant events.
if filtered_sorted_extremeties_tuple: if filtered_sorted_extremeties_tuple:
sorted_extremeties_tuple = filtered_sorted_extremeties_tuple sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
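A toy model (depths invented) of this filtering: extremities deeper than the current position are dropped, but we fall back to the unfiltered list rather than refusing to backfill when everything is filtered out:

# Extremities as (event_id, depth) tuples, as in the hunk above.
sorted_extremeties_tuple = [("$e1", 10), ("$e2", 25)]
current_depth = 20
filtered = [t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth]
if filtered:
    sorted_extremeties_tuple = filtered
assert sorted_extremeties_tuple == [("$e1", 10)]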
@ -389,7 +382,7 @@ class FederationHandler:
for key, state_dict in states.items() for key, state_dict in states.items()
} }
for e_id, _ in sorted_extremeties_tuple: for e_id in event_ids:
likely_extremeties_domains = get_domains_from_state(states[e_id]) likely_extremeties_domains = get_domains_from_state(states[e_id])
success = await try_backfill( success = await try_backfill(
@ -517,7 +510,7 @@ class FederationHandler:
auth_events=auth_chain, auth_events=auth_chain,
) )
max_stream_id = await self._persist_auth_tree( max_stream_id = await self._federation_event_handler.process_remote_join(
origin, room_id, auth_chain, state, event, room_version_obj origin, room_id, auth_chain, state, event, room_version_obj
) )
@ -1093,119 +1086,6 @@ class FederationHandler:
else: else:
return None return None
async def _persist_auth_tree(
self,
origin: str,
room_id: str,
auth_events: List[EventBase],
state: List[EventBase],
event: EventBase,
room_version: RoomVersion,
) -> int:
"""Checks the auth chain is valid (and passes auth checks) for the
state and event. Then persists the auth chain and state atomically.
Persists the event separately. Notifies about the persisted events
where appropriate.
Will attempt to fetch missing auth events.
Args:
origin: Where the events came from
room_id,
auth_events
state
event
room_version: The room version we expect this room to have, and
will raise if it doesn't match the version in the create event.
"""
events_to_context = {}
for e in itertools.chain(auth_events, state):
e.internal_metadata.outlier = True
events_to_context[e.event_id] = EventContext.for_outlier()
event_map = {
e.event_id: e for e in itertools.chain(auth_events, state, [event])
}
create_event = None
for e in auth_events:
if (e.type, e.state_key) == (EventTypes.Create, ""):
create_event = e
break
if create_event is None:
# If the state doesn't have a create event then the room is
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
room_version_id = create_event.content.get(
"room_version", RoomVersions.V1.identifier
)
if room_version.identifier != room_version_id:
raise SynapseError(400, "Room version mismatch")
missing_auth_events = set()
for e in itertools.chain(auth_events, state, [event]):
for e_id in e.auth_event_ids():
if e_id not in event_map:
missing_auth_events.add(e_id)
for e_id in missing_auth_events:
m_ev = await self.federation_client.get_pdu(
[origin],
e_id,
room_version=room_version,
outlier=True,
timeout=10000,
)
if m_ev and m_ev.event_id == e_id:
event_map[e_id] = m_ev
else:
logger.info("Failed to find auth event %r", e_id)
for e in itertools.chain(auth_events, state, [event]):
auth_for_e = {
(event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
for e_id in e.auth_event_ids()
if e_id in event_map
}
if create_event:
auth_for_e[(EventTypes.Create, "")] = create_event
try:
validate_event_for_room_version(room_version, e)
check_auth_rules_for_event(room_version, e, auth_for_e)
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
# rooms whose senders do not have the correct sigil; these
# cause SynapseErrors in auth.check. We don't want to give up
# the attempt to federate altogether in such cases.
logger.warning("Rejecting %s because %s", e.event_id, err.msg)
if e == event:
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
if auth_events or state:
await self._federation_event_handler.persist_events_and_notify(
room_id,
[
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
],
)
new_event_context = await self.state_handler.compute_event_context(
event, old_state=state
)
return await self._federation_event_handler.persist_events_and_notify(
room_id, [(event, new_event_context)]
)
async def on_get_missing_events( async def on_get_missing_events(
self, self,
origin: str, origin: str,

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import itertools
import logging import logging
from http import HTTPStatus from http import HTTPStatus
from typing import ( from typing import (
@ -45,7 +46,7 @@ from synapse.api.errors import (
RequestSendFailed, RequestSendFailed,
SynapseError, SynapseError,
) )
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.event_auth import ( from synapse.event_auth import (
auth_types_for_event, auth_types_for_event,
check_auth_rules_for_event, check_auth_rules_for_event,
@ -64,7 +65,6 @@ from synapse.replication.http.federation import (
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
MutableStateMap,
PersistedEventPosition,
RoomStreamToken,
StateMap,
@ -214,7 +214,7 @@ class FederationEventHandler:
if missing_prevs:
# We only backfill backwards to the min depth.
min_depth = await self.get_min_depth_for_context(pdu.room_id)
min_depth = await self._store.get_min_depth(pdu.room_id)
logger.debug("min_depth: %d", min_depth)
if min_depth is not None and pdu.depth > min_depth:
@ -361,6 +361,7 @@ class FederationEventHandler:
# need to.
await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
await self._check_for_soft_fail(event, None, origin=origin)
await self._run_push_actions_and_persist_event(event, context)
return event, context
@ -390,9 +391,93 @@ class FederationEventHandler:
prev_member_event,
)
async def process_remote_join(
self,
origin: str,
room_id: str,
auth_events: List[EventBase],
state: List[EventBase],
event: EventBase,
room_version: RoomVersion,
) -> int:
"""Persists the events returned by a send_join
Checks the auth chain is valid (and passes auth checks) for the
state and event. Then persists all of the events.
Notifies about the persisted events where appropriate.
Args:
origin: Where the events came from
room_id:
auth_events
state
event
room_version: The room version we expect this room to have, and
will raise if it doesn't match the version in the create event.
Returns:
The stream ID after which all events have been persisted.
Raises:
SynapseError if the response is in some way invalid.
"""
for e in itertools.chain(auth_events, state):
e.internal_metadata.outlier = True
event_map = {e.event_id: e for e in itertools.chain(auth_events, state)}
create_event = None
for e in auth_events:
if (e.type, e.state_key) == (EventTypes.Create, ""):
create_event = e
break
if create_event is None:
# If the state doesn't have a create event then the room is
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
room_version_id = create_event.content.get(
"room_version", RoomVersions.V1.identifier
)
if room_version.identifier != room_version_id:
raise SynapseError(400, "Room version mismatch")
# filter out any events we have already seen
seen_remotes = await self._store.have_seen_events(room_id, event_map.keys())
for s in seen_remotes:
event_map.pop(s, None)
# persist the auth chain and state events.
#
# any invalid events here will be marked as rejected, and we'll carry on.
#
# any events whose auth events are missing (ie, not in the send_join response,
# and not already in our db) will just be ignored. This is correct behaviour,
# because the reason that auth_events are missing might be due to us being
# unable to validate their signatures. The fact that we can't validate their
# signatures right now doesn't mean that we will *never* be able to, so it
# is premature to reject them.
#
await self._auth_and_persist_outliers(room_id, event_map.values())
# and now persist the join event itself.
logger.info("Peristing join-via-remote %s", event)
with nested_logging_context(suffix=event.event_id):
context = await self._state_handler.compute_event_context(
event, old_state=state
)
context = await self._check_event_auth(origin, event, context)
if context.rejected:
raise SynapseError(400, "Join event was rejected")
return await self.persist_events_and_notify(room_id, [(event, context)])
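`process_remote_join` also drops events the server has already persisted before re-persisting the send_join response. A tiny hedged sketch of that dedup step, with an in-memory dict standing in for the store and `have_seen_events`:

def drop_seen_events(event_map: dict, seen_ids: set) -> None:
    # Anything already persisted locally is removed from the batch,
    # mirroring the have_seen_events()/pop() loop above.
    for event_id in seen_ids:
        event_map.pop(event_id, None)

events = {"$a": "event A", "$b": "event B"}
drop_seen_events(events, {"$a", "$c"})
assert events == {"$b": "event B"}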
@log_function
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: List[str]
self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
) -> None:
"""Trigger a backfill request to `dest` for the given `room_id`
@ -861,9 +946,15 @@ class FederationEventHandler:
) -> None:
"""Called when we have a new non-outlier event.
This is called when we have a new event to add to the room DAG - either directly
via a /send request, retrieved via get_missing_events after a /send request, or
backfilled after a client request.
This is called when we have a new event to add to the room DAG. This can be
due to:
* events received directly via a /send request
* events retrieved via get_missing_events after a /send request
* events backfilled after a client request.
It's not currently used for events received from incoming send_{join,knock,leave}
requests (which go via on_send_membership_event), nor for joins created by a
remote join dance (which go via process_remote_join).
We need to do auth checks and put it through the StateHandler.
@ -899,11 +990,19 @@ class FederationEventHandler:
logger.exception("Unexpected AuthError from _check_event_auth")
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
if not backfilled and not context.rejected:
# For new (non-backfilled and non-outlier) events we check if the event
# passes auth based on the current state. If it doesn't then we
# "soft-fail" the event.
await self._check_for_soft_fail(event, state, origin=origin)
await self._run_push_actions_and_persist_event(event, context, backfilled)
if backfilled:
if backfilled or context.rejected:
return
await self._maybe_kick_guest_users(event)
# For encrypted messages we check that we know about the sending device,
# if we don't then we mark the device cache for that user as stale.
if event.type == EventTypes.Encrypted:
@ -1116,14 +1215,12 @@ class FederationEventHandler:
await concurrently_execute(get_event, event_ids, 5)
logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
await self._auth_and_persist_fetched_events(destination, room_id, events)
await self._auth_and_persist_outliers(room_id, events)
async def _auth_and_persist_fetched_events(
self, origin: str, room_id: str, events: Iterable[EventBase]
) -> None:
"""Persist the events fetched by _get_events_and_persist or _get_remote_auth_chain_for_event
async def _auth_and_persist_outliers(
self, room_id: str, events: Iterable[EventBase]
) -> None:
"""Persist a batch of outlier events fetched from remote servers.
The events to be persisted must be outliers.
We first sort the events to make sure that we process each event's auth_events
before the event itself, and then auth and persist them.
@ -1131,7 +1228,6 @@ class FederationEventHandler:
Notifies about the events where appropriate.
Params:
origin: where the events came from
room_id: the room that the events are meant to be in (though this has
not yet been checked)
events: the events that have been fetched
@ -1167,15 +1263,15 @@ class FederationEventHandler:
shortstr(e.event_id for e in roots),
)
await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)
await self._auth_and_persist_outliers_inner(room_id, roots)
for ev in roots:
del event_map[ev.event_id]
async def _auth_and_persist_fetched_events_inner(
self, origin: str, room_id: str, fetched_events: Collection[EventBase]
) -> None:
"""Helper for _auth_and_persist_fetched_events
async def _auth_and_persist_outliers_inner(
self, room_id: str, fetched_events: Collection[EventBase]
) -> None:
"""Helper for _auth_and_persist_outliers
Persists a batch of events where we have (theoretically) already persisted all
of their auth events.
@ -1203,20 +1299,20 @@ class FederationEventHandler:
def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
with nested_logging_context(suffix=event.event_id):
auth = {}
auth = []
for auth_event_id in event.auth_event_ids():
ae = persisted_events.get(auth_event_id)
if not ae:
logger.warning(
"Event %s relies on auth_event %s, which could not be found.",
event,
auth_event_id,
)
# the fact we can't find the auth event doesn't mean it doesn't
# exist, which means it is premature to reject `event`. Instead we
# just ignore it for now.
logger.warning(
"Dropping event %s, which relies on auth_event %s, which could not be found",
event,
auth_event_id,
)
return None
auth[(ae.type, ae.state_key)] = ae
auth.append(ae)
context = EventContext.for_outlier()
try:
@ -1256,6 +1352,10 @@ class FederationEventHandler:
Returns:
The updated context object.
Raises:
AuthError if we were unable to find copies of the event's auth events.
(Most other failures just cause us to set `context.rejected`.)
"""
# This method should only be used for non-outliers
assert not event.internal_metadata.outlier
@ -1272,7 +1372,26 @@ class FederationEventHandler:
context.rejected = RejectedReason.AUTH_ERROR
return context
# calculate what the auth events *should* be, to use as a basis for auth.
# next, check that we have all of the event's auth events.
#
# Note that this can raise AuthError, which we want to propagate to the
# caller rather than swallow with `context.rejected` (since we cannot be
# certain that there is a permanent problem with the event).
claimed_auth_events = await self._load_or_fetch_auth_events_for_event(
origin, event
)
# ... and check that the event passes auth at those auth events.
try:
check_auth_rules_for_event(room_version_obj, event, claimed_auth_events)
except AuthError as e:
logger.warning(
"While checking auth of %r against auth_events: %s", event, e
)
context.rejected = RejectedReason.AUTH_ERROR
return context
# now check auth against what we think the auth events *should* be.
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
@ -1283,13 +1402,8 @@ class FederationEventHandler:
}
try:
(
context,
auth_events_for_auth,
) = await self._update_auth_events_and_context_for_auth(
origin,
event,
context,
calculated_auth_event_map=calculated_auth_event_map,
)
updated_auth_events = await self._update_auth_events_for_auth(
event,
calculated_auth_event_map=calculated_auth_event_map,
)
except Exception:
@ -1302,17 +1416,23 @@ class FederationEventHandler:
"Ignoring failure and continuing processing of event.", "Ignoring failure and continuing processing of event.",
event.event_id, event.event_id,
) )
updated_auth_events = None
if updated_auth_events:
context = await self._update_context_for_auth_events(
event, context, updated_auth_events
)
auth_events_for_auth = updated_auth_events
else:
auth_events_for_auth = calculated_auth_event_map auth_events_for_auth = calculated_auth_event_map
try: try:
check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth) check_auth_rules_for_event(
room_version_obj, event, auth_events_for_auth.values()
)
except AuthError as e: except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e) logger.warning("Failed auth resolution for %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR context.rejected = RejectedReason.AUTH_ERROR
return context
await self._check_for_soft_fail(event, state, backfilled, origin=origin)
await self._maybe_kick_guest_users(event)
return context return context
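The reshaped `_check_event_auth` now applies the auth rules twice: once against the event's claimed auth events, then against the auth events calculated from our own view of room state. A schematic sketch of that two-phase shape (toy types; `check` stands in for `check_auth_rules_for_event`, and `PermissionError` for `AuthError`):

from typing import Callable, Iterable, Optional

def two_phase_auth_check(
    event: str,
    claimed_auth: Iterable[str],
    calculated_auth: Iterable[str],
    check: Callable[[str, Iterable[str]], None],
) -> Optional[str]:
    # Phase 1: the event must pass the auth rules against the auth events
    # it claims (a failure here maps to context.rejected above).
    try:
        check(event, claimed_auth)
    except PermissionError:
        return "rejected: failed auth against claimed auth events"
    # Phase 2: it must also pass against the auth events we calculated
    # from our own view of the room state.
    try:
        check(event, calculated_auth)
    except PermissionError:
        return "rejected: failed auth against calculated auth events"
    return None  # accepted

def toy_check(event: str, auth: Iterable[str]) -> None:
    if "create" not in auth:
        raise PermissionError("no create event")

assert two_phase_auth_check("$e", ["create"], ["create"], toy_check) is None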
@ -1332,7 +1452,6 @@ class FederationEventHandler:
self,
event: EventBase,
state: Optional[Iterable[EventBase]],
backfilled: bool,
origin: str,
) -> None:
"""Checks if we should soft fail the event; if so, marks the event as
@ -1341,15 +1460,8 @@ class FederationEventHandler:
Args:
event
state: The state at the event if we don't have all the event's prev events
backfilled: Whether the event is from backfill
origin: The host the event originates from.
"""
# For new (non-backfilled and non-outlier) events we check if the event
# passes auth based on the current state. If it doesn't then we
# "soft-fail" the event.
if backfilled or event.internal_metadata.is_outlier():
return
extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
extrem_ids = set(extrem_ids_list)
prev_event_ids = set(event.prev_event_ids())
@ -1403,11 +1515,9 @@ class FederationEventHandler:
current_state_ids_list = [
e for k, e in current_state_ids.items() if k in auth_types
]
auth_events_map = await self._store.get_events(current_state_ids_list)
current_auth_events = {
(e.type, e.state_key): e for e in auth_events_map.values()
}
current_auth_events = await self._store.get_events_as_list(
current_state_ids_list
)
try:
check_auth_rules_for_event(room_version_obj, event, current_auth_events)
@ -1426,13 +1536,11 @@ class FederationEventHandler:
soft_failed_event_counter.inc()
event.internal_metadata.soft_failed = True
async def _update_auth_events_and_context_for_auth(
self,
origin: str,
event: EventBase,
context: EventContext,
calculated_auth_event_map: StateMap[EventBase],
) -> Tuple[EventContext, StateMap[EventBase]]:
async def _update_auth_events_for_auth(
self,
event: EventBase,
calculated_auth_event_map: StateMap[EventBase],
) -> Optional[StateMap[EventBase]]:
"""Helper for _check_event_auth. See there for docs.
Checks whether a given event has the expected auth events. If it
@ -1445,93 +1553,27 @@ class FederationEventHandler:
processing of the event.
Args:
origin:
event:
context:
calculated_auth_event_map:
Our calculated auth_events based on the state of the room
at the event's position in the DAG.
Returns:
updated context, updated auth event map
updated auth event map, or None if no changes are needed.
"""
assert not event.internal_metadata.outlier
# take a copy of calculated_auth_event_map before we modify it.
auth_events: MutableStateMap[EventBase] = dict(calculated_auth_event_map)
# check for events which are in the event's claimed auth_events, but not
# in our calculated event map.
event_auth_events = set(event.auth_event_ids())
# missing_auth is the set of the event's auth_events which we don't yet have
# in auth_events.
missing_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
# if we have missing events, we need to fetch those events from somewhere.
#
# we start by checking if they are in the store, and then try calling /event_auth/.
if missing_auth:
have_events = await self._store.have_seen_events(
event.room_id, missing_auth
)
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)
# missing_auth is now the set of event_ids which:
# a. are listed in event.auth_events, *and*
# b. are *not* part of our calculated auth events based on room state, *and*
# c. are *not* yet in our database.
if missing_auth:
# If we don't have all the auth events, we need to get them.
logger.info("auth_events contains unknown events: %s", missing_auth)
try:
await self._get_remote_auth_chain_for_event(
origin, event.room_id, event.event_id
)
except Exception:
logger.exception("Failed to get auth chain")
else:
# load any auth events we might have persisted from the database. This
# has the side-effect of correctly setting the rejected_reason on them.
auth_events.update(
{
(ae.type, ae.state_key): ae
for ae in await self._store.get_events_as_list(
missing_auth, allow_rejected=True
)
}
)
# auth_events now contains
# 1. our *calculated* auth events based on the room state, plus:
# 2. any events which:
# a. are listed in `event.auth_events`, *and*
# b. are not part of our calculated auth events, *and*
# c. were not in our database before the call to /event_auth
# d. have since been added to our database (most likely by /event_auth).
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
e.event_id for e in calculated_auth_event_map.values()
)
# different_auth is the set of events which *are* in `event.auth_events`, but
# which are *not* in `auth_events`. Comparing with (2.) above, this means
# exclusively the set of `event.auth_events` which we already had in our
# database before any call to /event_auth.
#
# I'm reasonably sure that the fact that events returned by /event_auth are
# blindly added to auth_events (and hence excluded from different_auth) is a bug
# - though it's a very long-standing one (see
# https://github.com/matrix-org/synapse/commit/78015948a7febb18e000651f72f8f58830a55b93#diff-0bc92da3d703202f5b9be2d3f845e375f5b1a6bc6ba61705a8af9be1121f5e42R786
# from Jan 2015 which seems to add it, though it actually just moves it from
# elsewhere (before that, it gets lost in a mess of huge "various bug fixes"
# PRs).
if not different_auth:
return context, auth_events
return None
logger.info(
"auth_events refers to events which are not in our calculated auth "
@ -1543,27 +1585,18 @@ class FederationEventHandler:
# necessary?
different_events = await self._store.get_events_as_list(different_auth)
# double-check they're all in the same room - we should already have checked
# this but it doesn't hurt to check again.
for d in different_events:
if d.room_id != event.room_id:
logger.warning(
"Event %s refers to auth_event %s which is in a different room",
event.event_id,
d.event_id,
)
assert (
d.room_id == event.room_id
), f"Event {event.event_id} refers to auth_event {d.event_id} which is in a different room"
# don't attempt to resolve the claimed auth events against our own
# in this case: just use our own auth events.
#
# XXX: should we reject the event in this case? It feels like we should,
# but then shouldn't we also do so if we've failed to fetch any of the
# auth events?
return context, auth_events
# now we state-resolve between our own idea of the auth events, and the remote's
# idea of them.
local_state = auth_events.values()
local_state = calculated_auth_event_map.values()
remote_auth_events = dict(auth_events)
remote_auth_events = dict(calculated_auth_event_map)
remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
remote_state = remote_auth_events.values()
@ -1571,23 +1604,93 @@ class FederationEventHandler:
new_state = await self._state_handler.resolve_events(
room_version, (local_state, remote_state), event
)
different_state = {
(d.type, d.state_key): d
for d in new_state.values()
if calculated_auth_event_map.get((d.type, d.state_key)) != d
}
if not different_state:
logger.info("State res returned no new state")
return None
logger.info(
"After state res: updating auth_events with new state %s",
{
(d.type, d.state_key): d.event_id
for d in new_state.values()
if auth_events.get((d.type, d.state_key)) != d
},
)
auth_events.update(new_state)
context = await self._update_context_for_auth_events(
event, context, auth_events
)
logger.info(
"After state res: updating auth_events with new state %s",
different_state.values(),
)
# take a copy of calculated_auth_event_map before we modify it.
auth_events = dict(calculated_auth_event_map)
auth_events.update(different_state)
return auth_events
async def _load_or_fetch_auth_events_for_event(
self, destination: str, event: EventBase
) -> Collection[EventBase]:
"""Fetch this event's auth_events, from database or remote
Loads any of the auth_events that we already have from the database/cache. If
there are any that are missing, calls /event_auth to get the complete auth
chain for the event (and then attempts to load the auth_events again).
If any of the auth_events cannot be found, raises an AuthError. This can happen
for a number of reasons; eg: the events don't exist, or we were unable to talk
to `destination`, or we couldn't validate the signature on the event (which
in turn has multiple potential causes).
Args:
destination: where to send the /event_auth request. Typically the server
that sent us `event` in the first place.
event: the event whose auth_events we want
Returns:
all of the events in `event.auth_events`, after deduplication
Raises:
AuthError if we were unable to fetch the auth_events for any reason.
"""
event_auth_event_ids = set(event.auth_event_ids())
event_auth_events = await self._store.get_events(
event_auth_event_ids, allow_rejected=True
) )
missing_auth_event_ids = event_auth_event_ids.difference(
event_auth_events.keys()
)
if not missing_auth_event_ids:
return event_auth_events.values()
return context, auth_events
logger.info(
"Event %s refers to unknown auth events %s: fetching auth chain",
event,
missing_auth_event_ids,
)
try:
await self._get_remote_auth_chain_for_event(
destination, event.room_id, event.event_id
)
except Exception as e:
logger.warning("Failed to get auth chain for %s: %s", event, e)
# in this case, it's very likely we still won't have all the auth
# events - but we pick that up below.
# try to fetch the auth events we missed last time.
extra_auth_events = await self._store.get_events(
missing_auth_event_ids, allow_rejected=True
)
missing_auth_event_ids.difference_update(extra_auth_events.keys())
event_auth_events.update(extra_auth_events)
if not missing_auth_event_ids:
return event_auth_events.values()
# we still don't have all the auth events.
logger.warning(
"Missing auth events for %s: %s",
event,
shortstr(missing_auth_event_ids),
)
# the fact we can't find the auth event doesn't mean it doesn't
# exist, which means it is premature to store `event` as rejected.
# instead we raise an AuthError, which will make the caller ignore it.
raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found")
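`_load_or_fetch_auth_events_for_event` follows a load/fetch/retry/fail shape. A self-contained sketch under assumed in-memory `store` and `remote` mappings (the remote lookup stands in for the /event_auth round-trip):

from typing import Dict, List

def load_or_fetch(ids: List[str], store: Dict[str, str], remote: Dict[str, str]) -> List[str]:
    # 1. Load whatever we already have locally.
    missing = [i for i in ids if i not in store]
    if missing:
        # 2. One attempt to fetch the remainder from the remote...
        for i in missing:
            if i in remote:
                store[i] = remote[i]
        # 3. ...then retry the local load.
        missing = [i for i in ids if i not in store]
    # 4. Refuse (rather than reject) if anything is still missing:
    # absence of evidence is not evidence of absence.
    if missing:
        raise PermissionError(f"Auth events could not be found: {missing}")
    return [store[i] for i in ids]

store = {"$a": "A"}
assert load_or_fetch(["$a", "$b"], store, {"$b": "B"}) == ["A", "B"]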
async def _get_remote_auth_chain_for_event(
self, destination: str, room_id: str, event_id: str
@ -1624,9 +1727,7 @@ class FederationEventHandler:
for s in seen_remotes:
remote_event_map.pop(s, None)
await self._auth_and_persist_fetched_events(
destination, room_id, remote_event_map.values()
)
await self._auth_and_persist_outliers(room_id, remote_event_map.values())
async def _update_context_for_auth_events(
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
@ -1696,16 +1797,27 @@ class FederationEventHandler:
# persist_events_and_notify directly.)
assert not event.internal_metadata.outlier
try:
if (
not backfilled
and not context.rejected
and (await self._store.get_min_depth(event.room_id)) <= event.depth
):
await self._action_generator.handle_push_actions_for_event(
event, context
)
if not backfilled and not context.rejected:
min_depth = await self._store.get_min_depth(event.room_id)
if min_depth is None or min_depth > event.depth:
# XXX richvdh 2021/10/07: I don't really understand what this
# condition is doing. I think it's trying not to send pushes
# for events that predate our join - but that's not really what
# min_depth means, and anyway ancient events are a more general
# problem.
#
# for now I'm just going to log about it.
logger.info(
"Skipping push actions for old event with depth %s < %s",
event.depth,
min_depth,
)
else:
await self._action_generator.handle_push_actions_for_event(
event, context
)
try:
await self.persist_events_and_notify(
event.room_id, [(event, context)], backfilled=backfilled
)
@ -1837,6 +1949,3 @@ class FederationEventHandler:
len(ev.auth_event_ids()),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
async def get_min_depth_for_context(self, context: str) -> int:
return await self._store.get_min_depth(context)

View File

@ -54,7 +54,9 @@ class IdentityHandler:
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
self.blacklisting_http_client = SimpleHttpClient(
hs, ip_blacklist=hs.config.server.federation_ip_range_blacklist
hs,
ip_blacklist=hs.config.server.federation_ip_range_blacklist,
ip_whitelist=hs.config.server.federation_ip_range_whitelist,
)
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
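The identity-handler change threads the federation IP whitelist through next to the blacklist. A sketch of the effective rule as I read it — treating a whitelist hit as an exemption from the blacklist is an assumption about Synapse's blacklisting client, not a quote of it:

from ipaddress import ip_address, ip_network

def is_destination_allowed(addr: str, blacklist, whitelist) -> bool:
    # Assumption: a whitelist hit exempts an address from the blacklist;
    # otherwise any blacklist hit blocks it.
    ip = ip_address(addr)
    if any(ip in net for net in whitelist):
        return True
    return not any(ip in net for net in blacklist)

blacklist = [ip_network("10.0.0.0/8")]
whitelist = [ip_network("10.1.2.0/24")]
assert is_destination_allowed("10.1.2.3", blacklist, whitelist)
assert not is_destination_allowed("10.9.9.9", blacklist, whitelist)
assert is_destination_allowed("8.8.8.8", blacklist, whitelist)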

View File

@ -609,29 +609,6 @@ class EventCreationHandler:
builder.internal_metadata.historical = historical
# Strip down the auth_event_ids to only what we need to auth the event.
# For example, we don't need extra m.room.member that don't match event.sender
if auth_event_ids is not None:
# If auth events are provided, prev events must be also.
assert prev_event_ids is not None
temp_event = await builder.build(
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
depth=depth,
)
auth_events = await self.store.get_events_as_list(auth_event_ids)
# Create a StateMap[str]
auth_event_state_map = {
(e.type, e.state_key): e.event_id for e in auth_events
}
# Actually strip down and use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event,
current_state_ids=auth_event_state_map,
for_verification=False,
)
event, context = await self.create_new_client_event(
builder=builder,
requester=requester,
@ -938,6 +915,33 @@ class EventCreationHandler:
Tuple of created event, context
"""
# Strip down the auth_event_ids to only what we need to auth the event.
# For example, we don't need extra m.room.member that don't match event.sender
full_state_ids_at_event = None
if auth_event_ids is not None:
# If auth events are provided, prev events must be also.
assert prev_event_ids is not None
# Copy the full auth state before it is stripped down
full_state_ids_at_event = auth_event_ids.copy()
temp_event = await builder.build(
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
depth=depth,
)
auth_events = await self.store.get_events_as_list(auth_event_ids)
# Create a StateMap[str]
auth_event_state_map = {
(e.type, e.state_key): e.event_id for e in auth_events
}
# Actually strip down and use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event,
current_state_ids=auth_event_state_map,
for_verification=False,
)
if prev_event_ids is not None:
assert (
len(prev_event_ids) <= 10
@ -967,6 +971,13 @@ class EventCreationHandler:
if builder.internal_metadata.outlier:
event.internal_metadata.outlier = True
context = EventContext.for_outlier()
elif (
event.type == EventTypes.MSC2716_INSERTION
and full_state_ids_at_event
and builder.internal_metadata.is_historical()
):
old_state = await self.store.get_events_as_list(full_state_ids_at_event)
context = await self.state.compute_event_context(event, old_state=old_state)
else:
context = await self.state.compute_event_context(event)

View File

@ -86,19 +86,22 @@ class PaginationHandler:
self._event_serializer = hs.get_event_client_serializer()
self._retention_default_max_lifetime = (
hs.config.server.retention_default_max_lifetime
hs.config.retention.retention_default_max_lifetime
)
self._retention_allowed_lifetime_min = (
hs.config.server.retention_allowed_lifetime_min
hs.config.retention.retention_allowed_lifetime_min
)
self._retention_allowed_lifetime_max = (
hs.config.server.retention_allowed_lifetime_max
hs.config.retention.retention_allowed_lifetime_max
)
if hs.config.worker.run_background_tasks and hs.config.server.retention_enabled:
if (
hs.config.worker.run_background_tasks
and hs.config.retention.retention_enabled
):
# Run the purge jobs described in the configuration file.
for job in hs.config.server.retention_purge_jobs:
for job in hs.config.retention.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
self.clock.looping_call(

View File

@ -52,7 +52,6 @@ import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.appservice import ApplicationService
from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
from synapse.logging.utils import log_function
@ -1483,13 +1482,39 @@ def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) ->
def format_user_presence_state(
state: UserPresenceState, now: int, include_user_id: bool = True
) -> JsonDict:
"""Convert UserPresenceState to a format that can be sent down to clients
"""Convert UserPresenceState to a JSON format that can be sent down to clients
and to other servers.
The "user_id" is optional so that this function can be used to format presence
updates for client /sync responses and for federation /send requests.
Args:
state: The user presence state to format.
now: The current timestamp since the epoch in ms.
include_user_id: Whether to include `user_id` in the returned dictionary.
As this function can be used both to format presence updates for client /sync
responses and for federation /send requests, only the latter needs to include
the `user_id` field.
Returns:
A JSON dictionary with the following keys:
* presence: The presence state as a str.
* user_id: Optional. Included if `include_user_id` is truthy. The canonical
Matrix ID of the user.
* last_active_ago: Optional. Included if `last_active_ts` is set on `state`.
The timestamp that the user was last active.
* status_msg: Optional. Included if `status_msg` is set on `state`. The user's
status.
* currently_active: Optional. Included only if `state.state` is "online".
Example:
{
"presence": "online",
"user_id": "@alice:example.com",
"last_active_ago": 16783813918,
"status_msg": "Hello world!",
"currently_active": True
}
""" """
content = {"presence": state.state} content: JsonDict = {"presence": state.state}
if include_user_id: if include_user_id:
content["user_id"] = state.user_id content["user_id"] = state.user_id
if state.last_active_ts: if state.last_active_ts:
@ -1526,7 +1551,6 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
include_offline: bool = True,
service: Optional[ApplicationService] = None,
) -> Tuple[List[UserPresenceState], int]:
# The process for getting presence events are:
# 1. Get the rooms the user is in.
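The new `format_user_presence_state` docstring fully specifies the output keys. A standalone sketch of that contract, with a trimmed-down PresenceState stand-in (field names mirror the docstring, not necessarily the full Synapse class):

from dataclasses import dataclass
from typing import Optional

@dataclass
class PresenceState:
    # Trimmed-down stand-in for Synapse's UserPresenceState.
    user_id: str
    state: str                     # e.g. "online", "offline", "unavailable"
    last_active_ts: int = 0        # ms since the epoch; 0 means unknown
    status_msg: Optional[str] = None
    currently_active: bool = False

def format_presence(s: PresenceState, now: int, include_user_id: bool = True) -> dict:
    content: dict = {"presence": s.state}
    if include_user_id:
        content["user_id"] = s.user_id
    if s.last_active_ts:
        content["last_active_ago"] = now - s.last_active_ts
    if s.status_msg:
        content["status_msg"] = s.status_msg
    if s.state == "online":
        # Per the docstring, currently_active only appears for online users.
        content["currently_active"] = s.currently_active
    return content

alice = PresenceState("@alice:example.com", "online", 1_000, "Hello world!", True)
assert format_presence(alice, 5_000)["last_active_ago"] == 4_000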

View File

@ -242,12 +242,18 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
async def get_new_events_as(
self, from_key: int, service: ApplicationService
) -> Tuple[List[JsonDict], int]:
"""Returns a set of new receipt events that an appservice
"""Returns a set of new read receipt events that an appservice
may be interested in.
Args:
from_key: the stream position at which events should be fetched from
service: The appservice which may be interested
Returns:
A two-tuple containing the following:
* A list of json dictionaries derived from read receipts that the
appservice may be interested in.
* The current read receipt stream token.
""" """
from_key = int(from_key) from_key = int(from_key)
to_key = self.get_current_key() to_key = self.get_current_key()

View File

@ -465,17 +465,35 @@ class RoomCreationHandler:
# the room has been created
# Calculate the minimum power level needed to clone the room
event_power_levels = power_levels.get("events", {})
if not isinstance(event_power_levels, dict):
event_power_levels = {}
state_default = power_levels.get("state_default", 50)
try:
state_default_int = int(state_default)  # type: ignore[arg-type]
except (TypeError, ValueError):
state_default_int = 50
ban = power_levels.get("ban", 50)
needed_power_level = max(state_default, ban, max(event_power_levels.values()))
try:
ban = int(ban)  # type: ignore[arg-type]
except (TypeError, ValueError):
ban = 50
needed_power_level = max(
state_default_int, ban, max(event_power_levels.values())
)
# Get the user's current power level, this matches the logic in get_user_power_level,
# but without the entire state map.
user_power_levels = power_levels.setdefault("users", {})
if not isinstance(user_power_levels, dict):
user_power_levels = {}
users_default = power_levels.get("users_default", 0)
current_power_level = user_power_levels.get(user_id, users_default)
try:
current_power_level_int = int(current_power_level)  # type: ignore[arg-type]
except (TypeError, ValueError):
current_power_level_int = 0
# Raise the requester's power level in the new room if necessary
if current_power_level < needed_power_level:
if current_power_level_int < needed_power_level:
user_power_levels[user_id] = needed_power_level
await self._send_events_for_new_room(
@ -765,6 +783,15 @@ class RoomCreationHandler:
if not allowed_by_third_party_rules:
raise SynapseError(403, "Room visibility value not allowed.")
if is_public:
if not self.config.roomdirectory.is_publishing_room_allowed(
user_id, room_id, room_alias
):
# Let's just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule?
raise SynapseError(403, "Not allowed to publish room")
directory_handler = self.hs.get_directory_handler()
if room_alias:
await directory_handler.create_association(
@ -775,15 +802,6 @@
check_membership=False,
)
if is_public:
if not self.config.roomdirectory.is_publishing_room_allowed(
user_id, room_id, room_alias
):
# Lets just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule?
raise SynapseError(403, "Not allowed to publish room")
preset_config = config.get(
"preset",
RoomCreationPreset.PRIVATE_CHAT
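The power-level clamping above coerces possibly-malformed values with try/int()/except before taking max(). The same defensive shape, distilled (the `default=0` guard for an empty events dict is an addition for self-containedness, not part of the handler):

def as_int(value, default: int) -> int:
    # Malformed power levels (strings, None, etc.) fall back to a sane default.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

power_levels = {"state_default": "75", "ban": None, "events": {"m.room.name": 100}}
state_default = as_int(power_levels.get("state_default", 50), 50)
ban = as_int(power_levels.get("ban", 50), 50)
event_levels = power_levels.get("events", {})
if not isinstance(event_levels, dict):
    event_levels = {}
needed = max(state_default, ban, max(event_levels.values(), default=0))
assert needed == 100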

View File

@ -13,6 +13,10 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
def generate_fake_event_id() -> str:
return "$fake_" + random_string(43)
class RoomBatchHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
@ -180,6 +184,11 @@ class RoomBatchHandler:
state_event_ids_at_start = []
auth_event_ids = initial_auth_event_ids.copy()
# Make the state events float off on their own so we don't have a
# bunch of `@mxid joined the room` noise between each batch
prev_event_id_for_state_chain = generate_fake_event_id()
for state_event in state_events_at_start:
assert_params_in_dict(
state_event, ["type", "origin_server_ts", "content", "sender"]
@ -203,10 +212,6 @@ class RoomBatchHandler:
# Mark all events as historical
event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
# Make the state events float off on their own so we don't have a
# bunch of `@mxid joined the room` noise between each batch
fake_prev_event_id = "$" + random_string(43)
# TODO: This is pretty much the same as some other code to handle inserting state in this file
if event_dict["type"] == EventTypes.Member:
membership = event_dict["content"].get("membership", None)
@ -220,7 +225,7 @@ class RoomBatchHandler:
action=membership,
content=event_dict["content"],
outlier=True,
prev_event_ids=[fake_prev_event_id],
prev_event_ids=[prev_event_id_for_state_chain],
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and also update in the event when we append later.
@ -240,7 +245,7 @@ class RoomBatchHandler:
),
event_dict,
outlier=True,
prev_event_ids=[fake_prev_event_id],
prev_event_ids=[prev_event_id_for_state_chain],
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and also update in the event when we append later.
@ -250,6 +255,8 @@ class RoomBatchHandler:
state_event_ids_at_start.append(event_id)
auth_event_ids.append(event_id)
# Connect all the state in a floating chain
prev_event_id_for_state_chain = event_id
return state_event_ids_at_start
@ -296,6 +303,10 @@ class RoomBatchHandler:
for ev in events_to_create:
assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
assert self.hs.is_mine_id(ev["sender"]), "User must be our own: %s" % (
ev["sender"],
)
event_dict = {
"type": ev["type"],
"origin_server_ts": ev["origin_server_ts"],
@ -318,6 +329,19 @@
historical=True,
depth=inherited_depth,
)
assert context._state_group
# Normally this is done when persisting the event but we have to
# pre-emptively do it here because we create all the events first,
# then persist them in another pass below. And we want to share
# state_groups across the whole batch so this lookup needs to work
# for the next event in the batch in this loop.
await self.store.store_state_group_id_for_event_id(
event_id=event.event_id,
state_group_id=context._state_group,
)
logger.debug(
"RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
event,
@ -325,10 +349,6 @@
auth_event_ids,
)
assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
event.sender,
)
events_to_persist.append((event, context))
event_id = event.event_id
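The batch handler now chains the historical state events off a single fake prev event, so the whole chain floats free of the room DAG. A toy sketch of that chaining (the character set for the fake id is an assumption about `random_string`):

import random
import string

def generate_fake_event_id() -> str:
    # Same shape as the helper above: "$fake_" plus 43 random characters.
    alphabet = string.ascii_letters + string.digits
    return "$fake_" + "".join(random.choices(alphabet, k=43))

def chain_state_events(state_events: list) -> list:
    # Thread each state event onto the previous one's event_id, starting
    # from a fake id, so the chain hangs off the DAG on its own.
    prev_event_id = generate_fake_event_id()
    chained = []
    for i, ev in enumerate(state_events):
        event_id = f"$state_{i}"
        chained.append({**ev, "event_id": event_id, "prev_events": [prev_event_id]})
        prev_event_id = event_id
    return chained

chain = chain_state_events([{"type": "m.room.member"}, {"type": "m.room.member"}])
assert chain[1]["prev_events"] == [chain[0]["event_id"]]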

View File

@ -465,17 +465,23 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
may be interested in.
Args:
from_key: the stream position at which events should be fetched from
from_key: the stream position at which events should be fetched from.
service: The appservice which may be interested
service: The appservice which may be interested.
Returns:
A two-tuple containing the following:
* A list of json dictionaries derived from typing events that the
appservice may be interested in.
* The latest known room serial.
"""
with Measure(self.clock, "typing.get_new_events_as"):
from_key = int(from_key)
handler = self.get_typing_handler()
events = []
for room_id in handler._room_serials.keys():
if handler._room_serials[room_id] <= from_key:
continue
if not await service.matches_user_in_member_list(
room_id, handler.store
):
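The typing source's filter is just "rooms whose serial advanced past the caller's stream position"; the appservice interest check runs afterwards. In miniature:

def rooms_changed_since(room_serials: dict, from_key: int) -> list:
    # Keep only rooms whose serial advanced past the stream position;
    # interest checks are applied to these afterwards.
    return [room_id for room_id, serial in room_serials.items() if serial > from_key]

assert rooms_changed_since({"!a:hs": 3, "!b:hs": 7}, 5) == ["!b:hs"]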

View File

@ -196,63 +196,12 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id, prev_event_id, event_id, typ
)
elif typ == EventTypes.Member:
change = await self._get_key_change(
prev_event_id,
event_id,
key_name="membership",
public_value=Membership.JOIN,
)
await self._handle_room_membership_event(
room_id,
prev_event_id,
event_id,
state_key,
)
is_remote = not self.is_mine_id(state_key)
if change is MatchChange.now_false:
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
is_in_room = await self.store.is_host_joined(
room_id, self.server_name
)
if not is_in_room:
logger.debug("Server left room: %r", room_id)
# Fetch all the users that we marked as being in user
# directory due to being in the room and then check if
# need to remove those users or not
user_ids = await self.store.get_users_in_dir_due_to_room(
room_id
)
for user_id in user_ids:
await self._handle_remove_user(room_id, user_id)
continue
else:
logger.debug("Server is still in room: %r", room_id)
include_in_dir = (
is_remote
or await self.store.should_include_local_user_in_dir(state_key)
)
if include_in_dir:
if change is MatchChange.no_change:
# Handle any profile changes for remote users.
# (For local users we are not forced to scan membership
# events; instead the rest of the application calls
# `handle_local_profile_change`.)
if is_remote:
await self._handle_profile_change(
state_key, room_id, prev_event_id, event_id
)
continue
if change is MatchChange.now_true: # The user joined
# This may be the first time we've seen a remote user. If
# so, ensure we have a directory entry for them. (We don't
# need to do this for local users: their directory entry
# is created at the point of registration.
if is_remote:
await self._upsert_directory_entry_for_remote_user(
state_key, event_id
)
await self._track_user_joined_room(room_id, state_key)
else: # The user left
await self._handle_remove_user(room_id, state_key)
else:
logger.debug("Ignoring irrelevant type: %r", typ)
@ -317,14 +266,83 @@ class UserDirectoryHandler(StateDeltasHandler):
for user_id in users_in_room:
await self.store.remove_user_who_share_room(user_id, room_id)
# Then, re-add them to the tables.
# Then, re-add all remote users and some local users to the tables.
# NOTE: this is not the most efficient method, as _track_user_joined_room sets
# up local_user -> other_user and other_user_whos_local -> local_user,
# which when ran over an entire room, will result in the same values
# being added multiple times. The batching upserts shouldn't make this
# too bad, though.
for user_id in users_in_room:
await self._track_user_joined_room(room_id, user_id)
if not self.is_mine_id(
user_id
) or await self.store.should_include_local_user_in_dir(user_id):
await self._track_user_joined_room(room_id, user_id)
async def _handle_room_membership_event(
self,
room_id: str,
prev_event_id: str,
event_id: str,
state_key: str,
) -> None:
"""Process a single room membershp event.
We have to do two things:
1. Update the room-sharing tables.
This applies to remote users and non-excluded local users.
2. Update the user_directory and user_directory_search tables.
This applies to remote users only, because we only become aware of
them (and any profile changes) by listening to these events.
The rest of the application knows exactly when local users are
created or their profile changed---it will directly call methods
on this class.
"""
joined = await self._get_key_change(
prev_event_id,
event_id,
key_name="membership",
public_value=Membership.JOIN,
)
# Both cases ignore excluded local users, so start by discarding them.
is_remote = not self.is_mine_id(state_key)
if not is_remote and not await self.store.should_include_local_user_in_dir(
state_key
):
return
if joined is MatchChange.now_false:
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
is_in_room = await self.store.is_host_joined(room_id, self.server_name)
if not is_in_room:
logger.debug("Server left room: %r", room_id)
# Fetch all the users that we marked as being in user
# directory due to being in the room and then check if
# need to remove those users or not
user_ids = await self.store.get_users_in_dir_due_to_room(room_id)
for user_id in user_ids:
await self._handle_remove_user(room_id, user_id)
else:
logger.debug("Server is still in room: %r", room_id)
await self._handle_remove_user(room_id, state_key)
elif joined is MatchChange.no_change:
# Handle any profile changes for remote users.
# (For local users the rest of the application calls
# `handle_local_profile_change`.)
if is_remote:
await self._handle_possible_remote_profile_change(
state_key, room_id, prev_event_id, event_id
)
elif joined is MatchChange.now_true: # The user joined
# This may be the first time we've seen a remote user. If
# so, ensure we have a directory entry for them. (For local users,
# the rest of the application calls `handle_local_profile_change`.)
if is_remote:
await self._upsert_directory_entry_for_remote_user(state_key, event_id)
await self._track_user_joined_room(room_id, state_key)
async def _upsert_directory_entry_for_remote_user(
self, user_id: str, event_id: str
@ -349,61 +367,67 @@ class UserDirectoryHandler(StateDeltasHandler):
"""Someone's just joined a room. Update `users_in_public_rooms` or """Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate. `users_who_share_private_rooms` as appropriate.
The caller is responsible for ensuring that the given user is not excluded The caller is responsible for ensuring that the given user should be
from the user directory. included in the user directory.
""" """
is_public = await self.store.is_room_world_readable_or_publicly_joinable( is_public = await self.store.is_room_world_readable_or_publicly_joinable(
room_id room_id
) )
other_users_in_room = await self.store.get_users_in_room(room_id)
if is_public: if is_public:
await self.store.add_users_in_public_rooms(room_id, (user_id,)) await self.store.add_users_in_public_rooms(room_id, (user_id,))
else: else:
users_in_room = await self.store.get_users_in_room(room_id)
other_users_in_room = [
other
for other in users_in_room
if other != user_id
and (
not self.is_mine_id(other)
or await self.store.should_include_local_user_in_dir(other)
)
]
to_insert = set()
# First, if they're our user then we need to update for every user
if self.is_mine_id(user_id):
if await self.store.should_include_local_user_in_dir(user_id):
for other_user_id in other_users_in_room:
if user_id == other_user_id:
continue
to_insert.add((user_id, other_user_id))
for other_user_id in other_users_in_room:
to_insert.add((user_id, other_user_id))
# Next we need to update for every local user in the room
for other_user_id in other_users_in_room:
if user_id == other_user_id:
continue
include_other_user = self.is_mine_id(
other_user_id
) and await self.store.should_include_local_user_in_dir(other_user_id)
if include_other_user:
to_insert.add((other_user_id, user_id))
if self.is_mine_id(other_user_id):
to_insert.add((other_user_id, user_id))
if to_insert:
await self.store.add_users_who_share_private_room(room_id, to_insert)
async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
"""Called when we might need to remove user from directory
"""Called when someone leaves a room. The user may be local or remote.
(If the person who left was the last local user in this room, the server
is no longer in the room. We call this function to forget that the remaining
remote users are in the room, even though they haven't left. So the name is
a little misleading!)
Args:
room_id: The room ID that user left or stopped being public that
user_id
"""
logger.debug("Removing user %r", user_id)
logger.debug("Removing user %r from room %r", user_id, room_id)
# Remove user from sharing tables
await self.store.remove_user_who_share_room(user_id, room_id)
# Are they still in any rooms? If not, remove them entirely.
rooms_user_is_in = await self.store.get_user_dir_rooms_user_is_in(user_id)
# Additionally, if they're a remote user and we're no longer joined
# to any rooms they're in, remove them from the user directory.
if not self.is_mine_id(user_id):
rooms_user_is_in = await self.store.get_user_dir_rooms_user_is_in(user_id)
if len(rooms_user_is_in) == 0:
await self.store.remove_from_user_dir(user_id)
logger.debug("Removing user %r from directory", user_id)
await self.store.remove_from_user_dir(user_id)
async def _handle_profile_change(
async def _handle_possible_remote_profile_change(
self,
user_id: str,
room_id: str,
@ -411,7 +435,8 @@ class UserDirectoryHandler(StateDeltasHandler):
event_id: Optional[str],
) -> None:
"""Check member event changes for any profile changes and update the
database if there are.
database if there are. This is intended for remote users only. The caller
is responsible for checking that the given user is remote.
"""
if not prev_event_id or not event_id:
return
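The refactor reduces membership handling to a three-way dispatch on how the "membership is join" predicate changed between the previous and current events. A schematic sketch (MatchChange re-created locally; the dispatch strings summarise, rather than call, the handlers above):

from enum import Enum, auto

class MatchChange(Enum):
    # Local re-creation of the three-valued result used above.
    no_change = auto()
    now_true = auto()
    now_false = auto()

def membership_change(prev: str, new: str) -> MatchChange:
    prev_join, new_join = prev == "join", new == "join"
    if prev_join == new_join:
        return MatchChange.no_change
    return MatchChange.now_true if new_join else MatchChange.now_false

def dispatch(change: MatchChange, is_remote: bool) -> str:
    if change is MatchChange.now_false:
        return "remove the user (and maybe the whole room) from the directory"
    if change is MatchChange.no_change:
        return "check for a remote profile change" if is_remote else "ignore"
    return "upsert directory entry if remote, then track the join"

assert dispatch(membership_change("join", "leave"), True).startswith("remove")
assert dispatch(membership_change("join", "join"), False) == "ignore"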

View File

@ -21,6 +21,7 @@ import typing
import urllib.parse
from io import BytesIO, StringIO
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Generic,
@ -73,6 +74,9 @@ from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
outgoing_requests_counter = Counter( outgoing_requests_counter = Counter(
@ -319,7 +323,7 @@ class MatrixFederationHttpClient:
requests. requests.
""" """
def __init__(self, hs, tls_client_options_factory): def __init__(self, hs: "HomeServer", tls_client_options_factory):
self.hs = hs self.hs = hs
self.signing_key = hs.signing_key self.signing_key = hs.signing_key
self.server_name = hs.hostname self.server_name = hs.hostname
@ -711,7 +715,7 @@ class MatrixFederationHttpClient:
Returns: Returns:
A list of headers to be added as "Authorization:" headers A list of headers to be added as "Authorization:" headers
""" """
request = { request: JsonDict = {
"method": method.decode("ascii"), "method": method.decode("ascii"),
"uri": url_bytes.decode("ascii"), "uri": url_bytes.decode("ascii"),
"origin": self.server_name, "origin": self.server_name,
@@ -22,6 +22,7 @@ import urllib
 from http import HTTPStatus
 from inspect import isawaitable
 from typing import (
+    TYPE_CHECKING,
     Any,
     Awaitable,
     Callable,
@@ -61,6 +62,9 @@ from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
 from synapse.util.iterutils import chunk_seq

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)

 HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
@@ -343,6 +347,11 @@ class DirectServeJsonResource(_AsyncResource):
         return_json_error(f, request)

+_PathEntry = collections.namedtuple(
+    "_PathEntry", ["pattern", "callback", "servlet_classname"]
+)
+
 class JsonResource(DirectServeJsonResource):
     """This implements the HttpServer interface and provides JSON support for
     Resources.
@@ -359,14 +368,10 @@ class JsonResource(DirectServeJsonResource):
     isLeaf = True

-    _PathEntry = collections.namedtuple(
-        "_PathEntry", ["pattern", "callback", "servlet_classname"]
-    )
-
-    def __init__(self, hs, canonical_json=True, extract_context=False):
+    def __init__(self, hs: "HomeServer", canonical_json=True, extract_context=False):
         super().__init__(canonical_json, extract_context)
         self.clock = hs.get_clock()
-        self.path_regexs = {}
+        self.path_regexs: Dict[bytes, List[_PathEntry]] = {}
         self.hs = hs

     def register_paths(self, method, path_patterns, callback, servlet_classname):
@@ -391,7 +396,7 @@ class JsonResource(DirectServeJsonResource):
         for path_pattern in path_patterns:
             logger.debug("Registering for %s %s", method, path_pattern.pattern)
             self.path_regexs.setdefault(method, []).append(
-                self._PathEntry(path_pattern, callback, servlet_classname)
+                _PathEntry(path_pattern, callback, servlet_classname)
             )

     def _get_handler_for_request(
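Hoisting `_PathEntry` from a class attribute to module level is what makes the new `Dict[bytes, List[_PathEntry]]` annotation possible: a name defined in a class body cannot easily be referenced in annotations elsewhere in the module. A standalone sketch of the resulting registration structure, with an illustrative regex, callback, and servlet name (not taken from Synapse):

import collections
import re
from typing import Dict, List

_PathEntry = collections.namedtuple(
    "_PathEntry", ["pattern", "callback", "servlet_classname"]
)

path_regexs: Dict[bytes, List[_PathEntry]] = {}

def register_path(method: bytes, pattern: str, callback, servlet_classname: str) -> None:
    # Mirrors JsonResource.register_paths: one list of entries per HTTP method.
    path_regexs.setdefault(method, []).append(
        _PathEntry(re.compile(pattern), callback, servlet_classname)
    )

register_path(b"GET", "^/_matrix/client/versions$", lambda request: (200, {}), "VersionsServlet")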
@@ -45,6 +45,8 @@ from synapse.http.servlet import parse_json_object_from_request
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.rest.client.login import LoginResponse
+from synapse.storage import DataStore
 from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.roommember import ProfileInfo
 from synapse.storage.state import StateFilter
@@ -60,6 +62,7 @@ from synapse.util import Clock
 from synapse.util.caches.descriptors import cached

 if TYPE_CHECKING:
+    from synapse.app.generic_worker import GenericWorkerSlavedStore
     from synapse.server import HomeServer

 """
@@ -83,6 +86,8 @@ __all__ = [
     "DirectServeJsonResource",
     "ModuleApi",
     "PRESENCE_ALL_USERS",
+    "LoginResponse",
+    "JsonDict",
 ]

 logger = logging.getLogger(__name__)
@@ -108,7 +113,9 @@ class ModuleApi:
     def __init__(self, hs: "HomeServer", auth_handler):
         self._hs = hs

-        self._store = hs.get_datastore()
+        # TODO: Fix this type hint once the types for the data stores have been ironed
+        # out.
+        self._store: Union[DataStore, "GenericWorkerSlavedStore"] = hs.get_datastore()
         self._auth = hs.get_auth()
         self._auth_handler = auth_handler
         self._server_name = hs.hostname
@@ -139,6 +146,7 @@ class ModuleApi:
         self._spam_checker = hs.get_spam_checker()
         self._account_validity_handler = hs.get_account_validity_handler()
         self._third_party_event_rules = hs.get_third_party_event_rules()
+        self._password_auth_provider = hs.get_password_auth_provider()
         self._presence_router = hs.get_presence_router()

 #################################################################################
@@ -146,24 +154,44 @@ class ModuleApi:
     @property
     def register_spam_checker_callbacks(self):
-        """Registers callbacks for spam checking capabilities."""
+        """Registers callbacks for spam checking capabilities.
+
+        Added in Synapse v1.37.0.
+        """
         return self._spam_checker.register_callbacks

     @property
     def register_account_validity_callbacks(self):
-        """Registers callbacks for account validity capabilities."""
+        """Registers callbacks for account validity capabilities.
+
+        Added in Synapse v1.39.0.
+        """
         return self._account_validity_handler.register_account_validity_callbacks

     @property
     def register_third_party_rules_callbacks(self):
-        """Registers callbacks for third party event rules capabilities."""
+        """Registers callbacks for third party event rules capabilities.
+
+        Added in Synapse v1.39.0.
+        """
         return self._third_party_event_rules.register_third_party_rules_callbacks

     @property
     def register_presence_router_callbacks(self):
-        """Registers callbacks for presence router capabilities."""
+        """Registers callbacks for presence router capabilities.
+
+        Added in Synapse v1.42.0.
+        """
         return self._presence_router.register_presence_router_callbacks

+    @property
+    def register_password_auth_provider_callbacks(self):
+        """Registers callbacks for password auth provider capabilities.
+
+        Added in Synapse v1.46.0.
+        """
+        return self._password_auth_provider.register_password_auth_provider_callbacks
+
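For context, a hedged sketch of how a third-party module might use this new property. The `auth_checkers` keyword and the checker signature below follow the v1.46 password auth provider callback documentation, but treat them, along with the whole `MyAuthProvider` class, as illustrative assumptions rather than a definitive implementation:

from typing import Optional, Tuple

class MyAuthProvider:
    def __init__(self, config: dict, api):
        self.api = api
        self.api.register_password_auth_provider_callbacks(
            auth_checkers={
                # (login type, expected fields in login_dict) -> checker callback
                ("m.login.password", ("password",)): self.check_password,
            },
        )

    async def check_password(
        self, username: str, login_type: str, login_dict: dict
    ) -> Optional[Tuple[str, None]]:
        # Illustrative only; a real module would verify against its own backend.
        if login_dict.get("password") == "correct-horse-battery-staple":
            return self.api.get_qualified_user_id(username), None
        return None  # fall through to other auth checkers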
     def register_web_resource(self, path: str, resource: IResource):
         """Registers a web resource to be served at the given path.
@@ -172,6 +200,8 @@ class ModuleApi:
         If multiple modules register a resource for the same path, the module that
         appears the highest in the configuration file takes priority.

+        Added in Synapse v1.37.0.
+
         Args:
             path: The path to register the resource for.
             resource: The resource to attach to this path.
@@ -186,6 +216,8 @@ class ModuleApi:
         """Allows making outbound HTTP requests to remote resources.

         An instance of synapse.http.client.SimpleHttpClient
+
+        Added in Synapse v1.22.0.
         """
         return self._http_client
@@ -195,22 +227,32 @@ class ModuleApi:
         public room list.

         An instance of synapse.module_api.PublicRoomListManager
+
+        Added in Synapse v1.22.0.
         """
         return self._public_room_list_manager

     @property
     def public_baseurl(self) -> str:
-        """The configured public base URL for this homeserver."""
+        """The configured public base URL for this homeserver.
+
+        Added in Synapse v1.39.0.
+        """
         return self._hs.config.server.public_baseurl

     @property
     def email_app_name(self) -> str:
-        """The application name configured in the homeserver's configuration."""
+        """The application name configured in the homeserver's configuration.
+
+        Added in Synapse v1.39.0.
+        """
         return self._hs.config.email.email_app_name
     async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
         """Get user info by user_id

+        Added in Synapse v1.41.0.
+
         Args:
             user_id: Fully qualified user id.
         Returns:
@@ -226,6 +268,8 @@ class ModuleApi:
     ) -> Requester:
         """Check the access_token provided for a request

+        Added in Synapse v1.39.0.
+
         Args:
             req: Incoming HTTP request
             allow_guest: True if guest users should be allowed. If this
@@ -251,6 +295,8 @@ class ModuleApi:
     async def is_user_admin(self, user_id: str) -> bool:
         """Checks if a user is a server admin.

+        Added in Synapse v1.39.0.
+
         Args:
             user_id: The Matrix ID of the user to check.
@@ -265,6 +311,8 @@ class ModuleApi:
         Takes a user id provided by the user and adds the @ and :domain to
         qualify it, if necessary

+        Added in Synapse v0.25.0.
+
         Args:
             username (str): provided user id
@@ -278,6 +326,8 @@ class ModuleApi:
     async def get_profile_for_user(self, localpart: str) -> ProfileInfo:
         """Look up the profile info for the user with the given localpart.

+        Added in Synapse v1.39.0.
+
         Args:
             localpart: The localpart to look up profile information for.
@@ -290,6 +340,8 @@ class ModuleApi:
         """Look up the threepids (email addresses and phone numbers) associated with the
         given Matrix user ID.

+        Added in Synapse v1.39.0.
+
         Args:
             user_id: The Matrix user ID to look up threepids for.
@@ -304,6 +356,8 @@ class ModuleApi:
     def check_user_exists(self, user_id):
         """Check if user exists.

+        Added in Synapse v0.25.0.
+
         Args:
             user_id (str): Complete @user:id
@@ -323,6 +377,8 @@ class ModuleApi:
         return that device to the user. Prefer separate calls to register_user and
         register_device.

+        Added in Synapse v0.25.0.
+
         Args:
             localpart (str): The localpart of the new user.
             displayname (str|None): The displayname of the new user.
@@ -343,6 +399,8 @@ class ModuleApi:
     ):
         """Registers a new user with given localpart and optional displayname, emails.

+        Added in Synapse v1.2.0.
+
         Args:
             localpart (str): The localpart of the new user.
             displayname (str|None): The displayname of the new user.
@@ -366,6 +424,8 @@ class ModuleApi:
     def register_device(self, user_id, device_id=None, initial_display_name=None):
         """Register a device for a user and generate an access token.

+        Added in Synapse v1.2.0.
+
         Args:
             user_id (str): full canonical @user:id
             device_id (str|None): The device ID to check, or None to generate
@@ -389,6 +449,8 @@ class ModuleApi:
     ) -> defer.Deferred:
         """Record a mapping from an external user id to a mxid

+        Added in Synapse v1.9.0.
+
         Args:
             auth_provider: identifier for the remote auth provider
             external_id: id on that system
@@ -408,6 +470,8 @@ class ModuleApi:
     ) -> str:
         """Generate a login token suitable for m.login.token authentication

+        Added in Synapse v1.9.0.
+
         Args:
             user_id: gives the ID of the user that the token is for
@@ -427,6 +491,8 @@ class ModuleApi:
     def invalidate_access_token(self, access_token):
         """Invalidate an access token for a user

+        Added in Synapse v0.25.0.
+
         Args:
             access_token(str): access token
@@ -457,6 +523,8 @@ class ModuleApi:
     def run_db_interaction(self, desc, func, *args, **kwargs):
         """Run a function with a database connection

+        Added in Synapse v0.25.0.
+
         Args:
             desc (str): description for the transaction, for metrics etc
             func (func): function to be run. Passed a database cursor object
@@ -480,6 +548,8 @@ class ModuleApi:
         This is deprecated in favor of complete_sso_login_async.

+        Added in Synapse v1.11.1.
+
registered_user_id: The MXID that has been registered as a previous step of registered_user_id: The MXID that has been registered as a previous step of
of this SSO login. of this SSO login.
@ -506,6 +576,8 @@ class ModuleApi:
want their access token sent to `client_redirect_url`, or redirect them to that want their access token sent to `client_redirect_url`, or redirect them to that
URL with a token directly if the URL matches with one of the whitelisted clients. URL with a token directly if the URL matches with one of the whitelisted clients.
Added in Synapse v1.13.0.
Args: Args:
registered_user_id: The MXID that has been registered as a previous step of registered_user_id: The MXID that has been registered as a previous step of
of this SSO login. of this SSO login.
@ -534,6 +606,8 @@ class ModuleApi:
(This is exposed for compatibility with the old SpamCheckerApi. We should (This is exposed for compatibility with the old SpamCheckerApi. We should
probably deprecate it and replace it with an async method in a subclass.) probably deprecate it and replace it with an async method in a subclass.)
Added in Synapse v1.22.0.
Args: Args:
room_id: The room ID to get state events in. room_id: The room ID to get state events in.
types: The event type and state key (using None types: The event type and state key (using None
@ -554,6 +628,8 @@ class ModuleApi:
async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase: async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase:
"""Create and send an event into a room. Membership events are currently not supported. """Create and send an event into a room. Membership events are currently not supported.
Added in Synapse v1.22.0.
Args: Args:
event_dict: A dictionary representing the event to send. event_dict: A dictionary representing the event to send.
Required keys are `type`, `room_id`, `sender` and `content`. Required keys are `type`, `room_id`, `sender` and `content`.
@ -594,6 +670,8 @@ class ModuleApi:
Note that this method can only be run on the process that is configured to write to the Note that this method can only be run on the process that is configured to write to the
presence stream. By default this is the main process. presence stream. By default this is the main process.
Added in Synapse v1.32.0.
""" """
if self._hs._instance_name not in self._hs.config.worker.writers.presence: if self._hs._instance_name not in self._hs.config.worker.writers.presence:
raise Exception( raise Exception(
@ -648,6 +726,8 @@ class ModuleApi:
Waits `msec` initially before calling `f` for the first time. Waits `msec` initially before calling `f` for the first time.
Added in Synapse v1.39.0.
Args: Args:
f: The function to call repeatedly. f can be either synchronous or f: The function to call repeatedly. f can be either synchronous or
asynchronous, and must follow Synapse's logcontext rules. asynchronous, and must follow Synapse's logcontext rules.
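A short usage sketch for the repeating-call helper documented above. It assumes the `looping_background_call` name under which this API is exposed on `ModuleApi` in this Synapse series; `api` is a `ModuleApi` instance and the function body is illustrative:

async def prune_stale_entries() -> None:
    # Periodic maintenance work; must follow Synapse's logcontext rules.
    ...

def start_background_jobs(api) -> None:
    # Run once an hour; the interval argument is in milliseconds.
    api.looping_background_call(prune_stale_entries, 60 * 60 * 1000)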
@@ -687,6 +767,8 @@ class ModuleApi:
     ):
         """Send an email on behalf of the homeserver.

+        Added in Synapse v1.39.0.
+
         Args:
             recipient: The email address for the recipient.
             subject: The email's subject.
@@ -710,6 +792,8 @@ class ModuleApi:
         By default, Synapse will look for these templates in its configured template
         directory, but another directory to search in can be provided.

+        Added in Synapse v1.39.0.
+
         Args:
             filenames: The name of the template files to look for.
             custom_template_directory: An additional directory to look for the files in.
@@ -727,13 +811,13 @@ class ModuleApi:
         """
         Checks whether an ID (user id, room, ...) comes from this homeserver.

+        Added in Synapse v1.44.0.
+
         Args:
             id: any Matrix id (e.g. user id, room id, ...), either as a raw id,
                 e.g. string "@user:example.com" or as a parsed UserID, RoomID, ...

         Returns:
             True if id comes from this homeserver, False otherwise.
-
-        Added in Synapse v1.44.0.
         """
         if isinstance(id, DomainSpecificString):
             return self._hs.is_mine(id)
@@ -746,6 +830,8 @@ class ModuleApi:
         """
         Return the list of user IPs and agents for a user.

+        Added in Synapse v1.44.0.
+
         Args:
             user_id: the id of a user, local or remote
             since_ts: a timestamp in seconds since the epoch,
@@ -754,8 +840,6 @@ class ModuleApi:
             The list of all UserIpAndAgent that the user has
             used to connect to this homeserver since `since_ts`.
             If the user is remote, this list is empty.
-
-        Added in Synapse v1.44.0.
         """
         # Don't hit the db if this is not a local user.
         is_mine = False
@@ -773,9 +857,9 @@ class ModuleApi:
         # Sanitize some of the data. We don't want to return tokens.
         return [
             UserIpAndAgent(
-                ip=str(data["ip"]),
-                user_agent=str(data["user_agent"]),
-                last_seen=int(data["last_seen"]),
+                ip=data["ip"],
+                user_agent=data["user_agent"],
+                last_seen=data["last_seen"],
             )
             for data in raw_data
         ]
@@ -794,6 +878,8 @@ class PublicRoomListManager:
     async def room_is_in_public_room_list(self, room_id: str) -> bool:
         """Checks whether a room is in the public room list.

+        Added in Synapse v1.22.0.
+
         Args:
             room_id: The ID of the room.
@@ -810,6 +896,8 @@ class PublicRoomListManager:
     async def add_room_to_public_room_list(self, room_id: str) -> None:
         """Publishes a room to the public room list.

+        Added in Synapse v1.22.0.
+
         Args:
             room_id: The ID of the room.
         """
@@ -818,6 +906,8 @@ class PublicRoomListManager:
     async def remove_room_from_public_room_list(self, room_id: str) -> None:
         """Removes a room from the public room list.

+        Added in Synapse v1.22.0.
+
         Args:
             room_id: The ID of the room.
         """
@@ -14,9 +14,16 @@
 """Exception types which are exposed as part of the stable module API"""

-from synapse.api.errors import (  # noqa: F401
+from synapse.api.errors import (
     InvalidClientCredentialsError,
     RedirectException,
     SynapseError,
 )
-from synapse.config._base import ConfigError  # noqa: F401
+from synapse.config._base import ConfigError
+
+__all__ = [
+    "InvalidClientCredentialsError",
+    "RedirectException",
+    "SynapseError",
+    "ConfigError",
+]
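With the explicit `__all__` replacing the `# noqa: F401` comments, the re-exports are declared rather than linter-suppressed. Modules import the stable types the same way as before; a minimal sketch (the config-parsing function is illustrative):

from synapse.module_api.errors import ConfigError

def parse_config(config: dict) -> dict:
    # Raising ConfigError surfaces a readable error at homeserver startup.
    if "required_key" not in config:
        raise ConfigError("required_key must be set")
    return config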
@@ -379,7 +379,14 @@ class Notifier:
         stream_key: str,
         new_token: Union[int, RoomStreamToken],
         users: Optional[Collection[Union[str, UserID]]] = None,
-    ):
+    ) -> None:
+        """Notify application services of ephemeral event activity.
+
+        Args:
+            stream_key: The stream the event came from.
+            new_token: The value of the new stream token.
+            users: The users that should be informed of the new event, if any.
+        """
         try:
             stream_token = None
             if isinstance(new_token, int):
@@ -402,10 +409,17 @@ class Notifier:
         new_token: Union[int, RoomStreamToken],
         users: Optional[Collection[Union[str, UserID]]] = None,
         rooms: Optional[Collection[str]] = None,
-    ):
+    ) -> None:
         """Used to inform listeners that something has happened event wise.

         Will wake up all listeners for the given users and rooms.
+
+        Args:
+            stream_key: The stream the event came from.
+            new_token: The value of the new stream token.
+            users: The users that should be informed of the new event.
+            rooms: A collection of room IDs for which each joined member will be
+                informed of the new event.
         """
         users = users or []
         rooms = rooms or []
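A toy model, not Synapse's `Notifier`, of the behaviour the new docstrings describe: when a stream advances, wake every listener registered for each of the given users. All names here are illustrative:

from collections import defaultdict
from typing import Callable, DefaultDict, List

listeners: DefaultDict[str, List[Callable[[int], None]]] = defaultdict(list)

def on_new_event(stream_key: str, new_token: int, users: List[str]) -> None:
    # Inform every listener for each interested user of the new stream token.
    for user in users:
        for wake in listeners[user]:
            wake(new_token)

listeners["@alice:example.com"].append(lambda token: print("woken at", token))
on_new_event("receipt_key", 7, users=["@alice:example.com"])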
synapse/py.typed (new, empty file)
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from typing import TYPE_CHECKING
+
 from synapse.http.server import JsonResource
 from synapse.replication.http import (
     account_data,
@@ -26,16 +28,19 @@ from synapse.replication.http import (
     streams,
 )

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 REPLICATION_PREFIX = "/_synapse/replication"

 class ReplicationRestResource(JsonResource):
-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         # We enable extracting jaeger contexts here as these are internal APIs.
         super().__init__(hs, canonical_json=False, extract_context=True)
         self.register_servlets(hs)

-    def register_servlets(self, hs):
+    def register_servlets(self, hs: "HomeServer"):
         send_event.register_servlets(hs, self)
         federation.register_servlets(hs, self)
         presence.register_servlets(hs, self)
@@ -17,7 +17,7 @@ import logging
 import re
 import urllib
 from inspect import signature
-from typing import TYPE_CHECKING, Dict, List, Tuple
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Tuple

 from prometheus_client import Counter, Gauge
@@ -156,7 +156,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
         pass

     @classmethod
-    def make_client(cls, hs):
+    def make_client(cls, hs: "HomeServer"):
         """Create a client that makes requests.

         Returns a callable that accepts the same parameters as
@@ -208,7 +208,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
             url_args.append(txn_id)

         if cls.METHOD == "POST":
-            request_func = client.post_json_get_json
+            request_func: Callable[
+                ..., Awaitable[Any]
+            ] = client.post_json_get_json
         elif cls.METHOD == "PUT":
             request_func = client.put_json
         elif cls.METHOD == "GET":
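The new annotation on `request_func` pins a variable that is assigned different callables in different branches to one common type, which mypy would otherwise infer too narrowly from the first assignment. The same pattern in miniature, with stand-in functions:

from typing import Any, Awaitable, Callable

async def post_json_get_json(uri: str, body: Any) -> Any: ...
async def put_json(uri: str, body: Any) -> Any: ...

def pick_request_func(method: str) -> Callable[..., Awaitable[Any]]:
    if method == "POST":
        # Annotate at the first assignment so the other branches type-check too.
        request_func: Callable[..., Awaitable[Any]] = post_json_get_json
    else:
        request_func = put_json
    return request_func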
@@ -13,10 +13,14 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -37,7 +41,7 @@ class ReplicationUserAccountDataRestServlet(ReplicationEndpoint):
     PATH_ARGS = ("user_id", "account_data_type")
     CACHE = False

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.handler = hs.get_account_data_handler()
@@ -78,7 +82,7 @@ class ReplicationRoomAccountDataRestServlet(ReplicationEndpoint):
     PATH_ARGS = ("user_id", "room_id", "account_data_type")
     CACHE = False

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.handler = hs.get_account_data_handler()
@@ -119,7 +123,7 @@ class ReplicationAddTagRestServlet(ReplicationEndpoint):
     PATH_ARGS = ("user_id", "room_id", "tag")
     CACHE = False

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.handler = hs.get_account_data_handler()
@@ -162,7 +166,7 @@ class ReplicationRemoveTagRestServlet(ReplicationEndpoint):
     )
     CACHE = False

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.handler = hs.get_account_data_handler()
@@ -183,7 +187,7 @@ class ReplicationRemoveTagRestServlet(ReplicationEndpoint):
         return 200, {"max_stream_id": max_stream_id}

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationUserAccountDataRestServlet(hs).register(http_server)
     ReplicationRoomAccountDataRestServlet(hs).register(http_server)
     ReplicationAddTagRestServlet(hs).register(http_server)
@@ -13,9 +13,13 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.replication.http._base import ReplicationEndpoint

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -51,7 +55,7 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
     PATH_ARGS = ("user_id",)
     CACHE = False

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.device_list_updater = hs.get_device_handler().device_list_updater
@@ -68,5 +72,5 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
         return 200, user_devices

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
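The replication servlet modules in this part of the diff all change in the same way, so one hedged skeleton covers them: a `ReplicationEndpoint` subclass plus a module-level `register_servlets` function, now carrying the `"HomeServer"` hint. The class name, `NAME`, and `PATH_ARGS` values below are illustrative; the shape is taken from the hunks above:

from typing import TYPE_CHECKING

from synapse.replication.http._base import ReplicationEndpoint

if TYPE_CHECKING:
    from synapse.server import HomeServer

class ExampleReplicationServlet(ReplicationEndpoint):
    NAME = "example"            # forms part of the replication URL
    PATH_ARGS = ("user_id",)    # path components following the name
    CACHE = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.store = hs.get_datastore()

def register_servlets(hs: "HomeServer", http_server):
    ExampleReplicationServlet(hs).register(http_server)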
@@ -13,6 +13,7 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import make_event_from_dict
@@ -21,6 +22,9 @@ from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
 from synapse.util.metrics import Measure

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -56,7 +60,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
     NAME = "fed_send_events"
     PATH_ARGS = ()

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.store = hs.get_datastore()
@@ -151,7 +155,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
     NAME = "fed_send_edu"
     PATH_ARGS = ("edu_type",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.store = hs.get_datastore()
@@ -194,7 +198,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
     # This is a query, so let's not bother caching
     CACHE = False

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.store = hs.get_datastore()
@@ -238,7 +242,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
     NAME = "fed_cleanup_room"
     PATH_ARGS = ("room_id",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.store = hs.get_datastore()
@@ -273,7 +277,7 @@ class ReplicationStoreRoomOnOutlierMembershipRestServlet(ReplicationEndpoint):
     NAME = "store_room_on_outlier_membership"
     PATH_ARGS = ("room_id",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.store = hs.get_datastore()
@@ -289,7 +293,7 @@ class ReplicationStoreRoomOnOutlierMembershipRestServlet(ReplicationEndpoint):
         return 200, {}

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationFederationSendEventsRestServlet(hs).register(http_server)
     ReplicationFederationSendEduRestServlet(hs).register(http_server)
     ReplicationGetQueryRestServlet(hs).register(http_server)
@@ -13,10 +13,14 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -30,7 +34,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
     NAME = "device_check_registered"
     PATH_ARGS = ("user_id",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
         self.registration_handler = hs.get_registration_handler()
@@ -82,5 +86,5 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
         return 200, res

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     RegisterDeviceReplicationServlet(hs).register(http_server)
@@ -45,7 +45,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
     NAME = "remote_join"
     PATH_ARGS = ("room_id", "user_id")

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.federation_handler = hs.get_federation_handler()
@@ -320,7 +320,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
     PATH_ARGS = ("room_id", "user_id", "change")
     CACHE = False  # No point caching as should return instantly.

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.registeration_handler = hs.get_registration_handler()
@@ -360,7 +360,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
         return 200, {}

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationRemoteJoinRestServlet(hs).register(http_server)
     ReplicationRemoteRejectInviteRestServlet(hs).register(http_server)
     ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server)
@@ -117,6 +117,6 @@ class ReplicationPresenceSetState(ReplicationEndpoint):
         )

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationBumpPresenceActiveTime(hs).register(http_server)
     ReplicationPresenceSetState(hs).register(http_server)
@@ -67,5 +67,5 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint):
         return 200, {}

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationRemovePusherRestServlet(hs).register(http_server)
@@ -13,10 +13,14 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -26,7 +30,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
     NAME = "register_user"
     PATH_ARGS = ("user_id",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
         self.store = hs.get_datastore()
         self.registration_handler = hs.get_registration_handler()
@@ -100,7 +104,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
     NAME = "post_register"
     PATH_ARGS = ("user_id",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
         self.store = hs.get_datastore()
         self.registration_handler = hs.get_registration_handler()
@@ -130,6 +134,6 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
         return 200, {}

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationRegisterServlet(hs).register(http_server)
     ReplicationPostRegisterActionsServlet(hs).register(http_server)
@@ -13,6 +13,7 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import make_event_from_dict
@@ -22,6 +23,9 @@ from synapse.replication.http._base import ReplicationEndpoint
 from synapse.types import Requester, UserID
 from synapse.util.metrics import Measure

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -57,7 +61,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
     NAME = "send_event"
     PATH_ARGS = ("event_id",)

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self.event_creation_handler = hs.get_event_creation_handler()
@@ -135,5 +139,5 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
         )

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationSendEventRestServlet(hs).register(http_server)
@@ -13,11 +13,15 @@
 # limitations under the License.

 import logging
+from typing import TYPE_CHECKING

 from synapse.api.errors import SynapseError
 from synapse.http.servlet import parse_integer
 from synapse.replication.http._base import ReplicationEndpoint

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
@@ -46,7 +50,7 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint):
     PATH_ARGS = ("stream_name",)
     METHOD = "GET"

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

         self._instance_name = hs.get_instance_name()
@@ -74,5 +78,5 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint):
         )

-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server):
     ReplicationGetStreamUpdates(hs).register(http_server)
@@ -13,18 +13,21 @@
 # limitations under the License.

 import logging
-from typing import Optional
+from typing import TYPE_CHECKING, Optional

 from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)

 class BaseSlavedStore(CacheInvalidationWorkerStore):
-    def __init__(self, database: DatabasePool, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)
         if isinstance(self.database_engine, PostgresEngine):
             self._cache_id_gen: Optional[
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from typing import TYPE_CHECKING
from synapse.storage.database import DatabasePool from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY
from synapse.util.caches.lrucache import LruCache from synapse.util.caches.lrucache import LruCache
from ._base import BaseSlavedStore from ._base import BaseSlavedStore
if TYPE_CHECKING:
from synapse.server import HomeServer
class SlavedClientIpStore(BaseSlavedStore): class SlavedClientIpStore(BaseSlavedStore):
def __init__(self, database: DatabasePool, db_conn, hs): def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
super().__init__(database, db_conn, hs) super().__init__(database, db_conn, hs)
self.client_ip_last_seen: LruCache[tuple, int] = LruCache( self.client_ip_last_seen: LruCache[tuple, int] = LruCache(
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from typing import TYPE_CHECKING
+
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
@@ -20,9 +22,12 @@ from synapse.storage.databases.main.devices import DeviceWorkerStore
 from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
-    def __init__(self, database: DatabasePool, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)

         self.hs = hs
Some files were not shown because too many files have changed in this diff.