Merge remote-tracking branch 'upstream/release-v1.73'

Tulir Asokan 2022-11-29 15:51:33 +02:00
commit bb26f5f0a9
167 changed files with 3234 additions and 1676 deletions


@@ -4,7 +4,7 @@
root = true
# 4 space indentation
-[*.py]
+[*.{py,pyi}]
indent_style = space
indent_size = 4
max_line_length = 88


@@ -74,6 +74,36 @@ body:
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
- I don't know
validations:
required: true
- type: input
id: database
attributes:
label: Database
description: |
Are you using SQLite or PostgreSQL? What's the version of your database?
If PostgreSQL, please also answer the following:
- are you using a single PostgreSQL server
or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
- have you previously ported from SQLite using the Synapse "portdb" script?
- have you previously restored from a backup?
validations:
required: true
- type: dropdown
id: workers
attributes:
label: Workers
description: |
Are you running a single Synapse process, or are you running
[2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
options:
- Single process
- Multiple workers
- I don't know
validations:
required: true
- type: textarea
id: platform
attributes:
@@ -83,17 +113,28 @@ body:
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
- type: textarea
id: config
attributes:
label: Configuration
description: |
Do you have any unusual config options turned on? If so, please provide details.
- Experimental or undocumented features
- [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
- [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
- [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
-This will be automatically formatted into code, so there is no need for backticks.
+This will be automatically formatted into code, so there is no need for backticks (`\``).
Please be careful to remove any personal or private data.
-**Bug reports are usually very difficult to diagnose without logging.**
+**Bug reports are usually impossible to diagnose without logging.**
render: shell
validations:
required: true


@@ -27,7 +27,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -61,7 +61,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -134,7 +134,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2


@@ -0,0 +1,74 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead
name: Store complement-synapse image in ghcr.io
on:
push:
branches: [ "master" ]
schedule:
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
branch:
required: true
default: 'develop'
type: choice
options:
- develop
- master
# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
name: Build and push complement image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout specific branch (debug build)
uses: actions/checkout@v3
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
uses: actions/checkout@v3
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
uses: actions/checkout@v3
if: github.event_name == 'push'
with:
ref: master
- name: Login to registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |
type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
type=sha,format=long
- name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
run: scripts-dev/complement.sh --build-only
- name: Tag and push generated image
run: |
for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
echo "tag and push $TAG"
docker tag complement-synapse $TAG
docker push $TAG
done


@@ -27,6 +27,7 @@ jobs:
rust:
- 'rust/**'
- 'Cargo.toml'
- 'Cargo.lock'
check-sampleconfig:
runs-on: ubuntu-latest
@@ -102,7 +103,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
components: clippy
@@ -122,7 +123,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
components: rustfmt
@@ -184,7 +185,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -228,7 +229,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -346,7 +347,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -489,7 +490,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -517,7 +518,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2


@@ -18,7 +18,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -43,7 +43,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -82,7 +82,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
-uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2


@@ -1,3 +1,68 @@
Synapse 1.73.0rc1 (2022-11-29)
==============================
Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
Features
--------
- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
- Add support for handling avatars in SSO login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
- Prune a user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
Bugfixes
--------
- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/blob/travis/msc/otk-dl-appservice/proposals/3202-encrypted-appservices.md). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
Improved Documentation
----------------------
- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
Deprecations and Removals
-------------------------
- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
Internal Changes
----------------
- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496), [\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
Synapse 1.72.0 (2022-11-22)
===========================

Cargo.lock (generated)

@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.147"
+version = "1.0.148"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
+checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.147"
+version = "1.0.148"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
+checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
dependencies = [
"proc-macro2",
"quote",
@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.87"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
+checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
dependencies = [
"itoa",
"ryu",
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
-version = "1.0.102"
+version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
+checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
dependencies = [
"proc-macro2",
"quote",

debian/changelog (vendored)

@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
* New Synapse release 1.73.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Nov 2022 12:28:13 +0000
matrix-synapse-py3 (1.72.0) stable; urgency=medium
* New Synapse release 1.72.0.


@@ -100,8 +100,6 @@ experimental_features:
# client-side support for partial state in /send_join responses
faster_joins: true
{% endif %}
-# Enable jump to date endpoint
-msc3030_enabled: true
# Filtering /messages by relation type.
msc3874_enabled: true


@@ -140,6 +140,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
],
"shared_extra_conf": {},
@@ -163,6 +164,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/federation/(v1|v2)/invite/",
"^/_matrix/federation/(v1|v2)/query_auth/",
"^/_matrix/federation/(v1|v2)/event_auth/",
"^/_matrix/federation/v1/timestamp_to_event/",
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
"^/_matrix/federation/(v1|v2)/user/devices/",
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -213,10 +215,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"shared_extra_conf": {},
-"worker_extra_conf": (
-"worker_main_http_uri: http://127.0.0.1:%d"
-% (MAIN_PROCESS_HTTP_LISTENER_PORT,)
-),
+"worker_extra_conf": "",
},
"account_data": {
"app": "synapse.app.generic_worker",

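To illustrate the effect of this change, here is a minimal sketch of the kind of standalone configuration a generic worker handling `keys/upload` could use once `worker_main_http_uri` is gone. It is illustrative only: the worker name, ports and log path are placeholders, and only `worker_replication_http_port` and `worker_listeners` are options named in the surrounding documentation.

```yaml
# Illustrative sketch only; names, ports and paths are placeholders.
# Note there is no worker_main_http_uri: from Synapse 1.73 that option is ignored.
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```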

@@ -88,6 +88,28 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.73.0
## Legacy Prometheus metric names have now been removed
Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
and offered an option to disable them.
Synapse v1.71.0 disabled legacy Prometheus metric names by default.
This version, v1.73.0, removes those legacy Prometheus metric names entirely.
This also means that the `enable_legacy_metrics` configuration option has been
removed; it will no longer be possible to re-enable the legacy metric names.
If you use metrics and have not yet updated your Grafana dashboard(s),
Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
to this version.
Note that the included Grafana dashboard was updated in v1.72.0 to correct some
metric names which were missed when legacy metrics were disabled by default.
See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
for more context.
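As a concrete illustration of the kind of rename to check for when updating dashboards and alerting rules, here is a sketch of a Prometheus alert being migrated off a legacy counter name. The alert name and threshold are made up, and the exact replacement metric name should be verified against the current Synapse metrics documentation; the pattern to look for is legacy counters gaining the OpenMetrics `_total` suffix and colon-style names (such as `synapse_util_caches_response_cache:hits`) being replaced.

```yaml
# Illustrative only: migrating an alerting rule off a legacy metric name.
# Verify the replacement name against the current metrics documentation.
groups:
  - name: synapse-example
    rules:
      - alert: SynapseOutgoingEDUsStalled   # hypothetical alert name
        # Before (legacy name, removed in v1.73.0):
        #   rate(synapse_federation_client_sent_edus[5m]) == 0
        expr: rate(synapse_federation_client_sent_edus_total[5m]) == 0
        for: 10m
```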
# Upgrading to v1.72.0
## Dropping support for PostgreSQL 10


@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
## Making an Admin API request
-For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
+For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.


@@ -2437,31 +2437,6 @@ Example configuration:
enable_metrics: true
```
---
-### `enable_legacy_metrics`
-Set to `true` to publish both legacy and non-legacy Prometheus metric names,
-or to `false` to only publish non-legacy Prometheus metric names.
-Defaults to `false`. Has no effect if `enable_metrics` is `false`.
-**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
-Legacy metric names include:
-- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
-- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
-These legacy metric names are unconventional and not compliant with OpenMetrics standards.
-They are included for backwards compatibility.
-Example configuration:
-```yaml
-enable_legacy_metrics: false
-```
-See https://github.com/matrix-org/synapse/issues/11106 for context.
-*Since v1.67.0.*
-**Will be removed in v1.73.0.**
----
### `sentry`
Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2993,10 +2968,17 @@ Options for each entry include:
For the default provider, the following settings are available:
-* subject_claim: name of the claim containing a unique identifier
+* `subject_claim`: name of the claim containing a unique identifier
for the user. Defaults to 'sub', which OpenID Connect
compliant providers should provide.
* `picture_claim`: name of the claim containing a URL for the user's profile picture.
Defaults to 'picture', which OpenID Connect compliant providers should provide
and which must refer to a direct image file such as a PNG, JPEG, or GIF file.
Currently only supported in monolithic (single-process) server configurations
where the media repository runs within the Synapse process.
* `localpart_template`: Jinja2 template for the localpart of the MXID.
If this is not set, the user will be prompted to choose their
own username (see the documentation for the `sso_auth_account_details.html`
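Putting the claim options above together, a minimal sketch of an `oidc_providers` entry might look as follows. The provider identifier, issuer and client credentials are placeholders, and the nesting of the claim settings under `user_mapping_provider.config` is assumed here rather than shown in the excerpt above.

```yaml
# Illustrative sketch only; idp_id, issuer and client credentials are placeholders,
# and the user_mapping_provider.config nesting is assumed for these claim options.
oidc_providers:
  - idp_id: example
    idp_name: Example SSO
    issuer: "https://id.example.com/"
    client_id: "synapse"
    client_secret: "secret-goes-here"
    user_mapping_provider:
      config:
        subject_claim: "sub"        # unique identifier claim (the default)
        picture_claim: "picture"    # avatar claim; must point at a direct image file
        localpart_template: "{{ user.preferred_username }}"
```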


@@ -135,8 +135,8 @@ In the config file for each worker, you must specify:
[`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
with an `http` listener.
-* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
-the main process (`worker_main_http_uri`).
+* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
+the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
For example:
@@ -191,6 +191,7 @@ information.
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/timestamp_to_event/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/key/v2/query
@@ -218,10 +219,10 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/v1/rooms/.*/timestamp_to_event$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
# Encryption requests
-# Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -376,7 +377,7 @@ responsible for
- persisting them to the DB, and finally
- updating the events stream.
Because load is sharded in this way, you *must* restart all worker instances when
adding or removing event persisters.
An `event_persister` should not be mistaken for an `event_creator`.
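To make the sharding point concrete, here is a hedged sketch of a homeserver config fragment with two event persisters sharing the events stream; the worker names, hosts and ports are placeholders, and `instance_map`/`stream_writers` are the options this documentation describes for stream writers.

```yaml
# Illustrative sketch: two event persisters sharing the events stream.
# Names, hosts and ports are placeholders. Remember that all workers must be
# restarted when entries are added to or removed from stream_writers.events.
instance_map:
  event_persister1:
    host: localhost
    port: 8034
  event_persister2:
    host: localhost
    port: 8035
stream_writers:
  events:
    - event_persister1
    - event_persister2
```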


@@ -11,6 +11,7 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True
files =
docker/,
@@ -58,11 +59,6 @@ exclude = (?x)
|tests/server_notices/test_resource_limits_server_notices.py
|tests/test_state.py
|tests/test_terms_auth.py
-|tests/util/caches/test_cached_call.py
-|tests/util/caches/test_deferred_cache.py
-|tests/util/caches/test_descriptors.py
-|tests/util/caches/test_response_cache.py
-|tests/util/caches/test_ttlcache.py
|tests/util/test_async_helpers.py
|tests/util/test_batching_queue.py
|tests/util/test_dict_cache.py
@@ -117,9 +113,15 @@ disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
[mypy-tests.storage.test_id_generators]
disallow_untyped_defs = True
[mypy-tests.storage.test_profile]
disallow_untyped_defs = True
[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True
@@ -129,9 +131,14 @@ disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True
-[mypy-tests.utils]
+[mypy-tests.util.caches.*]
disallow_untyped_defs = True
[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
[mypy-tests.utils]
disallow_untyped_defs = True
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.

poetry.lock (generated)

@@ -663,7 +663,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[[package]]
name = "phonenumbers"
-version = "8.12.56"
+version = "8.13.0"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
@@ -814,15 +814,15 @@ python-versions = ">=3.6"
[[package]]
name = "pygithub"
-version = "1.56"
+version = "1.57"
description = "Use the full Github API v3"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
deprecated = "*"
-pyjwt = ">=2.0"
+pyjwt = ">=2.4.0"
pynacl = ">=1.4.0"
requests = ">=2.14.0"
@@ -1076,7 +1076,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.10.1"
+version = "1.11.0"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1098,6 +1098,7 @@ fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
@@ -1256,11 +1257,11 @@ python-versions = ">= 3.5"
[[package]]
name = "towncrier"
-version = "21.9.0"
+version = "22.8.0"
description = "Building newsfiles for your project."
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
[package.dependencies]
click = "*"
@@ -1268,7 +1269,7 @@ click-default-group = "*"
incremental = "*"
jinja2 = "*"
setuptools = "*"
-tomli = {version = "*", markers = "python_version >= \"3.6\""}
+tomli = "*"
[package.extras]
dev = ["packaging"]
@@ -1439,7 +1440,7 @@ python-versions = "*"
[[package]]
name = "types-pillow"
-version = "9.2.2.1"
+version = "9.3.0.1"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -2257,8 +2258,8 @@ pathspec = [
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
]
phonenumbers = [
-{file = "phonenumbers-8.12.56-py2.py3-none-any.whl", hash = "sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3"},
-{file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"},
+{file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"},
+{file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"},
]
pillow = [
{file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
@@ -2419,8 +2420,8 @@ pyflakes = [
{file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
]
pygithub = [
-{file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"},
-{file = "PyGithub-1.56.tar.gz", hash = "sha256:80c6d85cf0f9418ffeb840fd105840af694c4f17e102970badbaf678251f2a01"},
+{file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
+{file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
]
pygments = [
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
@@ -2568,8 +2569,8 @@ semantic-version = [
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
-{file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"},
-{file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"},
+{file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"},
+{file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2720,8 +2721,8 @@ tornado = [
{file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
]
towncrier = [
-{file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
-{file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
+{file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"},
+{file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"},
]
treq = [
{file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
@@ -2808,8 +2809,8 @@ types-opentracing = [
{file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
]
types-pillow = [
-{file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"},
-{file = "types_Pillow-9.2.2.1-py3-none-any.whl", hash = "sha256:3a6a871cade8428433a21ef459bb0a65532b87d05f9e836a0664431ce445bdcf"},
+{file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"},
+{file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
]
types-psycopg2 = [
{file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"}, {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},


@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.72.0"
+version = "1.73.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"


@@ -443,9 +443,9 @@ packaging==21.3 ; python_full_version >= "3.7.1" and python_full_version < "4.0.
parameterized==0.8.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c \
--hash=sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9
-phonenumbers==8.12.56 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
---hash=sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3 \
---hash=sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868
+phonenumbers==8.13.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
+--hash=sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4 \
+--hash=sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0
pillow==9.3.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
--hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \


@@ -274,6 +274,156 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
priority_class: 1,
@@ -302,6 +452,126 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.encrypted")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.message")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.file")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.image")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.video")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.audio")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
priority_class: 1,


@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Cow;
use std::collections::BTreeMap;
use crate::push::{PushRule, PushRules};
use anyhow::{Context, Error};
use lazy_static::lazy_static;
use log::warn;
@@ -29,6 +31,33 @@ use super::{
lazy_static! {
/// Used to parse the `is` clause in the room member count condition.
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
/// Used to determine which MSC3931 room version feature flags are actually known to
/// the push evaluator.
static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
];
/// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
/// declare Extensible Events support ultimately *disable* push rules which do not declare
/// *any* MSC3931 room_version_supports condition).
static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
"global/override/.m.rule.master".to_string(),
"global/override/.m.rule.roomnotif".to_string(),
"global/content/.m.rule.contains_user_name".to_string(),
];
}
enum RoomVersionFeatures {
ExtensibleEvents,
}
impl RoomVersionFeatures {
fn as_str(&self) -> &'static str {
match self {
RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
}
}
} }
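Taken together with the check added further down in run(), the two statics above implement the MSC3932 gating behaviour. A minimal Python sketch of that decision (names are illustrative, not the real bindings):

SAFE_EXTENSIBLE_EVENTS_RULE_IDS = {
    "global/override/.m.rule.master",
    "global/override/.m.rule.roomnotif",
    "global/content/.m.rule.contains_user_name",
}

def rule_is_skipped(rule_id, has_room_version_condition, room_version_feature_flags):
    """MSC3932: in a room version declaring extensible-events support, skip any
    push rule that neither has a room_version_supports condition nor is on the
    safe list."""
    supports_extensible_events = (
        "org.matrix.msc3932.extensible_events" in room_version_feature_flags
    )
    return (
        supports_extensible_events
        and not has_room_version_condition
        and rule_id not in SAFE_EXTENSIBLE_EVENTS_RULE_IDS
    )

# e.g. the stock ".m.rule.message" rule is disabled in an extensible-events room:
assert rule_is_skipped(
    "global/underride/.m.rule.message",
    False,
    ["org.matrix.msc3932.extensible_events"],
)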
/// Allows running a set of push rules against a particular event. /// Allows running a set of push rules against a particular event.
@ -57,6 +86,13 @@ pub struct PushRuleEvaluator {
/// If msc3664, push rules for related events, is enabled. /// If msc3664, push rules for related events, is enabled.
related_event_match_enabled: bool, related_event_match_enabled: bool,
/// If MSC3931 is applicable, the feature flags for the room version.
room_version_feature_flags: Vec<String>,
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
} }
#[pymethods] #[pymethods]
@ -70,6 +106,8 @@ impl PushRuleEvaluator {
notification_power_levels: BTreeMap<String, i64>, notification_power_levels: BTreeMap<String, i64>,
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>, related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
related_event_match_enabled: bool, related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let body = flattened_keys let body = flattened_keys
.get("content.body") .get("content.body")
@ -84,6 +122,8 @@ impl PushRuleEvaluator {
sender_power_level, sender_power_level,
related_events_flattened, related_events_flattened,
related_event_match_enabled, related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
}) })
} }
@ -106,7 +146,22 @@ impl PushRuleEvaluator {
continue; continue;
} }
let rule_id = &push_rule.rule_id().to_string();
let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
let mut has_rver_condition = false;
for condition in push_rule.conditions.iter() { for condition in push_rule.conditions.iter() {
has_rver_condition = has_rver_condition
|| match condition {
Condition::Known(known) => match known {
// per MSC3932, we just need *any* room version condition to match
KnownCondition::RoomVersionSupports { feature: _ } => true,
_ => false,
},
_ => false,
};
match self.match_condition(condition, user_id, display_name) { match self.match_condition(condition, user_id, display_name) {
Ok(true) => {} Ok(true) => {}
Ok(false) => continue 'outer, Ok(false) => continue 'outer,
@ -117,6 +172,13 @@ impl PushRuleEvaluator {
} }
} }
// MSC3932: Disable push rules in extensible event-supporting room versions if they
// don't describe *any* MSC3931 room version condition, unless the rule is on the
// safe list.
if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
continue;
}
let actions = push_rule let actions = push_rule
.actions .actions
.iter() .iter()
@ -204,6 +266,15 @@ impl PushRuleEvaluator {
false false
} }
} }
KnownCondition::RoomVersionSupports { feature } => {
if !self.msc3931_enabled {
false
} else {
let flag = feature.to_string();
KNOWN_RVER_FLAGS.contains(&flag)
&& self.room_version_feature_flags.contains(&flag)
}
}
}; };
Ok(result) Ok(result)
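In Python terms, the new RoomVersionSupports arm above evaluates roughly as follows (a sketch mirroring the Rust logic, not a real Synapse helper):

KNOWN_RVER_FLAGS = ["org.matrix.msc3932.extensible_events"]

def room_version_supports_matches(feature, msc3931_enabled, room_version_feature_flags):
    # The condition only matches when MSC3931 is enabled, the flag is one the
    # evaluator knows about, *and* the room version actually declares it.
    if not msc3931_enabled:
        return False
    return feature in KNOWN_RVER_FLAGS and feature in room_version_feature_flags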
@ -362,9 +433,59 @@ fn push_rule_evaluator() {
BTreeMap::new(), BTreeMap::new(),
BTreeMap::new(), BTreeMap::new(),
true, true,
vec![],
true,
) )
.unwrap(); .unwrap();
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob")); let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
assert_eq!(result.len(), 3); assert_eq!(result.len(), 3);
} }
#[test]
fn test_requires_room_version_supports_condition() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
BTreeMap::new(),
BTreeMap::new(),
false,
flags,
true,
)
.unwrap();
// first test: are the master and contains_user_name rules excluded from the "requires room
// version condition" check?
let mut result = evaluator.run(
&FilteredPushRules::default(),
Some("@bob:example.org"),
None,
);
assert_eq!(result.len(), 3);
// second test: if an appropriate push rule is in play, does it get handled?
let custom_rule = PushRule {
rule_id: Cow::from("global/underride/.org.example.extensible"),
priority_class: 1, // underride
conditions: Cow::from(vec![Condition::Known(
KnownCondition::RoomVersionSupports {
feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
},
)]),
actions: Cow::from(vec![Action::Notify]),
default: false,
default_enabled: true,
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
None,
None,
);
assert_eq!(result.len(), 1);
}

View file

@ -277,6 +277,10 @@ pub enum KnownCondition {
SenderNotificationPermission { SenderNotificationPermission {
key: Cow<'static, str>, key: Cow<'static, str>,
}, },
#[serde(rename = "org.matrix.msc3931.room_version_supports")]
RoomVersionSupports {
feature: Cow<'static, str>,
},
} }
impl IntoPy<PyObject> for Condition { impl IntoPy<PyObject> for Condition {
@ -408,6 +412,7 @@ pub struct FilteredPushRules {
push_rules: PushRules, push_rules: PushRules,
enabled_map: BTreeMap<String, bool>, enabled_map: BTreeMap<String, bool>,
msc3664_enabled: bool, msc3664_enabled: bool,
msc1767_enabled: bool,
} }
#[pymethods] #[pymethods]
@ -417,11 +422,13 @@ impl FilteredPushRules {
push_rules: PushRules, push_rules: PushRules,
enabled_map: BTreeMap<String, bool>, enabled_map: BTreeMap<String, bool>,
msc3664_enabled: bool, msc3664_enabled: bool,
msc1767_enabled: bool,
) -> Self { ) -> Self {
Self { Self {
push_rules, push_rules,
enabled_map, enabled_map,
msc3664_enabled, msc3664_enabled,
msc1767_enabled,
} }
} }
@ -446,6 +453,10 @@ impl FilteredPushRules {
return false; return false;
} }
if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
return false;
}
true true
}) })
.map(|r| { .map(|r| {
@ -491,6 +502,18 @@ fn test_deserialize_unstable_msc3664_condition() {
)); ));
} }
#[test]
fn test_deserialize_unstable_msc3931_condition() {
let json =
r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
));
}
#[test] #[test]
fn test_deserialize_custom_condition() { fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#; let json = r#"{"kind":"custom_tag"}"#;

View file

@ -162,9 +162,9 @@ else
# We only test faster room joins on monoliths, because they are purposefully # We only test faster room joins on monoliths, because they are purposefully
# being developed without worker support to start with. # being developed without worker support to start with.
# #
# The tests for importing historical messages (MSC2716) and jump to date (MSC3030) # The tests for importing historical messages (MSC2716) also only pass with monoliths,
# also only pass with monoliths, currently. # currently.
test_tags="$test_tags,faster_joins,msc2716,msc3030" test_tags="$test_tags,faster_joins,msc2716"
fi fi

View file

@ -46,11 +46,12 @@ import signedjson.key
import signedjson.types import signedjson.types
import srvlookup import srvlookup
import yaml import yaml
from requests import PreparedRequest, Response
from requests.adapters import HTTPAdapter from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool from urllib3 import HTTPConnectionPool
# uncomment the following to enable debug logging of http requests # uncomment the following to enable debug logging of http requests
# from httplib import HTTPConnection # from http.client import HTTPConnection
# HTTPConnection.debuglevel = 1 # HTTPConnection.debuglevel = 1
@ -103,6 +104,7 @@ def request(
destination: str, destination: str,
path: str, path: str,
content: Optional[str], content: Optional[str],
verify_tls: bool,
) -> requests.Response: ) -> requests.Response:
if method is None: if method is None:
if content is None: if content is None:
@ -141,7 +143,6 @@ def request(
s.mount("matrix://", MatrixConnectionAdapter()) s.mount("matrix://", MatrixConnectionAdapter())
headers: Dict[str, str] = { headers: Dict[str, str] = {
"Host": destination,
"Authorization": authorization_headers[0], "Authorization": authorization_headers[0],
} }
@ -152,7 +153,7 @@ def request(
method=method, method=method,
url=dest, url=dest,
headers=headers, headers=headers,
verify=False, verify=verify_tls,
data=content, data=content,
stream=True, stream=True,
) )
@ -202,6 +203,12 @@ def main() -> None:
parser.add_argument("--body", help="Data to send as the body of the HTTP request") parser.add_argument("--body", help="Data to send as the body of the HTTP request")
parser.add_argument(
"--insecure",
action="store_true",
help="Disable TLS certificate verification",
)
parser.add_argument( parser.add_argument(
"path", help="request path, including the '/_matrix/federation/...' prefix." "path", help="request path, including the '/_matrix/federation/...' prefix."
) )
@ -227,6 +234,7 @@ def main() -> None:
args.destination, args.destination,
args.path, args.path,
content=args.body, content=args.body,
verify_tls=not args.insecure,
) )
sys.stderr.write("Status Code: %d\n" % (result.status_code,)) sys.stderr.write("Status Code: %d\n" % (result.status_code,))
@ -254,36 +262,93 @@ def read_args_from_config(args: argparse.Namespace) -> None:
class MatrixConnectionAdapter(HTTPAdapter): class MatrixConnectionAdapter(HTTPAdapter):
@staticmethod def send(
def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]: self,
if s[-1] == "]": request: PreparedRequest,
# ipv6 literal (with no port) *args: Any,
return s, 8448 **kwargs: Any,
) -> Response:
# overrides the send() method in the base class.
if ":" in s: # We need to look for .well-known redirects before passing the request up to
out = s.rsplit(":", 1) # HTTPAdapter.send().
assert isinstance(request.url, str)
parsed = urlparse.urlsplit(request.url)
server_name = parsed.netloc
well_known = self._get_well_known(parsed.netloc)
if well_known:
server_name = well_known
# replace the scheme in the uri with https, so that cert verification is done
# also replace the hostname if we got a .well-known result
request.url = urlparse.urlunsplit(
("https", server_name, parsed.path, parsed.query, parsed.fragment)
)
# at this point we also add the host header (otherwise urllib will add one
# based on the `host` from the connection returned by `get_connection`,
# which will be wrong if there is an SRV record).
request.headers["Host"] = server_name
return super().send(request, *args, **kwargs)
def get_connection(
self, url: str, proxies: Optional[Dict[str, str]] = None
) -> HTTPConnectionPool:
# overrides the get_connection() method in the base class
parsed = urlparse.urlsplit(url)
(host, port, ssl_server_name) = self._lookup(parsed.netloc)
print(
f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
)
return self.poolmanager.connection_from_host(
host,
port=port,
scheme="https",
pool_kwargs={"server_hostname": ssl_server_name},
)
@staticmethod
def _lookup(server_name: str) -> Tuple[str, int, str]:
"""
Do an SRV lookup on a server name and return the host:port to connect to
Given the server_name (after any .well-known lookup), return the host, port and
the ssl server name
"""
if server_name[-1] == "]":
# ipv6 literal (with no port)
return server_name, 8448, server_name
if ":" in server_name:
# explicit port
out = server_name.rsplit(":", 1)
try: try:
port = int(out[1]) port = int(out[1])
except ValueError: except ValueError:
raise ValueError("Invalid host:port '%s'" % s) raise ValueError("Invalid host:port '%s'" % (server_name,))
return out[0], port return out[0], port, out[0]
# try a .well-known lookup
if not skip_well_known:
well_known = MatrixConnectionAdapter.get_well_known(s)
if well_known:
return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
try: try:
srv = srvlookup.lookup("matrix", "tcp", s)[0] srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
return srv.host, srv.port print(
f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
file=sys.stderr,
)
return srv.host, srv.port, server_name
except Exception: except Exception:
return s, 8448 return server_name, 8448, server_name
@staticmethod @staticmethod
def get_well_known(server_name: str) -> Optional[str]: def _get_well_known(server_name: str) -> Optional[str]:
uri = "https://%s/.well-known/matrix/server" % (server_name,) if ":" in server_name:
print("fetching %s" % (uri,), file=sys.stderr) # explicit port, or ipv6 literal. Either way, no .well-known
return None
# TODO: check for ipv4 literals
uri = f"https://{server_name}/.well-known/matrix/server"
print(f"fetching {uri}", file=sys.stderr)
try: try:
resp = requests.get(uri) resp = requests.get(uri)
@ -304,19 +369,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
print("Invalid response from %s: %s" % (uri, e), file=sys.stderr) print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
return None return None
def get_connection(
self, url: str, proxies: Optional[Dict[str, str]] = None
) -> HTTPConnectionPool:
parsed = urlparse.urlparse(url)
(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
url = urlparse.urlunparse(
("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
)
return super().get_connection(url, proxies)
if __name__ == "__main__": if __name__ == "__main__":
main() main()
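Taken together, the new send()/get_connection()/_lookup() path resolves a server name roughly as follows. This is a simplified Python sketch (the requests/urllib3 plumbing is omitted; the srvlookup call mirrors the one in the script above):

import requests
import srvlookup

def resolve(server_name):
    """Return (host, port, ssl_server_name) in the same order of preference as
    MatrixConnectionAdapter above."""
    # 1. .well-known delegation, only when there is no explicit port or IPv6 literal.
    if ":" not in server_name:
        try:
            resp = requests.get(f"https://{server_name}/.well-known/matrix/server")
            delegated = resp.json().get("m.server")
            if delegated:
                server_name = delegated
        except Exception:
            pass  # no delegation; carry on with the original name
    # 2. IPv6 literal or explicit port: connect directly, SNI is the hostname.
    if server_name.endswith("]"):
        return server_name, 8448, server_name
    if ":" in server_name:
        host, port = server_name.rsplit(":", 1)
        return host, int(port), host
    # 3. SRV record; keep the (possibly delegated) server name for SNI.
    try:
        srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
        return srv.host, srv.port, server_name
    except Exception:
        # 4. Fall back to port 8448 on the server name itself.
        return server_name, 8448, server_name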

View file

@ -26,7 +26,11 @@ class PushRules:
class FilteredPushRules: class FilteredPushRules:
def __init__( def __init__(
self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool self,
push_rules: PushRules,
enabled_map: Dict[str, bool],
msc3664_enabled: bool,
msc1767_enabled: bool,
): ... ): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@ -41,6 +45,8 @@ class PushRuleEvaluator:
notification_power_levels: Mapping[str, int], notification_power_levels: Mapping[str, int],
related_events_flattened: Mapping[str, Mapping[str, str]], related_events_flattened: Mapping[str, Mapping[str, str]],
related_event_match_enabled: bool, related_event_match_enabled: bool,
room_version_feature_flags: list[str],
msc3931_enabled: bool,
): ... ): ...
def run( def run(
self, self,

View file

@ -713,7 +713,7 @@ class HttpResponseException(CodeMessageException):
set to the reason code from the HTTP response. set to the reason code from the HTTP response.
Returns: Returns:
SynapseError: The error converted to a SynapseError.
""" """
# try to parse the body as json, to get better errcode/msg, but # try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text # default to M_UNKNOWN with the HTTP status as the error text

View file

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from typing import Callable, Dict, Optional from typing import Callable, Dict, List, Optional
import attr import attr
@ -51,6 +51,13 @@ class RoomDisposition:
UNSTABLE = "unstable" UNSTABLE = "unstable"
class PushRuleRoomFlag:
"""Enum for listing possible MSC3931 room version feature flags, for push rules"""
# MSC3932: Room version supports MSC1767 Extensible Events.
EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"
@attr.s(slots=True, frozen=True, auto_attribs=True) @attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomVersion: class RoomVersion:
"""An object which describes the unique attributes of a room version.""" """An object which describes the unique attributes of a room version."""
@ -91,6 +98,12 @@ class RoomVersion:
msc3787_knock_restricted_join_rule: bool msc3787_knock_restricted_join_rule: bool
# MSC3667: Enforce integer power levels # MSC3667: Enforce integer power levels
msc3667_int_only_power_levels: bool msc3667_int_only_power_levels: bool
# MSC3931: Adds a push rule condition for "room version feature flags", making
# some push rules room version dependent. Note that adding a flag to this list
# is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, making conditions
# fail if used.
msc3931_push_features: List[str] # values from PushRuleRoomFlag
class RoomVersions: class RoomVersions:
@ -111,6 +124,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V2 = RoomVersion( V2 = RoomVersion(
"2", "2",
@ -129,6 +143,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V3 = RoomVersion( V3 = RoomVersion(
"3", "3",
@ -147,6 +162,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V4 = RoomVersion( V4 = RoomVersion(
"4", "4",
@ -165,6 +181,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V5 = RoomVersion( V5 = RoomVersion(
"5", "5",
@ -183,6 +200,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V6 = RoomVersion( V6 = RoomVersion(
"6", "6",
@ -201,6 +219,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
MSC2176 = RoomVersion( MSC2176 = RoomVersion(
"org.matrix.msc2176", "org.matrix.msc2176",
@ -219,6 +238,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V7 = RoomVersion( V7 = RoomVersion(
"7", "7",
@ -237,6 +257,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V8 = RoomVersion( V8 = RoomVersion(
"8", "8",
@ -255,6 +276,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V9 = RoomVersion( V9 = RoomVersion(
"9", "9",
@ -273,6 +295,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
MSC3787 = RoomVersion( MSC3787 = RoomVersion(
"org.matrix.msc3787", "org.matrix.msc3787",
@ -291,6 +314,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True, msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
) )
V10 = RoomVersion( V10 = RoomVersion(
"10", "10",
@ -309,6 +333,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True, msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True, msc3667_int_only_power_levels=True,
msc3931_push_features=[],
) )
MSC2716v4 = RoomVersion( MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4", "org.matrix.msc2716v4",
@ -327,6 +352,27 @@ class RoomVersions:
msc2716_redactions=True, msc2716_redactions=True,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
"org.matrix.msc1767.10",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS],
) )

View file

@ -266,26 +266,18 @@ def register_start(
reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper())) reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
def listen_metrics( def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
) -> None:
""" """
Start Prometheus metrics server. Start Prometheus metrics server.
""" """
from prometheus_client import start_http_server as start_http_server_prometheus from prometheus_client import start_http_server as start_http_server_prometheus
from synapse.metrics import ( from synapse.metrics import RegistryProxy
RegistryProxy,
start_http_server as start_http_server_legacy,
)
for host in bind_addresses: for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port) logger.info("Starting metrics listener on %s:%d", host, port)
if enable_legacy_metric_names: _set_prometheus_client_use_created_metrics(False)
start_http_server_legacy(port, addr=host, registry=RegistryProxy) start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
else:
_set_prometheus_client_use_created_metrics(False)
start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
def _set_prometheus_client_use_created_metrics(new_value: bool) -> None: def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:

View file

@ -14,14 +14,12 @@
# limitations under the License. # limitations under the License.
import logging import logging
import sys import sys
from typing import Dict, List, Optional, Tuple from typing import Dict, List
from twisted.internet import address
from twisted.web.resource import Resource from twisted.web.resource import Resource
import synapse import synapse
import synapse.events import synapse.events
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import ( from synapse.api.urls import (
CLIENT_API_PREFIX, CLIENT_API_PREFIX,
FEDERATION_PREFIX, FEDERATION_PREFIX,
@ -43,8 +41,6 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource from synapse.http.server import JsonResource, OptionsResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import LoggingContext from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@ -70,12 +66,12 @@ from synapse.rest.client import (
versions, versions,
voip, voip,
) )
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import ( from synapse.rest.client.keys import (
KeyChangesServlet, KeyChangesServlet,
KeyQueryServlet, KeyQueryServlet,
KeyUploadServlet,
OneTimeKeyServlet, OneTimeKeyServlet,
) )
from synapse.rest.client.register import ( from synapse.rest.client.register import (
@ -132,107 +128,12 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree from synapse.util.httpresourcetree import create_resource_tree
logger = logging.getLogger("synapse.app.generic_worker") logger = logging.getLogger("synapse.app.generic_worker")
class KeyUploadServlet(RestServlet):
"""An implementation of the `KeyUploadServlet` that responds to read only
requests, but otherwise proxies through to the master instance.
"""
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
def __init__(self, hs: HomeServer):
"""
Args:
hs: server
"""
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
self.http_client = hs.get_simple_http_client()
self.main_uri = hs.config.worker.worker_main_http_uri
async def on_POST(
self, request: SynapseRequest, device_id: Optional[str]
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
if device_id is not None:
# passing the device_id here is deprecated; however, we allow it
# for now for compatibility with older clients.
if requester.device_id is not None and device_id != requester.device_id:
logger.warning(
"Client uploading keys for a different device "
"(logged in as %s, uploading for %s)",
requester.device_id,
device_id,
)
else:
device_id = requester.device_id
if device_id is None:
raise SynapseError(
400, "To upload keys, you must pass device_id when authenticating"
)
if body:
# They're actually trying to upload something, proxy to main synapse.
# Proxy headers from the original request, such as the auth headers
# (in case the access token is there) and the original IP /
# User-Agent of the request.
headers: Dict[bytes, List[bytes]] = {
header: list(request.requestHeaders.getRawHeaders(header, []))
for header in (b"Authorization", b"User-Agent")
}
# Add the previous hop to the X-Forwarded-For header.
x_forwarded_for = list(
request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])
)
# we use request.client here, since we want the previous hop, not the
# original client (as returned by request.getClientAddress()).
if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
previous_host = request.client.host.encode("ascii")
# If the header exists, add to the comma-separated list of the first
# instance of the header. Otherwise, generate a new header.
if x_forwarded_for:
x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
x_forwarded_for.extend(x_forwarded_for[1:])
else:
x_forwarded_for = [previous_host]
headers[b"X-Forwarded-For"] = x_forwarded_for
# Replicate the original X-Forwarded-Proto header. Note that
# XForwardedForRequest overrides isSecure() to give us the original protocol
# used by the client, as opposed to the protocol used by our upstream proxy
# - which is what we want here.
headers[b"X-Forwarded-Proto"] = [
b"https" if request.isSecure() else b"http"
]
try:
result = await self.http_client.post_json_get_json(
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
except HttpResponseException as e:
raise e.to_synapse_error() from e
except RequestSendFailed as e:
raise SynapseError(502, "Failed to talk to master") from e
return 200, result
else:
# Just interested in counts.
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
return 200, {"one_time_key_counts": result}
class GenericWorkerSlavedStore( class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly # FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker. # rather than going via the correct worker.
@ -419,7 +320,6 @@ class GenericWorkerServer(HomeServer):
_base.listen_metrics( _base.listen_metrics(
listener.bind_addresses, listener.bind_addresses,
listener.port, listener.port,
enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
) )
else: else:
logger.warning("Unsupported listener type: %s", listener.type) logger.warning("Unsupported listener type: %s", listener.type)

View file

@ -265,7 +265,6 @@ class SynapseHomeServer(HomeServer):
_base.listen_metrics( _base.listen_metrics(
listener.bind_addresses, listener.bind_addresses,
listener.port, listener.port,
enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
) )
else: else:
# this shouldn't happen, as the listener type should have been checked # this shouldn't happen, as the listener type should have been checked

View file

@ -32,9 +32,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Type for the `device_one_time_key_counts` field in an appservice transaction # Type for the `device_one_time_keys_count` field in an appservice transaction
# user ID -> {device ID -> {algorithm -> count}} # user ID -> {device ID -> {algorithm -> count}}
TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]] TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]
# Type for the `device_unused_fallback_key_types` field in an appservice transaction # Type for the `device_unused_fallback_key_types` field in an appservice transaction
# user ID -> {device ID -> [algorithm]} # user ID -> {device ID -> [algorithm]}
@ -376,7 +376,7 @@ class AppServiceTransaction:
events: List[EventBase], events: List[EventBase],
ephemeral: List[JsonDict], ephemeral: List[JsonDict],
to_device_messages: List[JsonDict], to_device_messages: List[JsonDict],
one_time_key_counts: TransactionOneTimeKeyCounts, one_time_keys_count: TransactionOneTimeKeysCount,
unused_fallback_keys: TransactionUnusedFallbackKeys, unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates, device_list_summary: DeviceListUpdates,
): ):
@ -385,7 +385,7 @@ class AppServiceTransaction:
self.events = events self.events = events
self.ephemeral = ephemeral self.ephemeral = ephemeral
self.to_device_messages = to_device_messages self.to_device_messages = to_device_messages
self.one_time_key_counts = one_time_key_counts self.one_time_keys_count = one_time_keys_count
self.unused_fallback_keys = unused_fallback_keys self.unused_fallback_keys = unused_fallback_keys
self.device_list_summary = device_list_summary self.device_list_summary = device_list_summary
@ -402,7 +402,7 @@ class AppServiceTransaction:
events=self.events, events=self.events,
ephemeral=self.ephemeral, ephemeral=self.ephemeral,
to_device_messages=self.to_device_messages, to_device_messages=self.to_device_messages,
one_time_key_counts=self.one_time_key_counts, one_time_keys_count=self.one_time_keys_count,
unused_fallback_keys=self.unused_fallback_keys, unused_fallback_keys=self.unused_fallback_keys,
device_list_summary=self.device_list_summary, device_list_summary=self.device_list_summary,
txn_id=self.id, txn_id=self.id,

View file

@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException from synapse.api.errors import CodeMessageException
from synapse.appservice import ( from synapse.appservice import (
ApplicationService, ApplicationService,
TransactionOneTimeKeyCounts, TransactionOneTimeKeysCount,
TransactionUnusedFallbackKeys, TransactionUnusedFallbackKeys,
) )
from synapse.events import EventBase from synapse.events import EventBase
@ -262,7 +262,7 @@ class ApplicationServiceApi(SimpleHttpClient):
events: List[EventBase], events: List[EventBase],
ephemeral: List[JsonDict], ephemeral: List[JsonDict],
to_device_messages: List[JsonDict], to_device_messages: List[JsonDict],
one_time_key_counts: TransactionOneTimeKeyCounts, one_time_keys_count: TransactionOneTimeKeysCount,
unused_fallback_keys: TransactionUnusedFallbackKeys, unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates, device_list_summary: DeviceListUpdates,
txn_id: Optional[int] = None, txn_id: Optional[int] = None,
@ -310,10 +310,13 @@ class ApplicationServiceApi(SimpleHttpClient):
# TODO: Update to stable prefixes once MSC3202 completes FCP merge # TODO: Update to stable prefixes once MSC3202 completes FCP merge
if service.msc3202_transaction_extensions: if service.msc3202_transaction_extensions:
if one_time_key_counts: if one_time_keys_count:
body[ body[
"org.matrix.msc3202.device_one_time_key_counts" "org.matrix.msc3202.device_one_time_key_counts"
] = one_time_key_counts ] = one_time_keys_count
body[
"org.matrix.msc3202.device_one_time_keys_count"
] = one_time_keys_count
if unused_fallback_keys: if unused_fallback_keys:
body[ body[
"org.matrix.msc3202.device_unused_fallback_key_types" "org.matrix.msc3202.device_unused_fallback_key_types"

View file

@ -64,7 +64,7 @@ from typing import (
from synapse.appservice import ( from synapse.appservice import (
ApplicationService, ApplicationService,
ApplicationServiceState, ApplicationServiceState,
TransactionOneTimeKeyCounts, TransactionOneTimeKeysCount,
TransactionUnusedFallbackKeys, TransactionUnusedFallbackKeys,
) )
from synapse.appservice.api import ApplicationServiceApi from synapse.appservice.api import ApplicationServiceApi
@ -258,7 +258,7 @@ class _ServiceQueuer:
): ):
return return
one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None
if ( if (
@ -269,7 +269,7 @@ class _ServiceQueuer:
# for the users which are mentioned in this transaction, # for the users which are mentioned in this transaction,
# as well as the appservice's sender. # as well as the appservice's sender.
( (
one_time_key_counts, one_time_keys_count,
unused_fallback_keys, unused_fallback_keys,
) = await self._compute_msc3202_otk_counts_and_fallback_keys( ) = await self._compute_msc3202_otk_counts_and_fallback_keys(
service, events, ephemeral, to_device_messages_to_send service, events, ephemeral, to_device_messages_to_send
@ -281,7 +281,7 @@ class _ServiceQueuer:
events, events,
ephemeral, ephemeral,
to_device_messages_to_send, to_device_messages_to_send,
one_time_key_counts, one_time_keys_count,
unused_fallback_keys, unused_fallback_keys,
device_list_summary, device_list_summary,
) )
@ -296,7 +296,7 @@ class _ServiceQueuer:
events: Iterable[EventBase], events: Iterable[EventBase],
ephemerals: Iterable[JsonDict], ephemerals: Iterable[JsonDict],
to_device_messages: Iterable[JsonDict], to_device_messages: Iterable[JsonDict],
) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]: ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
""" """
Given a list of the events, ephemeral messages and to-device messages, Given a list of the events, ephemeral messages and to-device messages,
- first computes a list of application services users that may have - first computes a list of application services users that may have
@ -367,7 +367,7 @@ class _TransactionController:
events: List[EventBase], events: List[EventBase],
ephemeral: Optional[List[JsonDict]] = None, ephemeral: Optional[List[JsonDict]] = None,
to_device_messages: Optional[List[JsonDict]] = None, to_device_messages: Optional[List[JsonDict]] = None,
one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None, one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None, unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
device_list_summary: Optional[DeviceListUpdates] = None, device_list_summary: Optional[DeviceListUpdates] = None,
) -> None: ) -> None:
@ -380,7 +380,7 @@ class _TransactionController:
events: The persistent events to include in the transaction. events: The persistent events to include in the transaction.
ephemeral: The ephemeral events to include in the transaction. ephemeral: The ephemeral events to include in the transaction.
to_device_messages: The to-device messages to include in the transaction. to_device_messages: The to-device messages to include in the transaction.
one_time_key_counts: Counts of remaining one-time keys for relevant one_time_keys_count: Counts of remaining one-time keys for relevant
appservice devices in the transaction. appservice devices in the transaction.
unused_fallback_keys: Lists of unused fallback keys for relevant unused_fallback_keys: Lists of unused fallback keys for relevant
appservice devices in the transaction. appservice devices in the transaction.
@ -397,7 +397,7 @@ class _TransactionController:
events=events, events=events,
ephemeral=ephemeral or [], ephemeral=ephemeral or [],
to_device_messages=to_device_messages or [], to_device_messages=to_device_messages or [],
one_time_key_counts=one_time_key_counts or {}, one_time_keys_count=one_time_keys_count or {},
unused_fallback_keys=unused_fallback_keys or {}, unused_fallback_keys=unused_fallback_keys or {},
device_list_summary=device_list_summary or DeviceListUpdates(), device_list_summary=device_list_summary or DeviceListUpdates(),
) )

View file

@ -16,6 +16,7 @@ from typing import Any, Optional
import attr import attr
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config from synapse.config._base import Config
from synapse.types import JsonDict from synapse.types import JsonDict
@ -53,9 +54,6 @@ class ExperimentalConfig(Config):
# MSC3266 (room summary api) # MSC3266 (room summary api)
self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False) self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
# MSC3030 (Jump to date API endpoint)
self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)
# MSC2409 (this setting only relates to optionally sending to-device messages). # MSC2409 (this setting only relates to optionally sending to-device messages).
# Presence, typing and read receipt EDUs are already sent to application services that # Presence, typing and read receipt EDUs are already sent to application services that
# have opted in to receive them. If enabled, this adds to-device messages to that list. # have opted in to receive them. If enabled, this adds to-device messages to that list.
@ -131,3 +129,10 @@ class ExperimentalConfig(Config):
# MSC3912: Relation-based redactions. # MSC3912: Relation-based redactions.
self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False) self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
# MSC1767 and friends: Extensible Events
self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
if self.msc1767_enabled:
# Enable room version (and thus applicable push rules from MSC3931/3932)
version_id = RoomVersions.MSC1767v10.identifier
KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10

View file

@ -317,10 +317,9 @@ def setup_logging(
Set up the logging subsystem. Set up the logging subsystem.
Args: Args:
config (LoggingConfig | synapse.config.worker.WorkerConfig): config: configuration data
configuration data
use_worker_options (bool): True to use the 'worker_log_config' option use_worker_options: True to use the 'worker_log_config' option
instead of 'log_config'. instead of 'log_config'.
logBeginner: The Twisted logBeginner to use. logBeginner: The Twisted logBeginner to use.

View file

@ -43,8 +43,6 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None: def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False) self.enable_metrics = config.get("enable_metrics", False)
self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
self.report_stats = config.get("report_stats", None) self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get( self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push" "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"

View file

@ -150,8 +150,5 @@ class RatelimitConfig(Config):
self.rc_third_party_invite = RatelimitSettings( self.rc_third_party_invite = RatelimitSettings(
config.get("rc_third_party_invite", {}), config.get("rc_third_party_invite", {}),
defaults={ defaults={"per_second": 0.0025, "burst_count": 5},
"per_second": self.rc_message.per_second,
"burst_count": self.rc_message.burst_count,
},
) )

View file

@ -29,20 +29,6 @@ from ._base import (
) )
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main
synapse process before they can be run in a separate worker.
Please add ``send_federation: false`` to the main config
"""
_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
The start_pushers config option must be disabled in the main
synapse process before they can be run in a separate worker.
Please add ``start_pushers: false`` to the main config
"""
_DEPRECATED_WORKER_DUTY_OPTION_USED = """ _DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead. Synapse version. Please use ``%s: name_of_worker`` instead.
@ -162,7 +148,13 @@ class WorkerConfig(Config):
self.worker_name = config.get("worker_name", self.worker_app) self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or "master" self.instance_name = self.worker_name or "master"
# FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None) self.worker_main_http_uri = config.get("worker_main_http_uri", None)
if self.worker_main_http_uri is not None:
logger.warning(
"The config option worker_main_http_uri is unused since Synapse 1.73. "
"It can be safely removed from your configuration."
)
# This option is really only here to support `--manhole` command line # This option is really only here to support `--manhole` command line
# argument. # argument.
@ -176,40 +168,12 @@ class WorkerConfig(Config):
) )
) )
# Handle federation sender configuration. federation_sender_instances = self._worker_names_performing_this_duty(
# config,
# There are two ways of configuring which instances handle federation "send_federation",
# sending: "synapse.app.federation_sender",
# 1. The old way where "send_federation" is set to false and running a "federation_sender_instances",
# `synapse.app.federation_sender` worker app. )
# 2. Specifying the workers sending federation in
# `federation_sender_instances`.
#
send_federation = config.get("send_federation", True)
federation_sender_instances = config.get("federation_sender_instances")
if federation_sender_instances is None:
# Default to an empty list, which means "another, unknown, worker is
# responsible for it".
federation_sender_instances = []
# If no federation sender instances are set we check if
# `send_federation` is set, which means use master
if send_federation:
federation_sender_instances = ["master"]
if self.worker_app == "synapse.app.federation_sender":
if send_federation:
# If we're running federation senders, and not using
# `federation_sender_instances`, then we should have
# explicitly set `send_federation` to false.
raise ConfigError(
_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
)
federation_sender_instances = [self.worker_name]
self.send_federation = self.instance_name in federation_sender_instances self.send_federation = self.instance_name in federation_sender_instances
self.federation_shard_config = ShardedWorkerHandlingConfig( self.federation_shard_config = ShardedWorkerHandlingConfig(
federation_sender_instances federation_sender_instances
@ -276,27 +240,12 @@ class WorkerConfig(Config):
) )
# Handle sharded push # Handle sharded push
start_pushers = config.get("start_pushers", True) pusher_instances = self._worker_names_performing_this_duty(
pusher_instances = config.get("pusher_instances") config,
if pusher_instances is None: "start_pushers",
# Default to an empty list, which means "another, unknown, worker is "synapse.app.pusher",
# responsible for it". "pusher_instances",
pusher_instances = [] )
# If no pushers instances are set we check if `start_pushers` is
# set, which means use master
if start_pushers:
pusher_instances = ["master"]
if self.worker_app == "synapse.app.pusher":
if start_pushers:
# If we're running pushers, and not using
# `pusher_instances`, then we should have explicitly set
# `start_pushers` to false.
raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)
pusher_instances = [self.instance_name]
self.start_pushers = self.instance_name in pusher_instances self.start_pushers = self.instance_name in pusher_instances
self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances) self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
@ -419,6 +368,64 @@ class WorkerConfig(Config):
# (By this point, these are either the same value or only one is not None.) # (By this point, these are either the same value or only one is not None.)
return bool(new_option_should_run_here or legacy_option_should_run_here) return bool(new_option_should_run_here or legacy_option_should_run_here)
def _worker_names_performing_this_duty(
self,
config: Dict[str, Any],
legacy_option_name: str,
legacy_app_name: str,
modern_instance_list_name: str,
) -> List[str]:
"""
Retrieves the names of the workers handling a given duty, by either legacy
option or instance list.
There are two ways of configuring which instances handle a given duty, e.g.
for configuring pushers:
1. The old way where "start_pushers" is set to false and running a
`synapse.app.pusher` worker app.
2. Specifying the workers handling the duty in the instance list, e.g. `pusher_instances`.
Args:
config: settings read from yaml.
legacy_option_name: the old way of enabling options. e.g. 'start_pushers'
legacy_app_name: The historical app name. e.g. 'synapse.app.pusher'
modern_instance_list_name: the string name of the new instance_list. e.g.
'pusher_instances'
Returns:
A list of worker instance names handling the given duty.
"""
legacy_option = config.get(legacy_option_name, True)
worker_instances = config.get(modern_instance_list_name)
if worker_instances is None:
# Default to an empty list, which means "another, unknown, worker is
# responsible for it".
worker_instances = []
# If no worker instances are set we check if the legacy option
# is set, which means use the main process.
if legacy_option:
worker_instances = ["master"]
if self.worker_app == legacy_app_name:
if legacy_option:
# If we're using `legacy_app_name`, and not using
# `modern_instance_list_name`, then we should have
# explicitly set `legacy_option_name` to false.
raise ConfigError(
f"The '{legacy_option_name}' config option must be disabled in "
"the main synapse process before they can be run in a separate "
"worker.\n"
f"Please add `{legacy_option_name}: false` to the main config.\n",
)
worker_instances = [self.worker_name]
return worker_instances
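A standalone Python sketch of the helper above, with a usage example. This re-implements the logic purely for illustration and is not the real method; it raises a plain ValueError where the real code raises ConfigError.

def resolve_duty_instances(config, worker_app, worker_name,
                           legacy_option_name, legacy_app_name, instance_list_name):
    legacy_option = config.get(legacy_option_name, True)
    instances = config.get(instance_list_name)
    if instances is None:
        # No explicit instance list: fall back to the legacy behaviour.
        instances = ["master"] if legacy_option else []
        if worker_app == legacy_app_name:
            if legacy_option:
                raise ValueError(
                    f"disable {legacy_option_name} on the main process first"
                )
            instances = [worker_name]
    return instances

# Modern config style: an explicit instance list wins outright.
assert resolve_duty_instances(
    {"pusher_instances": ["pusher1"]}, None, None,
    "start_pushers", "synapse.app.pusher", "pusher_instances",
) == ["pusher1"]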
def read_arguments(self, args: argparse.Namespace) -> None: def read_arguments(self, args: argparse.Namespace) -> None:
# We support a bunch of command line arguments that override options in # We support a bunch of command line arguments that override options in
# the config. A lot of these options have a worker_* prefix when running # the config. A lot of these options have a worker_* prefix when running

View file

@ -213,7 +213,7 @@ class Keyring:
def verify_json_objects_for_server( def verify_json_objects_for_server(
self, server_and_json: Iterable[Tuple[str, dict, int]] self, server_and_json: Iterable[Tuple[str, dict, int]]
) -> List[defer.Deferred]: ) -> List["defer.Deferred[None]"]:
"""Bulk verifies signatures of json objects, bulk fetching keys as """Bulk verifies signatures of json objects, bulk fetching keys as
necessary. necessary.
@ -226,10 +226,9 @@ class Keyring:
valid. valid.
Returns: Returns:
List<Deferred[None]>: for each input triplet, a deferred indicating success For each input triplet, a deferred indicating success or failure to
or failure to verify each json object's signature for the given verify each json object's signature for the given server_name. The
server_name. The deferreds run their callbacks in the sentinel deferreds run their callbacks in the sentinel logcontext.
logcontext.
""" """
return [ return [
run_in_background( run_in_background(
@ -858,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
response = await self.client.get_json( response = await self.client.get_json(
destination=server_name, destination=server_name,
path="/_matrix/key/v2/server/" path="/_matrix/key/v2/server/"
+ urllib.parse.quote(requested_key_id), + urllib.parse.quote(requested_key_id, safe=""),
ignore_backoff=True, ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an # we only give the remote server 10s to respond. It should be an
# easy request to handle, so if it doesn't reply within 10s, it's # easy request to handle, so if it doesn't reply within 10s, it's
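The safe="" addition matters because key IDs can contain a "/": urllib.parse.quote leaves "/" unescaped by default (its default safe is "/"), which would produce a different request path. For example (the key ID is illustrative):

import urllib.parse

key_id = "ed25519:abc/def"
urllib.parse.quote(key_id)           # 'ed25519%3Aabc/def'  -- slash left as-is
urllib.parse.quote(key_id, safe="")  # 'ed25519%3Aabc%2Fdef' -- slash encoded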

View file

@ -597,8 +597,7 @@ def _event_type_from_format_version(
format_version: The event format version format_version: The event format version
Returns: Returns:
type: A type that can be initialized as per the initializer of A type that can be initialized as per the initializer of `FrozenEvent`
`FrozenEvent`
""" """
if format_version == EventFormatVersions.ROOM_V1_V2: if format_version == EventFormatVersions.ROOM_V1_V2:

View file

@ -128,6 +128,7 @@ class EventBuilder:
state_filter=StateFilter.from_types( state_filter=StateFilter.from_types(
auth_types_for_event(self.room_version, self) auth_types_for_event(self.room_version, self)
), ),
await_full_state=False,
) )
auth_event_ids = self._event_auth_handler.compute_auth_events( auth_event_ids = self._event_auth_handler.compute_auth_events(
self, state_ids self, state_ids

View file

@ -1691,9 +1691,19 @@ class FederationClient(FederationBase):
# to return events on *both* sides of the timestamp to # to return events on *both* sides of the timestamp to
# help reconcile the gap faster. # help reconcile the gap faster.
_timestamp_to_event_from_destination, _timestamp_to_event_from_destination,
# Since this endpoint is new, we should try other servers before giving up.
# We can safely remove this in a year (remove after 2023-11-16).
failover_on_unknown_endpoint=True,
) )
return timestamp_to_event_response return timestamp_to_event_response
except SynapseError: except SynapseError as e:
logger.warn(
"timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
room_id,
timestamp,
direction,
e,
)
return None return None
async def _timestamp_to_event_from_destination( async def _timestamp_to_event_from_destination(

View file

@ -434,7 +434,23 @@ class FederationSender(AbstractFederationSender):
# If there are no prev event IDs then the state is empty # If there are no prev event IDs then the state is empty
# and so no remote servers in the room # and so no remote servers in the room
destinations = set() destinations = set()
else:
if destinations is None:
# During partial join we use the set of servers that we got
# when beginning the join. It's still possible that we send
# events to servers that left the room in the meantime, but
# we consider that an acceptable risk since it is only our own
# events that we leak and not those of other servers.
partial_state_destinations = (
await self.store.get_partial_state_servers_at_join(
event.room_id
)
)
if len(partial_state_destinations) > 0:
destinations = partial_state_destinations
if destinations is None:
# We check the external cache for the destinations, which is # We check the external cache for the destinations, which is
# stored per state group. # stored per state group.
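The destination selection now falls back through several sources. A rough async Python sketch of the control flow (the store and cache objects are stand-ins; only get_partial_state_servers_at_join is taken from the change above):

async def get_destinations_for_event(event, prev_event_ids, store, external_cache):
    # 1. No prev events: the state is empty, so there are no remote servers.
    if not prev_event_ids:
        return set()
    # 2. Partially-joined room: use the servers recorded when the join began.
    #    We may leak our own events to servers that have since left, which is
    #    treated as an acceptable risk.
    partial = await store.get_partial_state_servers_at_join(event.room_id)
    if partial:
        return set(partial)
    # 3. Otherwise consult the external cache (keyed by state group) and, failing
    #    that, compute the hosts from the room state itself (not shown here).
    return await external_cache.get_destinations(event)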

View file

@ -35,7 +35,7 @@ from synapse.logging import issue9533_logger
from synapse.logging.opentracing import SynapseTags, set_tag from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import ReadReceipt from synapse.types import JsonDict, ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.visibility import filter_events_for_server from synapse.visibility import filter_events_for_server
@ -136,8 +136,11 @@ class PerDestinationQueue:
# destination # destination
self._pending_presence: Dict[str, UserPresenceState] = {} self._pending_presence: Dict[str, UserPresenceState] = {}
# room_id -> receipt_type -> user_id -> receipt_dict # List of room_id -> receipt_type -> user_id -> receipt_dict,
self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {} #
# Each EDU can only contain a single receipt per
# (room ID, receipt type, user ID, thread ID) tuple.
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
self._rrs_pending_flush = False self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message. # stream_id of last successfully sent to-device message.
@ -202,17 +205,53 @@ class PerDestinationQueue:
Args: Args:
receipt: receipt to be queued receipt: receipt to be queued
""" """
self._pending_rrs.setdefault(receipt.room_id, {}).setdefault( serialized_receipt: JsonDict = {
receipt.receipt_type, {} "event_ids": receipt.event_ids,
)[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data} "data": receipt.data,
}
if receipt.thread_id is not None:
serialized_receipt["data"]["thread_id"] = receipt.thread_id
# Find which EDU to add this receipt to. There are three situations depending
# on the (room ID, receipt type, user, thread ID) tuple:
#
# 1. If it fully matches, clobber the information.
# 2. If it is missing, add the information.
# 3. If the subset tuple of (room ID, receipt type, user) matches, check
# the next EDU (or add a new EDU).
for edu in self._pending_receipt_edus:
receipt_content = edu.setdefault(receipt.room_id, {}).setdefault(
receipt.receipt_type, {}
)
# If this room ID, receipt type, user ID is not in this EDU, OR if
# the full tuple matches, use the current EDU.
if (
receipt.user_id not in receipt_content
or receipt_content[receipt.user_id].get("thread_id")
== receipt.thread_id
):
receipt_content[receipt.user_id] = serialized_receipt
break
# If no matching EDU was found, create a new one.
else:
self._pending_receipt_edus.append(
{
receipt.room_id: {
receipt.receipt_type: {receipt.user_id: serialized_receipt}
}
}
)
def flush_read_receipts_for_room(self, room_id: str) -> None: def flush_read_receipts_for_room(self, room_id: str) -> None:
# if we don't have any read-receipts for this room, it may be that we've already # If there are any pending receipts for this room then force-flush them
# sent them out, so we don't need to flush. # in a new transaction.
if room_id not in self._pending_rrs: for edu in self._pending_receipt_edus:
return if room_id in edu:
self._rrs_pending_flush = True self._rrs_pending_flush = True
self.attempt_new_transaction() self.attempt_new_transaction()
# No use in checking remaining EDUs if the room was found.
break
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None: def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
self._pending_edus_keyed[(edu.edu_type, key)] = edu self._pending_edus_keyed[(edu.edu_type, key)] = edu
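As a standalone illustration of the bucketing rule implemented in the hunk above: each pending receipt EDU can carry at most one receipt per (room ID, receipt type, user ID, thread ID), so a new receipt either clobbers a matching entry, fills an empty slot, or starts a new EDU. The sketch below uses hypothetical names and keeps the thread ID inside the receipt's data dict:

    # Minimal sketch of receipt-to-EDU bucketing; all names are illustrative.
    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class QueuedReceipt:
        room_id: str
        receipt_type: str
        user_id: str
        event_ids: List[str]
        thread_id: Optional[str] = None

    def add_receipt(pending_edus: List[dict], receipt: QueuedReceipt) -> None:
        serialized: dict = {"event_ids": receipt.event_ids, "data": {}}
        if receipt.thread_id is not None:
            serialized["data"]["thread_id"] = receipt.thread_id

        for edu in pending_edus:
            content = edu.setdefault(receipt.room_id, {}).setdefault(
                receipt.receipt_type, {}
            )
            existing = content.get(receipt.user_id)
            # Reuse this EDU if the slot is free, or if the existing entry is
            # for the same thread (the new receipt then clobbers it).
            if existing is None or existing["data"].get("thread_id") == receipt.thread_id:
                content[receipt.user_id] = serialized
                return

        # Every pending EDU already holds a receipt for this (room, type, user)
        # with a different thread ID: start a new EDU.
        pending_edus.append(
            {receipt.room_id: {receipt.receipt_type: {receipt.user_id: serialized}}}
        )

    # Example: two receipts from the same user in different threads end up in
    # two separate EDUs (room/user/event IDs here are made up).
    edus: List[dict] = []
    add_receipt(edus, QueuedReceipt("!r:x", "m.read", "@u:x", ["$e1"], "thread-a"))
    add_receipt(edus, QueuedReceipt("!r:x", "m.read", "@u:x", ["$e2"], "thread-b"))
    assert len(edus) == 2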
@ -351,7 +390,7 @@ class PerDestinationQueue:
self._pending_edus = [] self._pending_edus = []
self._pending_edus_keyed = {} self._pending_edus_keyed = {}
self._pending_presence = {} self._pending_presence = {}
self._pending_rrs = {} self._pending_receipt_edus = []
self._start_catching_up() self._start_catching_up()
except FederationDeniedError as e: except FederationDeniedError as e:
@ -505,6 +544,7 @@ class PerDestinationQueue:
new_pdus = await filter_events_for_server( new_pdus = await filter_events_for_server(
self._storage_controllers, self._storage_controllers,
self._destination, self._destination,
self._server_name,
new_pdus, new_pdus,
redact=False, redact=False,
) )
@ -542,22 +582,27 @@ class PerDestinationQueue:
self._destination, last_successful_stream_ordering self._destination, last_successful_stream_ordering
) )
def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]: def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
if not self._pending_rrs: if not self._pending_receipt_edus:
return return
if not force_flush and not self._rrs_pending_flush: if not force_flush and not self._rrs_pending_flush:
# not yet time for this lot # not yet time for this lot
return return
edu = Edu( # Send at most limit EDUs for receipts.
origin=self._server_name, for content in self._pending_receipt_edus[:limit]:
destination=self._destination, yield Edu(
edu_type=EduTypes.RECEIPT, origin=self._server_name,
content=self._pending_rrs, destination=self._destination,
) edu_type=EduTypes.RECEIPT,
self._pending_rrs = {} content=content,
self._rrs_pending_flush = False )
yield edu self._pending_receipt_edus = self._pending_receipt_edus[limit:]
# If there are still pending read-receipts, don't reset the pending flush
# flag.
if not self._pending_receipt_edus:
self._rrs_pending_flush = False
def _pop_pending_edus(self, limit: int) -> List[Edu]: def _pop_pending_edus(self, limit: int) -> List[Edu]:
pending_edus = self._pending_edus pending_edus = self._pending_edus
@ -644,40 +689,20 @@ class _TransactionQueueManager:
async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]: async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
# First we calculate the EDUs we want to send, if any. # First we calculate the EDUs we want to send, if any.
# We start by fetching device related EDUs, i.e device updates and to # There's a maximum number of EDUs that can be sent with a transaction,
# device messages. We have to keep 2 free slots for presence and rr_edus. # generally device updates and to-device messages get priority, but we
device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2 # want to ensure that there's room for some other EDUs as well.
#
# This is done by:
#
# * Add a presence EDU, if one exists.
# * Add up-to a small limit of read receipt EDUs.
# * Add to-device EDUs, but leave some space for device list updates.
# * Add device list updates EDUs.
# * If there's any remaining room, add other EDUs.
pending_edus = []
# We prioritize to-device messages so that existing encryption channels # Add presence EDU.
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
device_edu_limit -= len(to_device_edus)
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
device_edu_limit
)
if device_update_edus:
self._device_list_id = dev_list_id
else:
self.queue._last_device_list_stream_id = dev_list_id
pending_edus = device_update_edus + to_device_edus
# Now add the read receipt EDU.
pending_edus.extend(self.queue._get_rr_edus(force_flush=False))
# And presence EDU.
if self.queue._pending_presence: if self.queue._pending_presence:
pending_edus.append( pending_edus.append(
Edu( Edu(
@ -696,16 +721,47 @@ class _TransactionQueueManager:
) )
self.queue._pending_presence = {} self.queue._pending_presence = {}
# Finally add any other types of EDUs if there is room. # Add read receipt EDUs.
pending_edus.extend( pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus)) edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
# Next, prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
pending_edus.extend(to_device_edus)
edu_limit -= len(to_device_edus)
# Add device list update EDUs.
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
edu_limit
) )
while (
len(pending_edus) < MAX_EDUS_PER_TRANSACTION if device_update_edus:
and self.queue._pending_edus_keyed self._device_list_id = dev_list_id
): else:
self.queue._last_device_list_stream_id = dev_list_id
pending_edus.extend(device_update_edus)
edu_limit -= len(device_update_edus)
# Finally add any other types of EDUs if there is room.
other_edus = self.queue._pop_pending_edus(edu_limit)
pending_edus.extend(other_edus)
edu_limit -= len(other_edus)
while edu_limit > 0 and self.queue._pending_edus_keyed:
_, val = self.queue._pending_edus_keyed.popitem() _, val = self.queue._pending_edus_keyed.popitem()
pending_edus.append(val) pending_edus.append(val)
edu_limit -= 1
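Taken together, the hunks above turn transaction assembly into a budget walk over MAX_EDUS_PER_TRANSACTION (100 in Synapse): presence first, then a handful of receipt EDUs, then to-device messages (holding back a few slots for device list updates), then device list updates, then anything else. A rough, self-contained sketch with stand-in fetch callables:

    # Illustrative budget walk; the fetch_* callables stand in for the real
    # queue methods, and the cap of 100 mirrors MAX_EDUS_PER_TRANSACTION.
    from typing import Callable, List

    MAX_EDUS_PER_TRANSACTION = 100

    def build_edu_list(
        fetch_presence: Callable[[], List],
        fetch_receipts: Callable[[int], List],
        fetch_to_device: Callable[[int], List],
        fetch_device_updates: Callable[[int], List],
        fetch_other: Callable[[int], List],
    ) -> List:
        edus: List = []

        # 1. Presence (at most one EDU).
        edus.extend(fetch_presence())

        # 2. Up to a small limit of read receipt EDUs.
        edus.extend(fetch_receipts(5))
        budget = MAX_EDUS_PER_TRANSACTION - len(edus)

        # 3. To-device messages, keeping ~10 slots free for device list updates.
        to_device = fetch_to_device(budget - 10)
        edus.extend(to_device)
        budget -= len(to_device)

        # 4. Device list updates.
        device_updates = fetch_device_updates(budget)
        edus.extend(device_updates)
        budget -= len(device_updates)

        # 5. Any other EDUs, if there is still room.
        edus.extend(fetch_other(budget))
        return edus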
# Now we look for any PDUs to send, by getting up to 50 PDUs from the # Now we look for any PDUs to send, by getting up to 50 PDUs from the
# queue # queue
@ -716,8 +772,10 @@ class _TransactionQueueManager:
# if we've decided to send a transaction anyway, and we have room, we # if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs # may as well send any pending RRs
if len(pending_edus) < MAX_EDUS_PER_TRANSACTION: if edu_limit:
pending_edus.extend(self.queue._get_rr_edus(force_flush=True)) pending_edus.extend(
self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
)
if self._pdus: if self._pdus:
self._last_stream_ordering = self._pdus[ self._last_stream_ordering = self._pdus[

View file

@ -185,9 +185,8 @@ class TransportLayerClient:
Raises: Raises:
Various exceptions when the request fails Various exceptions when the request fails
""" """
path = _create_path( path = _create_v1_path(
FEDERATION_UNSTABLE_PREFIX, "/timestamp_to_event/%s",
"/org.matrix.msc3030/timestamp_to_event/%s",
room_id, room_id,
) )
@ -280,12 +279,11 @@ class TransportLayerClient:
Note that this does not append any events to any graphs. Note that this does not append any events to any graphs.
Args: Args:
destination (str): address of remote homeserver destination: address of remote homeserver
room_id (str): room to join/leave room_id: room to join/leave
user_id (str): user to be joined/left user_id: user to be joined/left
membership (str): one of join/leave membership: one of join/leave
params (dict[str, str|Iterable[str]]): Query parameters to include in the params: Query parameters to include in the request.
request.
Returns: Returns:
Succeeds when we get a 2xx HTTP response. The result Succeeds when we get a 2xx HTTP response. The result

View file

@ -25,7 +25,6 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import ( from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES, FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet, FederationAccountStatusServlet,
FederationTimestampLookupServlet,
) )
from synapse.http.server import HttpServer, JsonResource from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import ( from synapse.http.servlet import (
@ -291,13 +290,6 @@ def register_servlets(
) )
for servletclass in SERVLET_GROUPS[servlet_group]: for servletclass in SERVLET_GROUPS[servlet_group]:
# Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
if (
servletclass == FederationTimestampLookupServlet
and not hs.config.experimental.msc3030_enabled
):
continue
# Only allow the `/account_status` servlet if msc3720 is enabled # Only allow the `/account_status` servlet if msc3720 is enabled
if ( if (
servletclass == FederationAccountStatusServlet servletclass == FederationAccountStatusServlet

View file

@ -224,10 +224,10 @@ class BaseFederationServlet:
With arguments: With arguments:
origin (unicode|None): The authenticated server_name of the calling server, origin (str|None): The authenticated server_name of the calling server,
unless REQUIRE_AUTH is set to False and authentication failed. unless REQUIRE_AUTH is set to False and authentication failed.
content (unicode|None): decoded json body of the request. None if the content (str|None): decoded json body of the request. None if the
request was a GET. request was a GET.
query (dict[bytes, list[bytes]]): Query params from the request. url-decoded query (dict[bytes, list[bytes]]): Query params from the request. url-decoded

View file

@ -218,14 +218,13 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
`dir` can be `f` or `b` to indicate forwards and backwards in time from the `dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp. given timestamp.
GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction> GET /_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
{ {
"event_id": ... "event_id": ...
} }
""" """
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?" PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"
async def on_GET( async def on_GET(
self, self,

View file

@ -16,6 +16,7 @@ import logging
from typing import TYPE_CHECKING, Optional from typing import TYPE_CHECKING, Optional
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Codes, Requester, UserID, create_requester from synapse.types import Codes, Requester, UserID, create_requester
@ -76,6 +77,9 @@ class DeactivateAccountHandler:
True if identity server supports removing threepids, otherwise False. True if identity server supports removing threepids, otherwise False.
""" """
# This can only be called on the main process.
assert isinstance(self._device_handler, DeviceHandler)
# Check if this user can be deactivated # Check if this user can be deactivated
if not await self._third_party_rules.check_can_deactivate_user( if not await self._third_party_rules.check_can_deactivate_user(
user_id, by_admin user_id, by_admin

View file

@ -65,6 +65,8 @@ DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000
class DeviceWorkerHandler: class DeviceWorkerHandler:
device_list_updater: "DeviceListWorkerUpdater"
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock() self.clock = hs.get_clock()
self.hs = hs self.hs = hs
@ -76,6 +78,8 @@ class DeviceWorkerHandler:
self.server_name = hs.hostname self.server_name = hs.hostname
self._msc3852_enabled = hs.config.experimental.msc3852_enabled self._msc3852_enabled = hs.config.experimental.msc3852_enabled
self.device_list_updater = DeviceListWorkerUpdater(hs)
@trace @trace
async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
""" """
@ -99,6 +103,19 @@ class DeviceWorkerHandler:
log_kv(device_map) log_kv(device_map)
return devices return devices
async def get_dehydrated_device(
self, user_id: str
) -> Optional[Tuple[str, JsonDict]]:
"""Retrieve the information for a dehydrated device.
Args:
user_id: the user whose dehydrated device we are looking for
Returns:
a tuple whose first item is the device ID, and the second item is
the dehydrated device information
"""
return await self.store.get_dehydrated_device(user_id)
@trace @trace
async def get_device(self, user_id: str, device_id: str) -> JsonDict: async def get_device(self, user_id: str, device_id: str) -> JsonDict:
"""Retrieve the given device """Retrieve the given device
@ -127,7 +144,7 @@ class DeviceWorkerHandler:
@cancellable @cancellable
async def get_device_changes_in_shared_rooms( async def get_device_changes_in_shared_rooms(
self, user_id: str, room_ids: Collection[str], from_token: StreamToken self, user_id: str, room_ids: Collection[str], from_token: StreamToken
) -> Collection[str]: ) -> Set[str]:
"""Get the set of users whose devices have changed who share a room with """Get the set of users whose devices have changed who share a room with
the given user. the given user.
""" """
@ -320,6 +337,8 @@ class DeviceWorkerHandler:
class DeviceHandler(DeviceWorkerHandler): class DeviceHandler(DeviceWorkerHandler):
device_list_updater: "DeviceListUpdater"
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
super().__init__(hs) super().__init__(hs)
@ -402,6 +421,9 @@ class DeviceHandler(DeviceWorkerHandler):
self._check_device_name_length(initial_device_display_name) self._check_device_name_length(initial_device_display_name)
# Prune the user's device list if they already have a lot of devices.
await self._prune_too_many_devices(user_id)
if device_id is not None: if device_id is not None:
new_device = await self.store.store_device( new_device = await self.store.store_device(
user_id=user_id, user_id=user_id,
@ -433,6 +455,14 @@ class DeviceHandler(DeviceWorkerHandler):
raise errors.StoreError(500, "Couldn't generate a device ID.") raise errors.StoreError(500, "Couldn't generate a device ID.")
async def _prune_too_many_devices(self, user_id: str) -> None:
"""Delete any excess old devices this user may have."""
device_ids = await self.store.check_too_many_devices_for_user(user_id)
if not device_ids:
return
await self.delete_devices(user_id, device_ids)
async def _delete_stale_devices(self) -> None: async def _delete_stale_devices(self) -> None:
"""Background task that deletes devices which haven't been accessed for more than """Background task that deletes devices which haven't been accessed for more than
a configured time period. a configured time period.
@ -462,7 +492,7 @@ class DeviceHandler(DeviceWorkerHandler):
device_ids = [d for d in device_ids if d != except_device_id] device_ids = [d for d in device_ids if d != except_device_id]
await self.delete_devices(user_id, device_ids) await self.delete_devices(user_id, device_ids)
async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: async def delete_devices(self, user_id: str, device_ids: Collection[str]) -> None:
"""Delete several devices """Delete several devices
Args: Args:
@ -606,19 +636,6 @@ class DeviceHandler(DeviceWorkerHandler):
await self.delete_devices(user_id, [old_device_id]) await self.delete_devices(user_id, [old_device_id])
return device_id return device_id
async def get_dehydrated_device(
self, user_id: str
) -> Optional[Tuple[str, JsonDict]]:
"""Retrieve the information for a dehydrated device.
Args:
user_id: the user whose dehydrated device we are looking for
Returns:
a tuple whose first item is the device ID, and the second item is
the dehydrated device information
"""
return await self.store.get_dehydrated_device(user_id)
async def rehydrate_device( async def rehydrate_device(
self, user_id: str, access_token: str, device_id: str self, user_id: str, access_token: str, device_id: str
) -> dict: ) -> dict:
@ -682,13 +699,33 @@ class DeviceHandler(DeviceWorkerHandler):
hosts_already_sent_to: Set[str] = set() hosts_already_sent_to: Set[str] = set()
try: try:
stream_id, room_id = await self.store.get_device_change_last_converted_pos()
while True: while True:
self._handle_new_device_update_new_data = False self._handle_new_device_update_new_data = False
rows = await self.store.get_uncoverted_outbound_room_pokes() max_stream_id = self.store.get_device_stream_token()
rows = await self.store.get_uncoverted_outbound_room_pokes(
stream_id, room_id
)
if not rows: if not rows:
# If the DB returned nothing then there is nothing left to # If the DB returned nothing then there is nothing left to
# do, *unless* a new device list update happened during the # do, *unless* a new device list update happened during the
# DB query. # DB query.
# Advance `(stream_id, room_id)`.
# `max_stream_id` comes from *before* the query for unconverted
# rows, which means that any unconverted rows must have a larger
# stream ID.
if max_stream_id > stream_id:
stream_id, room_id = max_stream_id, ""
await self.store.set_device_change_last_converted_pos(
stream_id, room_id
)
else:
assert max_stream_id == stream_id
# Avoid moving `room_id` backwards.
pass
if self._handle_new_device_update_new_data: if self._handle_new_device_update_new_data:
continue continue
else: else:
@ -718,7 +755,6 @@ class DeviceHandler(DeviceWorkerHandler):
user_id=user_id, user_id=user_id,
device_id=device_id, device_id=device_id,
room_id=room_id, room_id=room_id,
stream_id=stream_id,
hosts=hosts, hosts=hosts,
context=opentracing_context, context=opentracing_context,
) )
@ -752,6 +788,12 @@ class DeviceHandler(DeviceWorkerHandler):
hosts_already_sent_to.update(hosts) hosts_already_sent_to.update(hosts)
current_stream_id = stream_id current_stream_id = stream_id
# Advance `(stream_id, room_id)`.
_, _, room_id, stream_id, _ = rows[-1]
await self.store.set_device_change_last_converted_pos(
stream_id, room_id
)
finally: finally:
self._handle_new_device_update_is_processing = False self._handle_new_device_update_is_processing = False
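Compressing the resumable loop above: the converter persists its position as a (stream_id, room_id) pair, and when the DB returns no more unconverted rows it fast-forwards the cursor to the stream token captured before the query, since any row added afterwards must carry a larger stream ID. The store method names below are placeholders:

    # Hedged sketch of the cursor handling; the store methods are placeholders.
    async def convert_outbound_pokes(store, convert_rows) -> None:
        stream_id, room_id = await store.get_last_converted_pos()
        while True:
            max_stream_id = store.get_device_stream_token()
            rows = await store.get_unconverted_rows(stream_id, room_id)

            if not rows:
                if max_stream_id > stream_id:
                    # Everything up to max_stream_id has now been converted.
                    stream_id, room_id = max_stream_id, ""
                    await store.set_last_converted_pos(stream_id, room_id)
                return

            await convert_rows(rows)

            # Advance the cursor to the last row handled.
            _, _, room_id, stream_id, _ = rows[-1]
            await store.set_last_converted_pos(stream_id, room_id)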
@ -834,7 +876,6 @@ class DeviceHandler(DeviceWorkerHandler):
user_id=user_id, user_id=user_id,
device_id=device_id, device_id=device_id,
room_id=room_id, room_id=room_id,
stream_id=None,
hosts=potentially_changed_hosts, hosts=potentially_changed_hosts,
context=None, context=None,
) )
@ -858,7 +899,36 @@ def _update_device_from_client_ips(
) )
class DeviceListUpdater: class DeviceListWorkerUpdater:
"Handles incoming device list updates from federation and contacts the main process over replication"
def __init__(self, hs: "HomeServer"):
from synapse.replication.http.devices import (
ReplicationUserDevicesResyncRestServlet,
)
self._user_device_resync_client = (
ReplicationUserDevicesResyncRestServlet.make_client(hs)
)
async def user_device_resync(
self, user_id: str, mark_failed_as_stale: bool = True
) -> Optional[JsonDict]:
"""Fetches all devices for a user and updates the device cache with them.
Args:
user_id: The ID of the user whose device list will be updated.
mark_failed_as_stale: Whether to mark the user's device list as stale
if the attempt to resync failed.
Returns:
A dict with device info as under the "devices" in the result of this
request:
https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
"""
return await self._user_device_resync_client(user_id=user_id)
class DeviceListUpdater(DeviceListWorkerUpdater):
"Handles incoming device list updates from federation and updates the DB" "Handles incoming device list updates from federation and updates the DB"
def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):

View file

@ -27,9 +27,9 @@ from twisted.internet import defer
from synapse.api.constants import EduTypes from synapse.api.constants import EduTypes
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import ( from synapse.types import (
JsonDict, JsonDict,
UserID, UserID,
@ -56,27 +56,23 @@ class E2eKeysHandler:
self.is_mine = hs.is_mine self.is_mine = hs.is_mine
self.clock = hs.get_clock() self.clock = hs.get_clock()
self._edu_updater = SigningKeyEduUpdater(hs, self)
federation_registry = hs.get_federation_registry() federation_registry = hs.get_federation_registry()
self._is_master = hs.config.worker.worker_app is None is_master = hs.config.worker.worker_app is None
if not self._is_master: if is_master:
self._user_device_resync_client = ( edu_updater = SigningKeyEduUpdater(hs)
ReplicationUserDevicesResyncRestServlet.make_client(hs)
)
else:
# Only register this edu handler on master as it requires writing # Only register this edu handler on master as it requires writing
# device updates to the db # device updates to the db
federation_registry.register_edu_handler( federation_registry.register_edu_handler(
EduTypes.SIGNING_KEY_UPDATE, EduTypes.SIGNING_KEY_UPDATE,
self._edu_updater.incoming_signing_key_update, edu_updater.incoming_signing_key_update,
) )
# also handle the unstable version # also handle the unstable version
# FIXME: remove this when enough servers have upgraded # FIXME: remove this when enough servers have upgraded
federation_registry.register_edu_handler( federation_registry.register_edu_handler(
EduTypes.UNSTABLE_SIGNING_KEY_UPDATE, EduTypes.UNSTABLE_SIGNING_KEY_UPDATE,
self._edu_updater.incoming_signing_key_update, edu_updater.incoming_signing_key_update,
) )
# doesn't really work as part of the generic query API, because the # doesn't really work as part of the generic query API, because the
@ -319,14 +315,13 @@ class E2eKeysHandler:
# probably be tracking their device lists. However, we haven't # probably be tracking their device lists. However, we haven't
# done an initial sync on the device list so we do it now. # done an initial sync on the device list so we do it now.
try: try:
if self._is_master: resync_results = (
resync_results = await self.device_handler.device_list_updater.user_device_resync( await self.device_handler.device_list_updater.user_device_resync(
user_id user_id
) )
else: )
resync_results = await self._user_device_resync_client( if resync_results is None:
user_id=user_id raise ValueError("Device resync failed")
)
# Add the device keys to the results. # Add the device keys to the results.
user_devices = resync_results["devices"] user_devices = resync_results["devices"]
@ -605,6 +600,8 @@ class E2eKeysHandler:
async def upload_keys_for_user( async def upload_keys_for_user(
self, user_id: str, device_id: str, keys: JsonDict self, user_id: str, device_id: str, keys: JsonDict
) -> JsonDict: ) -> JsonDict:
# This can only be called from the main process.
assert isinstance(self.device_handler, DeviceHandler)
time_now = self.clock.time_msec() time_now = self.clock.time_msec()
@ -732,6 +729,8 @@ class E2eKeysHandler:
user_id: the user uploading the keys user_id: the user uploading the keys
keys: the signing keys keys: the signing keys
""" """
# This can only be called from the main process.
assert isinstance(self.device_handler, DeviceHandler)
# if a master key is uploaded, then check it. Otherwise, load the # if a master key is uploaded, then check it. Otherwise, load the
# stored master key, to check signatures on other keys # stored master key, to check signatures on other keys
@ -823,6 +822,9 @@ class E2eKeysHandler:
Raises: Raises:
SynapseError: if the signatures dict is not valid. SynapseError: if the signatures dict is not valid.
""" """
# This can only be called from the main process.
assert isinstance(self.device_handler, DeviceHandler)
failures = {} failures = {}
# signatures to be stored. Each item will be a SignatureListItem # signatures to be stored. Each item will be a SignatureListItem
@ -870,7 +872,7 @@ class E2eKeysHandler:
- signatures of the user's master key by the user's devices. - signatures of the user's master key by the user's devices.
Args: Args:
user_id (string): the user uploading the keys user_id: the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys signatures (dict[string, dict]): map of devices to signed keys
Returns: Returns:
@ -1200,6 +1202,9 @@ class E2eKeysHandler:
A tuple of the retrieved key content, the key's ID and the matching VerifyKey. A tuple of the retrieved key content, the key's ID and the matching VerifyKey.
If the key cannot be retrieved, all values in the tuple will instead be None. If the key cannot be retrieved, all values in the tuple will instead be None.
""" """
# This can only be called from the main process.
assert isinstance(self.device_handler, DeviceHandler)
try: try:
remote_result = await self.federation.query_user_devices( remote_result = await self.federation.query_user_devices(
user.domain, user.to_string() user.domain, user.to_string()
@ -1396,11 +1401,14 @@ class SignatureListItem:
class SigningKeyEduUpdater: class SigningKeyEduUpdater:
"""Handles incoming signing key updates from federation and updates the DB""" """Handles incoming signing key updates from federation and updates the DB"""
def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler): def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.federation = hs.get_federation_client() self.federation = hs.get_federation_client()
self.clock = hs.get_clock() self.clock = hs.get_clock()
self.e2e_keys_handler = e2e_keys_handler
device_handler = hs.get_device_handler()
assert isinstance(device_handler, DeviceHandler)
self._device_handler = device_handler
self._remote_edu_linearizer = Linearizer(name="remote_signing_key") self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
@ -1445,9 +1453,6 @@ class SigningKeyEduUpdater:
user_id: the user whose updates we are processing user_id: the user whose updates we are processing
""" """
device_handler = self.e2e_keys_handler.device_handler
device_list_updater = device_handler.device_list_updater
async with self._remote_edu_linearizer.queue(user_id): async with self._remote_edu_linearizer.queue(user_id):
pending_updates = self._pending_updates.pop(user_id, []) pending_updates = self._pending_updates.pop(user_id, [])
if not pending_updates: if not pending_updates:
@ -1459,13 +1464,11 @@ class SigningKeyEduUpdater:
logger.info("pending updates: %r", pending_updates) logger.info("pending updates: %r", pending_updates)
for master_key, self_signing_key in pending_updates: for master_key, self_signing_key in pending_updates:
new_device_ids = ( new_device_ids = await self._device_handler.device_list_updater.process_cross_signing_key_update(
await device_list_updater.process_cross_signing_key_update( user_id,
user_id, master_key,
master_key, self_signing_key,
self_signing_key,
)
) )
device_ids = device_ids + new_device_ids device_ids = device_ids + new_device_ids
await device_handler.notify_device_update(user_id, device_ids) await self._device_handler.notify_device_update(user_id, device_ids)

View file

@ -377,8 +377,9 @@ class E2eRoomKeysHandler:
"""Deletes a given version of the user's e2e_room_keys backup """Deletes a given version of the user's e2e_room_keys backup
Args: Args:
user_id(str): the user whose current backup version we're deleting user_id: the user whose current backup version we're deleting
version(str): the version id of the backup being deleted version: Optional. the version ID of the backup version we're deleting
If missing, we delete the current backup version info.
Raises: Raises:
NotFoundError: if this backup version doesn't exist NotFoundError: if this backup version doesn't exist
""" """

View file

@ -45,6 +45,7 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock() self._clock = hs.get_clock()
self._store = hs.get_datastores().main self._store = hs.get_datastores().main
self._state_storage_controller = hs.get_storage_controllers().state
self._server_name = hs.hostname self._server_name = hs.hostname
async def check_auth_rules_from_context( async def check_auth_rules_from_context(
@ -179,17 +180,22 @@ class EventAuthHandler:
this function may return an incorrect result as we are not able to fully this function may return an incorrect result as we are not able to fully
track server membership in a room without full state. track server membership in a room without full state.
""" """
if not allow_partial_state_rooms and await self._store.is_partial_state_room( if await self._store.is_partial_state_room(room_id):
room_id if allow_partial_state_rooms:
): current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation(
raise AuthError( room_id
403, )
"Unable to authorise you right now; room is partial-stated here.", if host not in current_hosts:
errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE, raise AuthError(403, "Host not in room (partial-state approx).")
) else:
raise AuthError(
if not await self.is_host_in_room(room_id, host): 403,
raise AuthError(403, "Host not in room.") "Unable to authorise you right now; room is partial-stated here.",
errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
)
else:
if not await self.is_host_in_room(room_id, host):
raise AuthError(403, "Host not in room.")
async def check_restricted_join_rules( async def check_restricted_join_rules(
self, self,

View file

@ -379,6 +379,7 @@ class FederationHandler:
filtered_extremities = await filter_events_for_server( filtered_extremities = await filter_events_for_server(
self._storage_controllers, self._storage_controllers,
self.server_name, self.server_name,
self.server_name,
events_to_check, events_to_check,
redact=False, redact=False,
check_history_visibility_only=True, check_history_visibility_only=True,
@ -1231,7 +1232,9 @@ class FederationHandler:
async def on_backfill_request( async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]: ) -> List[EventBase]:
await self._event_auth_handler.assert_host_in_room(room_id, origin) # We allow partially joined rooms since in this case we are filtering out
# non-local events in `filter_events_for_server`.
await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
# Synapse asks for 100 events per backfill request. Do not allow more. # Synapse asks for 100 events per backfill request. Do not allow more.
limit = min(limit, 100) limit = min(limit, 100)
@ -1252,7 +1255,7 @@ class FederationHandler:
) )
events = await filter_events_for_server( events = await filter_events_for_server(
self._storage_controllers, origin, events self._storage_controllers, origin, self.server_name, events
) )
return events return events
@ -1283,7 +1286,7 @@ class FederationHandler:
await self._event_auth_handler.assert_host_in_room(event.room_id, origin) await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
events = await filter_events_for_server( events = await filter_events_for_server(
self._storage_controllers, origin, [event] self._storage_controllers, origin, self.server_name, [event]
) )
event = events[0] event = events[0]
return event return event
@ -1296,7 +1299,9 @@ class FederationHandler:
latest_events: List[str], latest_events: List[str],
limit: int, limit: int,
) -> List[EventBase]: ) -> List[EventBase]:
await self._event_auth_handler.assert_host_in_room(room_id, origin) # We allow partially joined rooms since in this case we are filtering out
# non-local events in `filter_events_for_server`.
await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
# Only allow up to 20 events to be retrieved per request. # Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20) limit = min(limit, 20)
@ -1309,7 +1314,7 @@ class FederationHandler:
) )
missing_events = await filter_events_for_server( missing_events = await filter_events_for_server(
self._storage_controllers, origin, missing_events self._storage_controllers, origin, self.server_name, missing_events
) )
return missing_events return missing_events
@ -1596,8 +1601,8 @@ class FederationHandler:
Fetch the complexity of a remote room over federation. Fetch the complexity of a remote room over federation.
Args: Args:
remote_room_hosts (list[str]): The remote servers to ask. remote_room_hosts: The remote servers to ask.
room_id (str): The room ID to ask about. room_id: The room ID to ask about.
Returns: Returns:
Dict contains the complexity Dict contains the complexity

View file

@ -711,7 +711,7 @@ class IdentityHandler:
inviter_display_name: The current display name of the inviter_display_name: The current display name of the
inviter. inviter.
inviter_avatar_url: The URL of the inviter's avatar. inviter_avatar_url: The URL of the inviter's avatar.
id_access_token (str): The access token to authenticate to the identity id_access_token: The access token to authenticate to the identity
server with server with
Returns: Returns:

View file

@ -1137,11 +1137,13 @@ class EventCreationHandler:
) )
state_events = await self.store.get_events_as_list(state_event_ids) state_events = await self.store.get_events_as_list(state_event_ids)
# Create a StateMap[str] # Create a StateMap[str]
state_map = {(e.type, e.state_key): e.event_id for e in state_events} current_state_ids = {
(e.type, e.state_key): e.event_id for e in state_events
}
# Actually strip down and only use the necessary auth events # Actually strip down and only use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events( auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event, event=temp_event,
current_state_ids=state_map, current_state_ids=current_state_ids,
for_verification=False, for_verification=False,
) )

View file

@ -787,7 +787,7 @@ class OidcProvider:
Must include an ``access_token`` field. Must include an ``access_token`` field.
Returns: Returns:
UserInfo: an object representing the user. an object representing the user.
""" """
logger.debug("Using the OAuth2 access_token to request userinfo") logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata() metadata = await self.load_metadata()
@ -1435,6 +1435,7 @@ class UserAttributeDict(TypedDict):
localpart: Optional[str] localpart: Optional[str]
confirm_localpart: bool confirm_localpart: bool
display_name: Optional[str] display_name: Optional[str]
picture: Optional[str] # may be omitted by older `OidcMappingProviders`
emails: List[str] emails: List[str]
@ -1520,6 +1521,7 @@ env.filters.update(
@attr.s(slots=True, frozen=True, auto_attribs=True) @attr.s(slots=True, frozen=True, auto_attribs=True)
class JinjaOidcMappingConfig: class JinjaOidcMappingConfig:
subject_claim: str subject_claim: str
picture_claim: str
localpart_template: Optional[Template] localpart_template: Optional[Template]
display_name_template: Optional[Template] display_name_template: Optional[Template]
email_template: Optional[Template] email_template: Optional[Template]
@ -1539,6 +1541,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
@staticmethod @staticmethod
def parse_config(config: dict) -> JinjaOidcMappingConfig: def parse_config(config: dict) -> JinjaOidcMappingConfig:
subject_claim = config.get("subject_claim", "sub") subject_claim = config.get("subject_claim", "sub")
picture_claim = config.get("picture_claim", "picture")
def parse_template_config(option_name: str) -> Optional[Template]: def parse_template_config(option_name: str) -> Optional[Template]:
if option_name not in config: if option_name not in config:
@ -1572,6 +1575,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
return JinjaOidcMappingConfig( return JinjaOidcMappingConfig(
subject_claim=subject_claim, subject_claim=subject_claim,
picture_claim=picture_claim,
localpart_template=localpart_template, localpart_template=localpart_template,
display_name_template=display_name_template, display_name_template=display_name_template,
email_template=email_template, email_template=email_template,
@ -1611,10 +1615,13 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
if email: if email:
emails.append(email) emails.append(email)
picture = userinfo.get("picture")
return UserAttributeDict( return UserAttributeDict(
localpart=localpart, localpart=localpart,
display_name=display_name, display_name=display_name,
emails=emails, emails=emails,
picture=picture,
confirm_localpart=self._config.confirm_localpart, confirm_localpart=self._config.confirm_localpart,
) )
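For reference, the new picture support boils down to reading a configurable userinfo claim (defaulting to "picture") and passing it through as an optional user attribute. A minimal, illustrative mapping function, not the real provider class:

    # Illustrative only; the claim names and returned dict shape are assumptions.
    from typing import Any, Dict, Optional

    def map_userinfo(
        userinfo: Dict[str, Any], picture_claim: str = "picture"
    ) -> Dict[str, Optional[str]]:
        return {
            "localpart": userinfo.get("preferred_username"),
            "display_name": userinfo.get("name"),
            # May be absent; older mapping providers simply omit it.
            "picture": userinfo.get(picture_claim),
        }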

View file

@ -448,6 +448,12 @@ class PaginationHandler:
if pagin_config.from_token: if pagin_config.from_token:
from_token = pagin_config.from_token from_token = pagin_config.from_token
elif pagin_config.direction == "f":
from_token = (
await self.hs.get_event_sources().get_start_token_for_pagination(
room_id
)
)
else: else:
from_token = ( from_token = (
await self.hs.get_event_sources().get_current_token_for_pagination( await self.hs.get_event_sources().get_current_token_for_pagination(

View file

@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC):
"""Get the current presence state for multiple users. """Get the current presence state for multiple users.
Returns: Returns:
dict: `user_id` -> `UserPresenceState` A mapping of `user_id` -> `UserPresenceState`
""" """
states = {} states = {}
missing = [] missing = []
@ -478,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
return _NullContextManager() return _NullContextManager()
prev_state = await self.current_state_for_user(user_id) prev_state = await self.current_state_for_user(user_id)
if prev_state != PresenceState.BUSY: if prev_state.state != PresenceState.BUSY:
# We set state here but pass ignore_status_msg = True as we don't want to # We set state here but pass ignore_status_msg = True as we don't want to
# cause the status message to be cleared. # cause the status message to be cleared.
# Note that this causes last_active_ts to be incremented which is not # Note that this causes last_active_ts to be incremented which is not

View file

@ -92,7 +92,6 @@ class ReceiptsHandler:
continue continue
# Check if these receipts apply to a thread. # Check if these receipts apply to a thread.
thread_id = None
data = user_values.get("data", {}) data = user_values.get("data", {})
thread_id = data.get("thread_id") thread_id = data.get("thread_id")
# If the thread ID is invalid, consider it missing. # If the thread ID is invalid, consider it missing.

View file

@ -38,6 +38,7 @@ from synapse.api.errors import (
) )
from synapse.appservice import ApplicationService from synapse.appservice import ApplicationService
from synapse.config.server import is_threepid_reserved from synapse.config.server import is_threepid_reserved
from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import assert_params_in_dict from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import ( from synapse.replication.http.register import (
@ -848,6 +849,9 @@ class RegistrationHandler:
refresh_token = None refresh_token = None
refresh_token_id = None refresh_token_id = None
# This can only run on the main process.
assert isinstance(self.device_handler, DeviceHandler)
registered_device_id = await self.device_handler.check_device_registered( registered_device_id = await self.device_handler.check_device_registered(
user_id, user_id,
device_id, device_id,

View file

@ -13,17 +13,19 @@
# limitations under the License. # limitations under the License.
import enum import enum
import logging import logging
from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional
import attr import attr
from synapse.api.constants import EventTypes, RelationTypes from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event from synapse.events import EventBase, relation_from_event
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace from synapse.logging.opentracing import trace
from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
from synapse.streams.config import PaginationConfig from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamToken, UserID from synapse.types import JsonDict, Requester, UserID
from synapse.util.async_helpers import gather_results
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client
if TYPE_CHECKING: if TYPE_CHECKING:
@ -172,40 +174,6 @@ class RelationsHandler:
return return_value return return_value
async def get_relations_for_event(
self,
event_id: str,
event: EventBase,
room_id: str,
relation_type: str,
ignored_users: FrozenSet[str] = frozenset(),
) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
"""Get a list of events which relate to an event, ordered by topological ordering.
Args:
event_id: Fetch events that relate to this event ID.
event: The matching EventBase to event_id.
room_id: The room the event belongs to.
relation_type: The type of relation.
ignored_users: The users ignored by the requesting user.
Returns:
List of event IDs that match relations requested. The rows are of
the form `{"event_id": "..."}`.
"""
# Call the underlying storage method, which is cached.
related_events, next_token = await self._main_store.get_relations_for_event(
event_id, event, room_id, relation_type, direction="f"
)
# Filter out ignored users and convert to the expected format.
related_events = [
event for event in related_events if event.sender not in ignored_users
]
return related_events, next_token
async def redact_events_related_to( async def redact_events_related_to(
self, self,
requester: Requester, requester: Requester,
@ -259,51 +227,107 @@ class RelationsHandler:
e.msg, e.msg,
) )
async def get_annotations_for_event( async def get_annotations_for_events(
self, self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
event_id: str, ) -> Dict[str, List[JsonDict]]:
room_id: str, """Get a list of annotations to the given events, grouped by event type and
limit: int = 5,
ignored_users: FrozenSet[str] = frozenset(),
) -> List[JsonDict]:
"""Get a list of annotations on the event, grouped by event type and
aggregation key, sorted by count. aggregation key, sorted by count.
This is used e.g. to get the what and how many reactions have happend This is used e.g. to get the what and how many reactions have happened
on an event. on an event.
Args: Args:
event_id: Fetch events that relate to this event ID. event_ids: Fetch events that relate to these event IDs.
room_id: The room the event belongs to.
limit: Only fetch the `limit` groups.
ignored_users: The users ignored by the requesting user. ignored_users: The users ignored by the requesting user.
Returns: Returns:
List of groups of annotations that match. Each row is a dict with A map of event IDs to a list of groups of annotations that match.
`type`, `key` and `count` fields. Each entry is a dict with `type`, `key` and `count` fields.
""" """
# Get the base results for all users. # Get the base results for all users.
full_results = await self._main_store.get_aggregation_groups_for_event( full_results = await self._main_store.get_aggregation_groups_for_events(
event_id, room_id, limit event_ids
) )
# Avoid additional logic if there are no ignored users.
if not ignored_users:
return {
event_id: results
for event_id, results in full_results.items()
if results
}
# Then subtract off the results for any ignored users. # Then subtract off the results for any ignored users.
ignored_results = await self._main_store.get_aggregation_groups_for_users( ignored_results = await self._main_store.get_aggregation_groups_for_users(
event_id, room_id, limit, ignored_users [event_id for event_id, results in full_results.items() if results],
ignored_users,
) )
filtered_results = [] filtered_results = {}
for result in full_results: for event_id, results in full_results.items():
key = (result["type"], result["key"]) # If no annotations, skip.
if key in ignored_results: if not results:
result = result.copy() continue
result["count"] -= ignored_results[key]
if result["count"] <= 0: # If there are not ignored results for this event, copy verbatim.
continue if event_id not in ignored_results:
filtered_results.append(result) filtered_results[event_id] = results
continue
# Otherwise, subtract out the ignored results.
event_ignored_results = ignored_results[event_id]
for result in results:
key = (result["type"], result["key"])
if key in event_ignored_results:
# Ensure to not modify the cache.
result = result.copy()
result["count"] -= event_ignored_results[key]
if result["count"] <= 0:
continue
filtered_results.setdefault(event_id, []).append(result)
return filtered_results return filtered_results
async def get_references_for_events(
self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
) -> Dict[str, List[_RelatedEvent]]:
"""Get a list of references to the given events.
Args:
event_ids: Fetch events that relate to these event IDs.
ignored_users: The users ignored by the requesting user.
Returns:
A map of event IDs to a list related events.
"""
related_events = await self._main_store.get_references_for_events(event_ids)
# Avoid additional logic if there are no ignored users.
if not ignored_users:
return {
event_id: results
for event_id, results in related_events.items()
if results
}
# Filter out ignored users.
results = {}
for event_id, events in related_events.items():
# If no references, skip.
if not events:
continue
# Filter ignored users out.
events = [event for event in events if event.sender not in ignored_users]
# If there are no events left, skip this event.
if not events:
continue
results[event_id] = events
return results
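The two methods above follow the same shape: fetch the cached per-event results in bulk, then, only if the requester has ignored users, subtract or filter those users out without mutating the cached values. A condensed sketch of the annotation subtraction, with assumed input shapes:

    # Condensed sketch of subtracting ignored users' reactions from the cached
    # per-event aggregation groups. Input shapes are assumptions for illustration.
    from typing import Dict, List, Tuple

    def subtract_ignored(
        full_results: Dict[str, List[dict]],
        ignored_results: Dict[str, Dict[Tuple[str, str], int]],
    ) -> Dict[str, List[dict]]:
        filtered: Dict[str, List[dict]] = {}
        for event_id, groups in full_results.items():
            if not groups:
                continue
            event_ignored = ignored_results.get(event_id)
            if not event_ignored:
                # Nothing to subtract: reuse the cached list verbatim.
                filtered[event_id] = groups
                continue
            for group in groups:
                key = (group["type"], group["key"])
                if key in event_ignored:
                    # Copy before mutating so the cache is left untouched.
                    group = dict(group, count=group["count"] - event_ignored[key])
                    if group["count"] <= 0:
                        continue
                filtered.setdefault(event_id, []).append(group)
        return filtered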
async def _get_threads_for_events( async def _get_threads_for_events(
self, self,
events_by_id: Dict[str, EventBase], events_by_id: Dict[str, EventBase],
@ -366,59 +390,66 @@ class RelationsHandler:
results = {} results = {}
for event_id, summary in summaries.items(): for event_id, summary in summaries.items():
if summary: # If no thread, skip.
thread_count, latest_thread_event = summary if not summary:
continue
# Subtract off the count of any ignored users. thread_count, latest_thread_event = summary
for ignored_user in ignored_users:
thread_count -= ignored_results.get((event_id, ignored_user), 0)
# This is gnarly, but if the latest event is from an ignored user, # Subtract off the count of any ignored users.
# attempt to find one that isn't from an ignored user. for ignored_user in ignored_users:
if latest_thread_event.sender in ignored_users: thread_count -= ignored_results.get((event_id, ignored_user), 0)
room_id = latest_thread_event.room_id
# If the root event is not found, something went wrong, do # This is gnarly, but if the latest event is from an ignored user,
# not include a summary of the thread. # attempt to find one that isn't from an ignored user.
event = await self._event_handler.get_event(user, room_id, event_id) if latest_thread_event.sender in ignored_users:
if event is None: room_id = latest_thread_event.room_id
continue
potential_events, _ = await self.get_relations_for_event( # If the root event is not found, something went wrong, do
event_id, # not include a summary of the thread.
event, event = await self._event_handler.get_event(user, room_id, event_id)
room_id, if event is None:
RelationTypes.THREAD, continue
ignored_users,
)
# If all found events are from ignored users, do not include # Attempt to find another event to use as the latest event.
# a summary of the thread. potential_events, _ = await self._main_store.get_relations_for_event(
if not potential_events: event_id, event, room_id, RelationTypes.THREAD, direction="f"
continue
# The *last* event returned is the one that is cared about.
event = await self._event_handler.get_event(
user, room_id, potential_events[-1].event_id
)
# It is unexpected that the event will not exist.
if event is None:
logger.warning(
"Unable to fetch latest event in a thread with event ID: %s",
potential_events[-1].event_id,
)
continue
latest_thread_event = event
results[event_id] = _ThreadAggregation(
latest_event=latest_thread_event,
count=thread_count,
# If there's a thread summary it must also exist in the
# participated dictionary.
current_user_participated=events_by_id[event_id].sender == user_id
or participated[event_id],
) )
# Filter out ignored users.
potential_events = [
event
for event in potential_events
if event.sender not in ignored_users
]
# If all found events are from ignored users, do not include
# a summary of the thread.
if not potential_events:
continue
# The *last* event returned is the one that is cared about.
event = await self._event_handler.get_event(
user, room_id, potential_events[-1].event_id
)
# It is unexpected that the event will not exist.
if event is None:
logger.warning(
"Unable to fetch latest event in a thread with event ID: %s",
potential_events[-1].event_id,
)
continue
latest_thread_event = event
results[event_id] = _ThreadAggregation(
latest_event=latest_thread_event,
count=thread_count,
# If there's a thread summary it must also exist in the
# participated dictionary.
current_user_participated=events_by_id[event_id].sender == user_id
or participated[event_id],
)
return results return results
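The thread-summary adjustment above does two things per ignored-user-aware request: lower the reply count by the ignored users' contributions, and, if the cached latest event was sent by an ignored user, re-fetch the thread's relations to pick the newest reply from a non-ignored sender. Roughly, using hypothetical helpers:

    # Rough sketch of adjusting a cached thread summary for ignored users.
    from typing import Dict, FrozenSet, List, Optional, Tuple

    async def adjust_summary(
        thread_count: int,
        latest_event,  # event-like object with a .sender attribute
        ignored_users: FrozenSet[str],
        ignored_counts: Dict[str, int],  # sender -> number of ignored replies
        fetch_thread_replies,  # async callable returning replies, oldest first
    ) -> Optional[Tuple[int, object]]:
        # Drop ignored users' replies from the count.
        for user in ignored_users:
            thread_count -= ignored_counts.get(user, 0)

        if latest_event.sender in ignored_users:
            replies: List = await fetch_thread_replies()
            replies = [r for r in replies if r.sender not in ignored_users]
            if not replies:
                # Every reply is from an ignored user: omit the summary entirely.
                return None
            # The last reply returned is the newest acceptable one.
            latest_event = replies[-1]

        return thread_count, latest_event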
@trace @trace
@ -496,49 +527,56 @@ class RelationsHandler:
# (as that is what makes it part of the thread). # (as that is what makes it part of the thread).
relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
# Fetch other relations per event. async def _fetch_annotations() -> None:
for event in events_by_id.values(): """Fetch any annotations (ie, reactions) to bundle with this event."""
# Fetch any annotations (ie, reactions) to bundle with this event. annotations_by_event_id = await self.get_annotations_for_events(
annotations = await self.get_annotations_for_event( events_by_id.keys(), ignored_users=ignored_users
event.event_id, event.room_id, ignored_users=ignored_users
) )
if annotations: for event_id, annotations in annotations_by_event_id.items():
results.setdefault( if annotations:
event.event_id, BundledAggregations() results.setdefault(event_id, BundledAggregations()).annotations = {
).annotations = {"chunk": annotations} "chunk": annotations
}
# Fetch any references to bundle with this event. async def _fetch_references() -> None:
references, next_token = await self.get_relations_for_event( """Fetch any references to bundle with this event."""
event.event_id, references_by_event_id = await self.get_references_for_events(
event, events_by_id.keys(), ignored_users=ignored_users
event.room_id,
RelationTypes.REFERENCE,
ignored_users=ignored_users,
) )
if references: for event_id, references in references_by_event_id.items():
aggregations = results.setdefault(event.event_id, BundledAggregations()) if references:
aggregations.references = { results.setdefault(event_id, BundledAggregations()).references = {
"chunk": [{"event_id": ev.event_id} for ev in references] "chunk": [{"event_id": ev.event_id} for ev in references]
} }
if next_token: async def _fetch_edits() -> None:
aggregations.references["next_batch"] = await next_token.to_string( """
self._main_store Fetch any edits (but not for redacted events).
)
# Fetch any edits (but not for redacted events). Note that there is no use in limiting edits by ignored users since the
# parent event should be ignored in the first place if the user is ignored.
# Note that there is no use in limiting edits by ignored users since the """
# parent event should be ignored in the first place if the user is ignored. edits = await self._main_store.get_applicable_edits(
edits = await self._main_store.get_applicable_edits( [
[ event_id
event_id for event_id, event in events_by_id.items()
for event_id, event in events_by_id.items() if not event.internal_metadata.is_redacted()
if not event.internal_metadata.is_redacted() ]
] )
for event_id, edit in edits.items():
results.setdefault(event_id, BundledAggregations()).replace = edit
# Parallelize the calls for annotations, references, and edits since they
# are unrelated.
await make_deferred_yieldable(
gather_results(
(
run_in_background(_fetch_annotations),
run_in_background(_fetch_references),
run_in_background(_fetch_edits),
)
)
) )
for event_id, edit in edits.items():
results.setdefault(event_id, BundledAggregations()).replace = edit
return results return results
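Since the three bundling queries (annotations, references, edits) are independent, they are kicked off together and awaited as a group. The pattern used in the hunk above is run_in_background plus gather_results wrapped in make_deferred_yieldable so that logcontexts are preserved; a minimal standalone sketch with placeholder fetchers:

    # Minimal sketch of running independent async fetches in parallel using
    # the same Synapse helpers as the diff above; the fetchers are placeholders.
    from synapse.logging.context import make_deferred_yieldable, run_in_background
    from synapse.util.async_helpers import gather_results

    async def fetch_all_bundles(fetch_annotations, fetch_references, fetch_edits) -> None:
        await make_deferred_yieldable(
            gather_results(
                (
                    run_in_background(fetch_annotations),
                    run_in_background(fetch_references),
                    run_in_background(fetch_edits),
                )
            )
        )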
@ -571,7 +609,7 @@ class RelationsHandler:
room_id, requester, allow_departed_users=True room_id, requester, allow_departed_users=True
) )
# Note that ignored users are not passed into get_relations_for_event # Note that ignored users are not passed into get_threads
# below. Ignored users are handled in filter_events_for_client (and by # below. Ignored users are handled in filter_events_for_client (and by
# not passing them in here we should get a better cache hit rate). # not passing them in here we should get a better cache hit rate).
thread_roots, next_batch = await self._main_store.get_threads( thread_roots, next_batch = await self._main_store.get_threads(

View file

@ -441,7 +441,7 @@ class DefaultSamlMappingProvider:
client_redirect_url: where the client wants to redirect to client_redirect_url: where the client wants to redirect to
Returns: Returns:
dict: A dict containing new user attributes. Possible keys: A dict containing new user attributes. Possible keys:
* mxid_localpart (str): Required. The localpart of the user's mxid * mxid_localpart (str): Required. The localpart of the user's mxid
* displayname (str): The displayname of the user * displayname (str): The displayname of the user
* emails (list[str]): Any emails for the user * emails (list[str]): Any emails for the user
@ -483,7 +483,7 @@ class DefaultSamlMappingProvider:
Args: Args:
config: A dictionary containing configuration options for this provider config: A dictionary containing configuration options for this provider
Returns: Returns:
SamlConfig: A custom config object for this module A custom config object for this module
""" """
# Parse config options and use defaults where necessary # Parse config options and use defaults where necessary
mxid_source_attribute = config.get("mxid_source_attribute", "uid") mxid_source_attribute = config.get("mxid_source_attribute", "uid")

View file

@ -15,6 +15,7 @@ import logging
from typing import TYPE_CHECKING, Optional from typing import TYPE_CHECKING, Optional
from synapse.api.errors import Codes, StoreError, SynapseError from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.types import Requester from synapse.types import Requester
if TYPE_CHECKING: if TYPE_CHECKING:
@ -29,7 +30,10 @@ class SetPasswordHandler:
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self._auth_handler = hs.get_auth_handler() self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler() # This can only be instantiated on the main process.
device_handler = hs.get_device_handler()
assert isinstance(device_handler, DeviceHandler)
self._device_handler = device_handler
async def set_password( async def set_password(
self, self,

View file

@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import abc import abc
import hashlib
import io
import logging import logging
from typing import ( from typing import (
TYPE_CHECKING, TYPE_CHECKING,
@ -37,6 +39,7 @@ from twisted.web.server import Request
from synapse.api.constants import LoginType from synapse.api.constants import LoginType
from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError
from synapse.config.sso import SsoAttributeRequirement from synapse.config.sso import SsoAttributeRequirement
from synapse.handlers.device import DeviceHandler
from synapse.handlers.register import init_counters_for_auth_provider from synapse.handlers.register import init_counters_for_auth_provider
from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http import get_request_user_agent from synapse.http import get_request_user_agent
@ -137,6 +140,7 @@ class UserAttributes:
localpart: Optional[str] localpart: Optional[str]
confirm_localpart: bool = False confirm_localpart: bool = False
display_name: Optional[str] = None display_name: Optional[str] = None
picture: Optional[str] = None
emails: Collection[str] = attr.Factory(list) emails: Collection[str] = attr.Factory(list)
@ -195,6 +199,10 @@ class SsoHandler:
self._error_template = hs.config.sso.sso_error_template self._error_template = hs.config.sso.sso_error_template
self._bad_user_template = hs.config.sso.sso_auth_bad_user_template self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler() self._profile_handler = hs.get_profile_handler()
self._media_repo = (
hs.get_media_repository() if hs.config.media.can_load_media_repo else None
)
self._http_client = hs.get_proxied_blacklisted_http_client()
# The following template is shown after a successful user interactive # The following template is shown after a successful user interactive
# authentication session. It tells the user they can close the window. # authentication session. It tells the user they can close the window.
@ -494,6 +502,8 @@ class SsoHandler:
await self._profile_handler.set_displayname( await self._profile_handler.set_displayname(
user_id_obj, requester, attributes.display_name, True user_id_obj, requester, attributes.display_name, True
) )
if attributes.picture:
await self.set_avatar(user_id, attributes.picture)
await self._auth_handler.complete_sso_login( await self._auth_handler.complete_sso_login(
user_id, user_id,
@ -702,8 +712,110 @@ class SsoHandler:
await self._store.record_user_external_id( await self._store.record_user_external_id(
auth_provider_id, remote_user_id, registered_user_id auth_provider_id, remote_user_id, registered_user_id
) )
# Set avatar, if available
if attributes.picture:
await self.set_avatar(registered_user_id, attributes.picture)
return registered_user_id return registered_user_id
async def set_avatar(self, user_id: str, picture_https_url: str) -> bool:
"""Set avatar of the user.
This downloads the image file from the URL provided, stores that in
the media repository and then sets the avatar on the user's profile.
It can detect if the same image is being saved again and bails early by storing
the hash of the file in the `upload_name` of the avatar image.
Currently, it only supports server configurations which run the media repository
within the same process.
It silently fails and logs a warning by raising an exception and catching it
internally if:
* it is unable to fetch the image itself (non 200 status code) or
* the image supplied is bigger than max allowed size or
* the image type is not one of the allowed image types.
Args:
user_id: matrix user ID in the form @localpart:domain as a string.
picture_https_url: HTTPS url for the picture image file.
Returns: `True` if the user's avatar has been successfully set to the image at
`picture_https_url`.
"""
if self._media_repo is None:
logger.info(
"failed to set user avatar because out-of-process media repositories "
"are not supported yet "
)
return False
try:
uid = UserID.from_string(user_id)
def is_allowed_mime_type(content_type: str) -> bool:
if (
self._profile_handler.allowed_avatar_mimetypes
and content_type
not in self._profile_handler.allowed_avatar_mimetypes
):
return False
return True
# download picture, enforcing size limit & mime type check
picture = io.BytesIO()
content_length, headers, uri, code = await self._http_client.get_file(
url=picture_https_url,
output_stream=picture,
max_size=self._profile_handler.max_avatar_size,
is_allowed_content_type=is_allowed_mime_type,
)
if code != 200:
raise Exception(
"GET request to download sso avatar image returned {}".format(code)
)
# upload name includes hash of the image file's content so that we can
# easily check if it requires an update or not, the next time user logs in
upload_name = "sso_avatar_" + hashlib.sha256(picture.read()).hexdigest()
# bail if user already has the same avatar
profile = await self._profile_handler.get_profile(user_id)
if profile["avatar_url"] is not None:
server_name = profile["avatar_url"].split("/")[-2]
media_id = profile["avatar_url"].split("/")[-1]
if server_name == self._server_name:
media = await self._media_repo.store.get_local_media(media_id)
if media is not None and upload_name == media["upload_name"]:
logger.info("skipping saving the user avatar")
return True
# store it in media repository
avatar_mxc_url = await self._media_repo.create_content(
media_type=headers[b"Content-Type"][0].decode("utf-8"),
upload_name=upload_name,
content=picture,
content_length=content_length,
auth_user=uid,
)
# save it as user avatar
await self._profile_handler.set_avatar_url(
uid,
create_requester(uid),
str(avatar_mxc_url),
)
logger.info("successfully saved the user avatar")
return True
except Exception:
logger.warning("failed to save the user avatar")
return False
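The deduplication trick above stores a hash of the downloaded image bytes in the media's `upload_name`, so a later login with the same picture can bail out without re-uploading. A minimal sketch of that fingerprinting idea (the helper name is illustrative, not a Synapse API):

    import hashlib
    import io

    def avatar_fingerprint(picture: io.BytesIO, prefix: str = "sso_avatar_") -> str:
        # hash the full buffer contents without depending on the stream position
        return prefix + hashlib.sha256(picture.getvalue()).hexdigest()

    # A previously stored avatar whose upload_name equals avatar_fingerprint(picture)
    # can be reused instead of storing the same bytes again.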
async def complete_sso_ui_auth_request( async def complete_sso_ui_auth_request(
self, self,
auth_provider_id: str, auth_provider_id: str,
@ -1035,6 +1147,8 @@ class SsoHandler:
) -> None: ) -> None:
"""Revoke any devices and in-flight logins tied to a provider session. """Revoke any devices and in-flight logins tied to a provider session.
Can only be called from the main process.
Args: Args:
auth_provider_id: A unique identifier for this SSO provider, e.g. auth_provider_id: A unique identifier for this SSO provider, e.g.
"oidc" or "saml". "oidc" or "saml".
@ -1042,6 +1156,12 @@ class SsoHandler:
expected_user_id: The user we're expecting to logout. If set, it will ignore expected_user_id: The user we're expecting to logout. If set, it will ignore
sessions belonging to other users and log an error. sessions belonging to other users and log an error.
""" """
# It is expected that this is the main process.
assert isinstance(
self._device_handler, DeviceHandler
), "revoking SSO sessions can only be called on the main process"
# Invalidate any running user-mapping sessions # Invalidate any running user-mapping sessions
to_delete = [] to_delete = []
for session_id, session in self._username_mapping_sessions.items(): for session_id, session in self._username_mapping_sessions.items():

View file

@ -1425,14 +1425,14 @@ class SyncHandler:
logger.debug("Fetching OTK data") logger.debug("Fetching OTK data")
device_id = sync_config.device_id device_id = sync_config.device_id
one_time_key_counts: JsonDict = {} one_time_keys_count: JsonDict = {}
unused_fallback_key_types: List[str] = [] unused_fallback_key_types: List[str] = []
if device_id: if device_id:
# TODO: We should have a way to let clients differentiate between the states of: # TODO: We should have a way to let clients differentiate between the states of:
# * no change in OTK count since the provided since token # * no change in OTK count since the provided since token
# * the server has zero OTKs left for this device # * the server has zero OTKs left for this device
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
one_time_key_counts = await self.store.count_e2e_one_time_keys( one_time_keys_count = await self.store.count_e2e_one_time_keys(
user_id, device_id user_id, device_id
) )
unused_fallback_key_types = ( unused_fallback_key_types = (
@ -1462,7 +1462,7 @@ class SyncHandler:
archived=sync_result_builder.archived, archived=sync_result_builder.archived,
to_device=sync_result_builder.to_device, to_device=sync_result_builder.to_device,
device_lists=device_lists, device_lists=device_lists,
device_one_time_keys_count=one_time_key_counts, device_one_time_keys_count=one_time_keys_count,
device_unused_fallback_key_types=unused_fallback_key_types, device_unused_fallback_key_types=unused_fallback_key_types,
next_batch=sync_result_builder.now_token, next_batch=sync_result_builder.now_token,
) )

View file

@ -45,8 +45,7 @@ class AdditionalResource(DirectServeJsonResource):
Args: Args:
hs: homeserver hs: homeserver
-            handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
-                function to be called to handle the request.
+            handler: function to be called to handle the request.
""" """
super().__init__() super().__init__()
self._handler = handler self._handler = handler

View file

@ -155,11 +155,10 @@ class MatrixFederationAgent:
a file for a file upload). Or None if the request is to have a file for a file upload). Or None if the request is to have
no body. no body.
Returns: Returns:
-            Deferred[twisted.web.iweb.IResponse]: A deferred which
-                fires when the header of the response has been received (regardless of the
-                response status code). Fails if there is any problem which prevents that
-                response from being received (including problems that prevent the request
-                from being sent).
+            A deferred which fires when the header of the response has been received
+            (regardless of the response status code). Fails if there is any problem
+            which prevents that response from being received (including problems that
+            prevent the request from being sent).
""" """
# We use urlparse as that will set `port` to None if there is no # We use urlparse as that will set `port` to None if there is no
# explicit port. # explicit port.

View file

@ -951,8 +951,7 @@ class MatrixFederationHttpClient:
args: query params args: query params
Returns: Returns:
-            dict|list: Succeeds when we get a 2xx HTTP response. The
-                result will be the decoded JSON body.
+            Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.
Raises: Raises:
HttpResponseException: If we get an HTTP response code >= 300 HttpResponseException: If we get an HTTP response code >= 300

View file

@ -34,7 +34,7 @@ from twisted.web.client import (
) )
from twisted.web.error import SchemeNotSupported from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
from synapse.http import redact_uri from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
@ -134,7 +134,7 @@ class ProxyAgent(_AgentBase):
uri: bytes, uri: bytes,
headers: Optional[Headers] = None, headers: Optional[Headers] = None,
bodyProducer: Optional[IBodyProducer] = None, bodyProducer: Optional[IBodyProducer] = None,
) -> defer.Deferred: ) -> "defer.Deferred[IResponse]":
""" """
Issue a request to the server indicated by the given uri. Issue a request to the server indicated by the given uri.
@ -157,17 +157,17 @@ class ProxyAgent(_AgentBase):
a file upload). Or, None if the request is to have no body. a file upload). Or, None if the request is to have no body.
Returns: Returns:
-            Deferred[IResponse]: completes when the header of the response has
-                been received (regardless of the response status code).
+            A deferred which completes when the header of the response has
+            been received (regardless of the response status code).
Can fail with: Can fail with:
SchemeNotSupported: if the uri is not http or https SchemeNotSupported: if the uri is not http or https
twisted.internet.error.TimeoutError if the server we are connecting twisted.internet.error.TimeoutError if the server we are connecting
to (proxy or destination) does not accept a connection before to (proxy or destination) does not accept a connection before
connectTimeout. connectTimeout.
... other things too. ... other things too.
""" """
uri = uri.strip() uri = uri.strip()
if not _VALID_URI.match(uri): if not _VALID_URI.match(uri):

View file

@ -267,7 +267,7 @@ class HttpServer(Protocol):
request. The first argument will be the request object and request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex. subsequent arguments will be any matched groups from the regex.
This should return either tuple of (code, response), or None. This should return either tuple of (code, response), or None.
servlet_classname (str): The name of the handler to be used in prometheus servlet_classname: The name of the handler to be used in prometheus
and opentracing logs. and opentracing logs.
""" """

View file

@ -400,7 +400,7 @@ class SynapseRequest(Request):
be sure to call finished_processing. be sure to call finished_processing.
Args: Args:
servlet_name (str): the name of the servlet which will be servlet_name: the name of the servlet which will be
processing this request. This is used in the metrics. processing this request. This is used in the metrics.
It is possible to update this afterwards by updating It is possible to update this afterwards by updating

View file

@ -117,8 +117,7 @@ class ContextResourceUsage:
"""Create a new ContextResourceUsage """Create a new ContextResourceUsage
Args: Args:
copy_from (ContextResourceUsage|None): if not None, an object to copy_from: if not None, an object to copy stats from
copy stats from
""" """
if copy_from is None: if copy_from is None:
self.reset() self.reset()
@ -162,7 +161,7 @@ class ContextResourceUsage:
"""Add another ContextResourceUsage's stats to this one's. """Add another ContextResourceUsage's stats to this one's.
Args: Args:
other (ContextResourceUsage): the other resource usage object other: the other resource usage object
""" """
self.ru_utime += other.ru_utime self.ru_utime += other.ru_utime
self.ru_stime += other.ru_stime self.ru_stime += other.ru_stime
@ -342,7 +341,7 @@ class LoggingContext:
called directly. called directly.
Returns: Returns:
LoggingContext: the current logging context The current logging context
""" """
warnings.warn( warnings.warn(
"synapse.logging.context.LoggingContext.current_context() is deprecated " "synapse.logging.context.LoggingContext.current_context() is deprecated "
@ -362,7 +361,8 @@ class LoggingContext:
called directly. called directly.
Args: Args:
context(LoggingContext): The context to activate. context: The context to activate.
Returns: Returns:
The context that was previously active The context that was previously active
""" """
@ -474,8 +474,7 @@ class LoggingContext:
"""Get resources used by this logcontext so far. """Get resources used by this logcontext so far.
Returns: Returns:
-            ContextResourceUsage: a *copy* of the object tracking resource
-                usage so far
+            A *copy* of the object tracking resource usage so far
""" """
# we always return a copy, for consistency # we always return a copy, for consistency
res = self._resource_usage.copy() res = self._resource_usage.copy()
@ -663,7 +662,8 @@ def current_context() -> LoggingContextOrSentinel:
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel: def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
"""Set the current logging context in thread local storage """Set the current logging context in thread local storage
Args: Args:
context(LoggingContext): The context to activate. context: The context to activate.
Returns: Returns:
The context that was previously active The context that was previously active
""" """
@ -700,7 +700,7 @@ def nested_logging_context(suffix: str) -> LoggingContext:
suffix: suffix to add to the parent context's 'name'. suffix: suffix to add to the parent context's 'name'.
Returns: Returns:
LoggingContext: new logging context. A new logging context.
""" """
curr_context = current_context() curr_context = current_context()
if not curr_context: if not curr_context:
@ -898,20 +898,19 @@ def defer_to_thread(
on it. on it.
Args: Args:
reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread reactor: The reactor in whose main thread the Deferred will be invoked,
the Deferred will be invoked, and whose threadpool we should use for the and whose threadpool we should use for the function.
function.
Normally this will be hs.get_reactor(). Normally this will be hs.get_reactor().
f (callable): The function to call. f: The function to call.
args: positional arguments to pass to f. args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f. kwargs: keyword arguments to pass to f.
Returns: Returns:
Deferred: A Deferred which fires a callback with the result of `f`, or an A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception. errback if `f` throws an exception.
""" """
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
@ -939,20 +938,20 @@ def defer_to_threadpool(
on it. on it.
Args: Args:
reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread reactor: The reactor in whose main thread the Deferred will be invoked.
the Deferred will be invoked. Normally this will be hs.get_reactor(). Normally this will be hs.get_reactor().
threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for threadpool: The threadpool to use for running `f`. Normally this will be
running `f`. Normally this will be hs.get_reactor().getThreadPool(). hs.get_reactor().getThreadPool().
f (callable): The function to call. f: The function to call.
args: positional arguments to pass to f. args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f. kwargs: keyword arguments to pass to f.
Returns: Returns:
Deferred: A Deferred which fires a callback with the result of `f`, or an A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception. errback if `f` throws an exception.
""" """
curr_context = current_context() curr_context = current_context()

View file

@ -721,7 +721,7 @@ def inject_header_dict(
destination: address of entity receiving the span context. Must be given unless destination: address of entity receiving the span context. Must be given unless
check_destination is False. The context will only be injected if the check_destination is False. The context will only be injected if the
destination matches the opentracing whitelist destination matches the opentracing whitelist
check_destination (bool): If false, destination will be ignored and the context check_destination: If false, destination will be ignored and the context
will always be injected. will always be injected.
Note: Note:
@ -780,7 +780,7 @@ def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str
destination: the name of the remote server. destination: the name of the remote server.
Returns: Returns:
dict: the active span's context if opentracing is enabled, otherwise empty. the active span's context if opentracing is enabled, otherwise empty.
""" """
if destination and not whitelisted_homeserver(destination): if destination and not whitelisted_homeserver(destination):

View file

@ -47,11 +47,7 @@ from twisted.python.threadpool import ThreadPool
# This module is imported for its side effects; flake8 needn't warn that it's unused. # This module is imported for its side effects; flake8 needn't warn that it's unused.
import synapse.metrics._reactor_metrics # noqa: F401 import synapse.metrics._reactor_metrics # noqa: F401
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
-from synapse.metrics._legacy_exposition import (
-    MetricsResource,
-    generate_latest,
-    start_http_server,
-)
+from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
from synapse.metrics._types import Collector from synapse.metrics._types import Collector
from synapse.util import SYNAPSE_VERSION from synapse.util import SYNAPSE_VERSION
@ -474,7 +470,6 @@ __all__ = [
"Collector", "Collector",
"MetricsResource", "MetricsResource",
"generate_latest", "generate_latest",
"start_http_server",
"LaterGauge", "LaterGauge",
"InFlightGauge", "InFlightGauge",
"GaugeBucketCollector", "GaugeBucketCollector",

View file

@ -1,288 +0,0 @@
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based off `prometheus_client/exposition.py` from version 0.7.1.
Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""
import logging
import math
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from typing import Any, Dict, List, Type, Union
from urllib.parse import parse_qs, urlparse
from prometheus_client import REGISTRY, CollectorRegistry
from prometheus_client.core import Sample
from twisted.web.resource import Resource
from twisted.web.server import Request
logger = logging.getLogger(__name__)
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
def floatToGoString(d: Union[int, float]) -> str:
d = float(d)
if d == math.inf:
return "+Inf"
elif d == -math.inf:
return "-Inf"
elif math.isnan(d):
return "NaN"
else:
s = repr(d)
dot = s.find(".")
# Go switches to exponents sooner than Python.
# We only need to care about positive values for le/quantile.
if d > 0 and dot > 6:
mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
return f"{mantissa}e+0{dot - 1}"
return s
def sample_line(line: Sample, name: str) -> str:
if line.labels:
labelstr = "{{{0}}}".format(
",".join(
[
'{}="{}"'.format(
k,
v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
)
for k, v in sorted(line.labels.items())
]
)
)
else:
labelstr = ""
timestamp = ""
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = f" {int(float(line.timestamp) * 1000):d}"
return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
# Mapping from new metric names to legacy metric names.
# We translate these back to their old names when exposing them through our
# legacy vendored exporter.
# Only this legacy exposition module applies these name changes.
LEGACY_METRIC_NAMES = {
"synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
"synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
"synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
"synapse_util_caches_cache": "synapse_util_caches_cache:total",
"synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
"synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
"synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
"synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
"synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
"synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
"synapse_admin_mau_current": "synapse_admin_mau:current",
"synapse_admin_mau_max": "synapse_admin_mau:max",
"synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
}
def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
"""
Generate metrics in legacy format. Modern metrics are generated directly
by prometheus-client.
"""
output = []
for metric in registry.collect():
if not metric.samples:
# No samples, don't bother.
continue
# Translate to legacy metric name if it has one.
mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
mnewname = metric.name
mtype = metric.type
# OpenMetrics -> Prometheus
if mtype == "counter":
mnewname = mnewname + "_total"
elif mtype == "info":
mtype = "gauge"
mnewname = mnewname + "_info"
elif mtype == "stateset":
mtype = "gauge"
elif mtype == "gaugehistogram":
mtype = "histogram"
elif mtype == "unknown":
mtype = "untyped"
# Output in the old format for compatibility.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname} {mtype}\n")
om_samples: Dict[str, List[str]] = {}
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == mname + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
# (these come from gaugehistograms which don't get renamed,
# so no need to faff with mnewname)
om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
break
else:
newname = s.name.replace(mnewname, mname)
if ":" in newname and newname.endswith("_total"):
newname = newname[: -len("_total")]
output.append(sample_line(s, newname))
for suffix, lines in sorted(om_samples.items()):
if emit_help:
output.append(
"# HELP {}{} {}\n".format(
mname,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname}{suffix} gauge\n")
output.extend(lines)
# Get rid of the weird colon things while we're at it
if mtype == "counter":
mnewname = mnewname.replace(":total", "")
mnewname = mnewname.replace(":", "_")
if mname == mnewname:
continue
# Also output in the new format, if it's different.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mnewname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mnewname} {mtype}\n")
for s in metric.samples:
# Get rid of the OpenMetrics specific samples (we should already have
# dealt with them above anyway.)
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == mname + suffix:
break
else:
sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
output.append(
sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
)
return "".join(output).encode("utf-8")
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self) -> None:
registry = self.registry
params = parse_qs(urlparse(self.path).query)
if "help" in params:
emit_help = True
else:
emit_help = False
try:
output = generate_latest(registry, emit_help=emit_help)
except Exception:
self.send_error(500, "error generating metric output")
raise
try:
self.send_response(200)
self.send_header("Content-Type", CONTENT_TYPE_LATEST)
self.send_header("Content-Length", str(len(output)))
self.end_headers()
self.wfile.write(output)
except BrokenPipeError as e:
logger.warning(
"BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
)
def log_message(self, format: str, *args: Any) -> None:
"""Log nothing."""
@classmethod
def factory(cls, registry: CollectorRegistry) -> Type:
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
def start_http_server(
port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
) -> None:
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""
isLeaf = True
def __init__(self, registry: CollectorRegistry = REGISTRY):
self.registry = registry
def render_GET(self, request: Request) -> bytes:
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response

View file

@ -0,0 +1,38 @@
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from prometheus_client import REGISTRY, CollectorRegistry, generate_latest
from twisted.web.resource import Resource
from twisted.web.server import Request
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""
isLeaf = True
def __init__(self, registry: CollectorRegistry = REGISTRY):
self.registry = registry
def render_GET(self, request: Request) -> bytes:
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response
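The new module keeps only the Twisted resource and relies on prometheus_client's stock generate_latest for the output format. One possible way to mount such a resource on its own listener (a sketch only; Synapse actually exposes it through its homeserver listener configuration, not like this):

    from prometheus_client import REGISTRY
    from twisted.internet import reactor
    from twisted.web.resource import Resource
    from twisted.web.server import Site

    root = Resource()
    root.putChild(b"metrics", MetricsResource(REGISTRY))  # MetricsResource as defined above
    reactor.listenTCP(9000, Site(root))                   # port 9000 chosen arbitrarily
    reactor.run()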

View file

@ -54,7 +54,9 @@ class CommonUsageMetricsManager:
async def setup(self) -> None: async def setup(self) -> None:
"""Keep the gauges for common usage metrics up to date.""" """Keep the gauges for common usage metrics up to date."""
-        await self._update_gauges()
+        run_as_background_process(
+            desc="common_usage_metrics_update_gauges", func=self._update_gauges
+        )
self._clock.looping_call( self._clock.looping_call(
run_as_background_process, run_as_background_process,
5 * 60 * 1000, 5 * 60 * 1000,

View file

@ -86,6 +86,7 @@ from synapse.handlers.auth import (
ON_LOGGED_OUT_CALLBACK, ON_LOGGED_OUT_CALLBACK,
AuthHandler, AuthHandler,
) )
from synapse.handlers.device import DeviceHandler
from synapse.handlers.push_rules import RuleSpec, check_actions from synapse.handlers.push_rules import RuleSpec, check_actions
from synapse.http.client import SimpleHttpClient from synapse.http.client import SimpleHttpClient
from synapse.http.server import ( from synapse.http.server import (
@ -207,6 +208,7 @@ class ModuleApi:
self._registration_handler = hs.get_registration_handler() self._registration_handler = hs.get_registration_handler()
self._send_email_handler = hs.get_send_email_handler() self._send_email_handler = hs.get_send_email_handler()
self._push_rules_handler = hs.get_push_rules_handler() self._push_rules_handler = hs.get_push_rules_handler()
self._device_handler = hs.get_device_handler()
self.custom_template_dir = hs.config.server.custom_template_directory self.custom_template_dir = hs.config.server.custom_template_directory
try: try:
@ -784,10 +786,12 @@ class ModuleApi:
) -> Generator["defer.Deferred[Any]", Any, None]: ) -> Generator["defer.Deferred[Any]", Any, None]:
"""Invalidate an access token for a user """Invalidate an access token for a user
Can only be called from the main process.
Added in Synapse v0.25.0. Added in Synapse v0.25.0.
Args: Args:
access_token(str): access token access_token: access token
Returns: Returns:
twisted.internet.defer.Deferred - resolves once the access token twisted.internet.defer.Deferred - resolves once the access token
@ -796,6 +800,10 @@ class ModuleApi:
Raises: Raises:
synapse.api.errors.AuthError: the access token is invalid synapse.api.errors.AuthError: the access token is invalid
""" """
assert isinstance(
self._device_handler, DeviceHandler
), "invalidate_access_token can only be called on the main process"
# see if the access token corresponds to a device # see if the access token corresponds to a device
user_info = yield defer.ensureDeferred( user_info = yield defer.ensureDeferred(
self._auth.get_user_by_access_token(access_token) self._auth.get_user_by_access_token(access_token)
@ -805,7 +813,7 @@ class ModuleApi:
if device_id: if device_id:
# delete the device, which will also delete its access tokens # delete the device, which will also delete its access tokens
yield defer.ensureDeferred( yield defer.ensureDeferred(
self._hs.get_device_handler().delete_devices(user_id, [device_id]) self._device_handler.delete_devices(user_id, [device_id])
) )
else: else:
# no associated device. Just delete the access token. # no associated device. Just delete the access token.
@ -832,7 +840,7 @@ class ModuleApi:
**kwargs: named args to be passed to func **kwargs: named args to be passed to func
Returns: Returns:
Deferred[object]: result of func Result of func
""" """
# type-ignore: See https://github.com/python/mypy/issues/8862 # type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred( return defer.ensureDeferred(
@ -924,8 +932,7 @@ class ModuleApi:
to represent 'any') of the room state to acquire. to represent 'any') of the room state to acquire.
Returns: Returns:
twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: The filtered state events in the room.
The filtered state events in the room.
""" """
state_ids = yield defer.ensureDeferred( state_ids = yield defer.ensureDeferred(
self._storage_controllers.state.get_current_state_ids( self._storage_controllers.state.get_current_state_ids(

View file

@ -29,6 +29,7 @@ from typing import (
from prometheus_client import Counter from prometheus_client import Counter
from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes, EventContentFields from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes, EventContentFields
from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion
from synapse.event_auth import auth_types_for_event, get_user_power_level from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
@ -339,13 +340,19 @@ class BulkPushRuleEvaluator:
for user_id, level in notification_levels.items(): for user_id, level in notification_levels.items():
notification_levels[user_id] = int(level) notification_levels[user_id] = int(level)
room_version_features = event.room_version.msc3931_push_features
if not room_version_features:
room_version_features = []
evaluator = PushRuleEvaluator( evaluator = PushRuleEvaluator(
_flatten_dict(event), _flatten_dict(event, room_version=event.room_version),
room_member_count, room_member_count,
sender_power_level, sender_power_level,
notification_levels, notification_levels,
related_events, related_events,
self._related_event_match_enabled, self._related_event_match_enabled,
room_version_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
) )
users = rules_by_user.keys() users = rules_by_user.keys()
@ -421,6 +428,7 @@ StateGroup = Union[object, int]
def _flatten_dict( def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]], d: Union[EventBase, Mapping[str, Any]],
room_version: Optional[RoomVersion] = None,
prefix: Optional[List[str]] = None, prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None, result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]: ) -> Dict[str, str]:
@ -432,6 +440,31 @@ def _flatten_dict(
if isinstance(value, str): if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower() result[".".join(prefix + [key])] = value.lower()
elif isinstance(value, Mapping): elif isinstance(value, Mapping):
# do not set `room_version` due to recursion considerations below
_flatten_dict(value, prefix=(prefix + [key]), result=result) _flatten_dict(value, prefix=(prefix + [key]), result=result)
# `room_version` should only ever be set when looking at the top level of an event
if (
room_version is not None
and PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features
and isinstance(d, EventBase)
):
# Room supports extensible events: replace `content.body` with the plain text
# representation from `m.markup`, as per MSC1767.
markup = d.get("content").get("m.markup")
if room_version.identifier.startswith("org.matrix.msc1767."):
markup = d.get("content").get("org.matrix.msc1767.markup")
if markup is not None and isinstance(markup, list):
text = ""
for rep in markup:
if not isinstance(rep, dict):
# invalid markup - skip all processing
break
if rep.get("mimetype", "text/plain") == "text/plain":
rep_text = rep.get("body")
if rep_text is not None and isinstance(rep_text, str):
text = rep_text.lower()
break
result["content.body"] = text
return result return result
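_flatten_dict turns a nested event body into dotted, lower-cased keys so push rules can match on paths such as content.body. A standalone illustration of that flattening (without the EventBase and room-version handling above):

    from typing import Any, Dict, Mapping

    def flatten(d: Mapping[str, Any], prefix: str = "") -> Dict[str, str]:
        out: Dict[str, str] = {}
        for key, value in d.items():
            dotted = f"{prefix}{key}"
            if isinstance(value, str):
                out[dotted] = value.lower()
            elif isinstance(value, Mapping):
                out.update(flatten(value, prefix=f"{dotted}."))
        return out

    # flatten({"content": {"body": "Hello", "msgtype": "m.text"}})
    # -> {"content.body": "hello", "content.msgtype": "m.text"}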

View file

@ -153,7 +153,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
argument list. argument list.
Returns: Returns:
dict: If POST/PUT request then dictionary must be JSON serialisable, If POST/PUT request then dictionary must be JSON serialisable,
otherwise must be appropriate for adding as query args. otherwise must be appropriate for adding as query args.
""" """
return {} return {}

View file

@ -13,11 +13,12 @@
# limitations under the License. # limitations under the License.
import logging import logging
from typing import TYPE_CHECKING, Tuple from typing import TYPE_CHECKING, Optional, Tuple
from twisted.web.server import Request from twisted.web.server import Request
from synapse.http.server import HttpServer from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict from synapse.types import JsonDict
@ -62,7 +63,12 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
super().__init__(hs) super().__init__(hs)
self.device_list_updater = hs.get_device_handler().device_list_updater from synapse.handlers.device import DeviceHandler
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_list_updater = handler.device_list_updater
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.clock = hs.get_clock() self.clock = hs.get_clock()
@ -72,11 +78,77 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
async def _handle_request( # type: ignore[override] async def _handle_request( # type: ignore[override]
self, request: Request, user_id: str self, request: Request, user_id: str
) -> Tuple[int, JsonDict]: ) -> Tuple[int, Optional[JsonDict]]:
user_devices = await self.device_list_updater.user_device_resync(user_id) user_devices = await self.device_list_updater.user_device_resync(user_id)
return 200, user_devices return 200, user_devices
class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
"""Ask master to upload keys for the user and send them out over federation to
update other servers.
For now, only the master is permitted to handle key upload requests;
any worker can handle key query requests (since they're read-only).
Calls to e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
the main process to accomplish this.
Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
    Request format (borrowed and expanded from KeyUploadServlet):
POST /_synapse/replication/upload_keys_for_user
{
"user_id": "<user_id>",
"device_id": "<device_id>",
"keys": {
....this part can be found in KeyUploadServlet in rest/client/keys.py....
}
}
    Response is equivalent to `/_matrix/client/v3/keys/upload` found in KeyUploadServlet
"""
NAME = "upload_keys_for_user"
PATH_ARGS = ()
CACHE = False
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
@staticmethod
async def _serialize_payload( # type: ignore[override]
user_id: str, device_id: str, keys: JsonDict
) -> JsonDict:
return {
"user_id": user_id,
"device_id": device_id,
"keys": keys,
}
async def _handle_request( # type: ignore[override]
self, request: Request
) -> Tuple[int, JsonDict]:
content = parse_json_object_from_request(request)
user_id = content["user_id"]
device_id = content["device_id"]
keys = content["keys"]
results = await self.e2e_keys_handler.upload_keys_for_user(
user_id, device_id, keys
)
return 200, results
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationUserDevicesResyncRestServlet(hs).register(http_server) ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
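On a worker, make_client turns this endpoint definition into an async callable that performs the HTTP call to the main process; the KeyUploadServlet change later in this commit uses exactly that. A hedged sketch of the calling side (the wrapper function is illustrative, and in real code the client is built once at startup):

    async def upload_keys_via_main_process(
        hs, user_id: str, device_id: str, body: dict
    ) -> dict:
        # proxy a /keys/upload body from a worker to the main process
        key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)
        return await key_uploader(user_id=user_id, device_id=device_id, keys=body)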

View file

@ -1,13 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -1,13 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -1,50 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id
class SlavedIdTracker(AbstractStreamIdTracker):
"""Tracks the "current" stream ID of a stream with a single writer.
See `AbstractStreamIdTracker` for more details.
Note that this class does not work correctly when there are multiple
writers.
"""
def __init__(
self,
db_conn: LoggingDatabaseConnection,
table: str,
column: str,
extra_tables: Optional[List[Tuple[str, str]]] = None,
step: int = 1,
):
self.step = step
self._current = _load_current_id(db_conn, table, column, step)
if extra_tables:
for table, column in extra_tables:
self.advance(None, _load_current_id(db_conn, table, column))
def advance(self, instance_name: Optional[str], new_id: int) -> None:
self._current = (max if self.step > 0 else min)(self._current, new_id)
def get_current_token(self) -> int:
return self._current
def get_current_token_for_writer(self, instance_name: str) -> int:
return self.get_current_token()

View file

@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
self._parse_and_dispatch_line(line) self._parse_and_dispatch_line(line)
def _parse_and_dispatch_line(self, line: bytes) -> None: def _parse_and_dispatch_line(self, line: bytes) -> None:
if line.strip() == "": if line.strip() == b"":
# Ignore blank lines # Ignore blank lines
return return

View file

@ -238,6 +238,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
""" """
Register all the admin servlets. Register all the admin servlets.
""" """
# Admin servlets aren't registered on workers.
if hs.config.worker.worker_app is not None:
return
register_servlets_for_client_rest_resource(hs, http_server) register_servlets_for_client_rest_resource(hs, http_server)
BlockRoomRestServlet(hs).register(http_server) BlockRoomRestServlet(hs).register(http_server)
ListRoomRestServlet(hs).register(http_server) ListRoomRestServlet(hs).register(http_server)
@ -254,9 +258,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserTokenRestServlet(hs).register(http_server) UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server) UserRestServletV2(hs).register(http_server)
UsersRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server)
DeviceRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
DeleteDevicesRestServlet(hs).register(http_server)
UserMediaStatisticsRestServlet(hs).register(http_server) UserMediaStatisticsRestServlet(hs).register(http_server)
EventReportDetailRestServlet(hs).register(http_server) EventReportDetailRestServlet(hs).register(http_server)
EventReportsRestServlet(hs).register(http_server) EventReportsRestServlet(hs).register(http_server)
@ -280,12 +281,13 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserByExternalId(hs).register(http_server) UserByExternalId(hs).register(http_server)
UserByThreePid(hs).register(http_server) UserByThreePid(hs).register(http_server)
-    # Some servlets only get registered for the main process.
-    if hs.config.worker.worker_app is None:
-        SendServerNoticeServlet(hs).register(http_server)
-        BackgroundUpdateEnabledRestServlet(hs).register(http_server)
-        BackgroundUpdateRestServlet(hs).register(http_server)
-        BackgroundUpdateStartJobRestServlet(hs).register(http_server)
+    DeviceRestServlet(hs).register(http_server)
+    DevicesRestServlet(hs).register(http_server)
+    DeleteDevicesRestServlet(hs).register(http_server)
+    SendServerNoticeServlet(hs).register(http_server)
+    BackgroundUpdateEnabledRestServlet(hs).register(http_server)
+    BackgroundUpdateRestServlet(hs).register(http_server)
+    BackgroundUpdateStartJobRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource( def register_servlets_for_client_rest_resource(
@ -294,9 +296,11 @@ def register_servlets_for_client_rest_resource(
"""Register only the servlets which need to be exposed on /_matrix/client/xxx""" """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
WhoisRestServlet(hs).register(http_server) WhoisRestServlet(hs).register(http_server)
PurgeHistoryStatusRestServlet(hs).register(http_server) PurgeHistoryStatusRestServlet(hs).register(http_server)
-    DeactivateAccountRestServlet(hs).register(http_server)
     PurgeHistoryRestServlet(hs).register(http_server)
-    ResetPasswordRestServlet(hs).register(http_server)
+    # The following resources can only be run on the main process.
+    if hs.config.worker.worker_app is None:
+        DeactivateAccountRestServlet(hs).register(http_server)
+        ResetPasswordRestServlet(hs).register(http_server)
SearchUsersRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server)
UserRegisterServlet(hs).register(http_server) UserRegisterServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server)

View file

@ -16,6 +16,7 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import NotFoundError, SynapseError from synapse.api.errors import NotFoundError, SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import ( from synapse.http.servlet import (
RestServlet, RestServlet,
assert_params_in_dict, assert_params_in_dict,
@ -43,7 +44,9 @@ class DeviceRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
super().__init__() super().__init__()
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.is_mine = hs.is_mine self.is_mine = hs.is_mine
@ -112,7 +115,9 @@ class DevicesRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.is_mine = hs.is_mine self.is_mine = hs.is_mine
@ -143,7 +148,9 @@ class DeleteDevicesRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.is_mine = hs.is_mine self.is_mine = hs.is_mine

View file

@ -903,8 +903,9 @@ class PushersRestServlet(RestServlet):
@user:server/pushers @user:server/pushers
Returns: Returns:
-        pushers: Dictionary containing pushers information.
-        total: Number of pushers in dictionary `pushers`.
+        A dictionary with keys:
+            pushers: Dictionary containing pushers information.
+            total: Number of pushers in dictionary `pushers`.
""" """
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$") PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")

View file

@ -20,6 +20,7 @@ from pydantic import Extra, StrictStr
from synapse.api import errors from synapse.api import errors
from synapse.api.errors import NotFoundError from synapse.api.errors import NotFoundError
from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer from synapse.http.server import HttpServer
from synapse.http.servlet import ( from synapse.http.servlet import (
RestServlet, RestServlet,
@ -80,7 +81,9 @@ class DeleteDevicesRestServlet(RestServlet):
super().__init__() super().__init__()
self.hs = hs self.hs = hs
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.auth_handler = hs.get_auth_handler() self.auth_handler = hs.get_auth_handler()
class PostBody(RequestBodyModel): class PostBody(RequestBodyModel):
@ -125,7 +128,9 @@ class DeviceRestServlet(RestServlet):
super().__init__() super().__init__()
self.hs = hs self.hs = hs
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.auth_handler = hs.get_auth_handler() self.auth_handler = hs.get_auth_handler()
self._msc3852_enabled = hs.config.experimental.msc3852_enabled self._msc3852_enabled = hs.config.experimental.msc3852_enabled
@ -256,7 +261,9 @@ class DehydratedDeviceServlet(RestServlet):
super().__init__() super().__init__()
self.hs = hs self.hs = hs
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request) requester = await self.auth.get_user_by_req(request)
@ -313,7 +320,9 @@ class ClaimDehydratedDeviceServlet(RestServlet):
super().__init__() super().__init__()
self.hs = hs self.hs = hs
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
class PostBody(RequestBodyModel): class PostBody(RequestBodyModel):
device_id: StrictStr device_id: StrictStr

View file

@ -27,6 +27,7 @@ from synapse.http.servlet import (
) )
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag from synapse.logging.opentracing import log_kv, set_tag
from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.types import JsonDict, StreamToken from synapse.types import JsonDict, StreamToken
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable
@ -43,24 +44,48 @@ class KeyUploadServlet(RestServlet):
Content-Type: application/json Content-Type: application/json
{
    "device_keys": {
        "user_id": "<user_id>",
        "device_id": "<device_id>",
        "valid_until_ts": <millisecond_timestamp>,
        "algorithms": [
            "m.olm.curve25519-aes-sha2",
        ]
        "keys": {
            "<algorithm>:<device_id>": "<key_base64>",
        },
        "signatures:" {
            "<user_id>" {
                "<algorithm>:<device_id>": "<signature_base64>"
            }
        }
    },
    "fallback_keys": {
        "<algorithm>:<device_id>": "<key_base64>",
        "signed_<algorithm>:<device_id>": {
            "fallback": true,
            "key": "<key_base64>",
            "signatures": {
                "<user_id>": {
                    "<algorithm>:<device_id>": "<key_base64>"
                }
            }
        }
    }
    "one_time_keys": {
        "<algorithm>:<key_id>": "<key_base64>"
    },
}

response, e.g.:

{
    "one_time_key_counts": {
        "curve25519": 10,
        "signed_curve25519": 20
    }
}
""" """
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
@ -71,6 +96,13 @@ class KeyUploadServlet(RestServlet):
self.e2e_keys_handler = hs.get_e2e_keys_handler() self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.device_handler = hs.get_device_handler() self.device_handler = hs.get_device_handler()
if hs.config.worker.worker_app is None:
# if main process
self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
else:
# then a worker
self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)
async def on_POST( async def on_POST(
self, request: SynapseRequest, device_id: Optional[str] self, request: SynapseRequest, device_id: Optional[str]
) -> Tuple[int, JsonDict]: ) -> Tuple[int, JsonDict]:
@ -109,8 +141,8 @@ class KeyUploadServlet(RestServlet):
400, "To upload keys, you must pass device_id when authenticating" 400, "To upload keys, you must pass device_id when authenticating"
) )
result = await self.e2e_keys_handler.upload_keys_for_user( result = await self.key_uploader(
user_id, device_id, body user_id=user_id, device_id=device_id, keys=body
) )
return 200, result return 200, result
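The key_uploader attribute above is a small dispatch: the main process calls the local handler method, while a worker forwards the upload over HTTP replication. A hedged, self-contained sketch of that dispatch (placeholder names, not the Synapse classes):

    import asyncio
    from typing import Awaitable, Callable, Dict

    async def local_upload(user_id: str, device_id: str, keys: Dict) -> Dict:
        # stands in for e2e_keys_handler.upload_keys_for_user on the main process
        count = len(keys.get("one_time_keys", {}))
        return {"one_time_key_counts": {"signed_curve25519": count}}

    async def replicated_upload(user_id: str, device_id: str, keys: Dict) -> Dict:
        # stands in for the client produced by ReplicationUploadKeysForUserRestServlet.make_client(hs)
        return await local_upload(user_id, device_id, keys)  # pretend this crossed process boundaries

    def pick_uploader(is_main_process: bool) -> Callable[..., Awaitable[Dict]]:
        return local_upload if is_main_process else replicated_upload

    key_uploader = pick_uploader(is_main_process=False)
    result = asyncio.run(key_uploader(user_id="@u:example.org", device_id="DEV", keys={}))
    print(result)  # {'one_time_key_counts': {'signed_curve25519': 0}}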

View file

@ -350,7 +350,7 @@ class LoginRestServlet(RestServlet):
auth_provider_session_id: The session ID got during login from the SSO IdP. auth_provider_session_id: The session ID got during login from the SSO IdP.
Returns: Returns:
result: Dictionary of account information after successful login. Dictionary of account information after successful login.
""" """
# Before we actually log them in we check if they've already logged in # Before we actually log them in we check if they've already logged in

View file

@ -15,6 +15,7 @@
import logging import logging
from typing import TYPE_CHECKING, Tuple from typing import TYPE_CHECKING, Tuple
from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
@ -34,7 +35,9 @@ class LogoutRestServlet(RestServlet):
super().__init__() super().__init__()
self.auth = hs.get_auth() self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler() self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self._device_handler = handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True) requester = await self.auth.get_user_by_req(request, allow_expired=True)
@ -59,7 +62,9 @@ class LogoutAllRestServlet(RestServlet):
super().__init__() super().__init__()
self.auth = hs.get_auth() self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler() self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler() handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self._device_handler = handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True) requester = await self.auth.get_user_by_req(request, allow_expired=True)

View file

@ -1288,17 +1288,14 @@ class TimestampLookupRestServlet(RestServlet):
`dir` can be `f` or `b` to indicate forwards and backwards in time from the `dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp. given timestamp.
GET /_matrix/client/unstable/org.matrix.msc3030/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction> GET /_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
{ {
"event_id": ... "event_id": ...
} }
""" """
PATTERNS = ( PATTERNS = (
re.compile( re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"),
"^/_matrix/client/unstable/org.matrix.msc3030"
"/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"
),
) )
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
@ -1425,8 +1422,7 @@ def register_servlets(
RoomAliasListServlet(hs).register(http_server) RoomAliasListServlet(hs).register(http_server)
SearchRestServlet(hs).register(http_server) SearchRestServlet(hs).register(http_server)
RoomCreateRestServlet(hs).register(http_server) RoomCreateRestServlet(hs).register(http_server)
if hs.config.experimental.msc3030_enabled: TimestampLookupRestServlet(hs).register(http_server)
TimestampLookupRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process. # Some servlets only get registered for the main process.
if not is_worker: if not is_worker:
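With the servlet registered unconditionally on its stable path, clients can hit /_matrix/client/v1 directly. A hedged example using the requests library (homeserver, room ID, timestamp and token are placeholders):

    import requests

    resp = requests.get(
        "https://matrix.example.org/_matrix/client/v1/rooms/!room:example.org/timestamp_to_event",
        params={"ts": 1669680000000, "dir": "f"},
        headers={"Authorization": "Bearer <access_token>"},
    )
    print(resp.status_code, resp.json())  # expected shape: {"event_id": "$..."}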

View file

@ -101,8 +101,6 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3827.stable": True, "org.matrix.msc3827.stable": True,
# Adds support for importing historical messages as per MSC2716 # Adds support for importing historical messages as per MSC2716
"org.matrix.msc2716": self.config.experimental.msc2716_enabled, "org.matrix.msc2716": self.config.experimental.msc2716_enabled,
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
"org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440. # Adds support for thread relations, per MSC3440.
"org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
# Support for thread read receipts & notification counts. # Support for thread read receipts & notification counts.

View file

@ -344,8 +344,8 @@ class MediaRepository:
download from remote server. download from remote server.
Args: Args:
server_name (str): Remote server_name where the media originated. server_name: Remote server_name where the media originated.
media_id (str): The media ID of the content (as defined by the media_id: The media ID of the content (as defined by the
remote server). remote server).
Returns: Returns:

View file

@ -138,7 +138,7 @@ class Thumbnailer:
"""Rescales the image to the given dimensions. """Rescales the image to the given dimensions.
Returns: Returns:
BytesIO: the bytes of the encoded image ready to be written to disk The bytes of the encoded image ready to be written to disk
""" """
with self._resize(width, height) as scaled: with self._resize(width, height) as scaled:
return self._encode_image(scaled, output_type) return self._encode_image(scaled, output_type)
@ -155,7 +155,7 @@ class Thumbnailer:
max_height: The largest possible height. max_height: The largest possible height.
Returns: Returns:
BytesIO: the bytes of the encoded image ready to be written to disk The bytes of the encoded image ready to be written to disk
""" """
if width * self.height > height * self.width: if width * self.height > height * self.width:
scaled_width = width scaled_width = width

View file

@ -510,7 +510,7 @@ class HomeServer(metaclass=abc.ABCMeta):
) )
@cache_in_self @cache_in_self
def get_device_handler(self): def get_device_handler(self) -> DeviceWorkerHandler:
if self.config.worker.worker_app: if self.config.worker.worker_app:
return DeviceWorkerHandler(self) return DeviceWorkerHandler(self)
else: else:

View file

@ -113,9 +113,8 @@ def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
"""Deep-copy a structure, carrying out string substitutions on any strings """Deep-copy a structure, carrying out string substitutions on any strings
Args: Args:
x (object): structure to be copied x: structure to be copied
substitutions (object): substitutions to be made - passed into the substitutions: substitutions to be made - passed into the string '%' operator
string '%' operator
Returns: Returns:
copy of x copy of x
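Given that docstring, a hedged usage sketch (the mapping keys are invented for illustration, and copy_with_str_subst is assumed to be imported from its module):

    config = {"log_file": "/var/log/%(app)s.log", "pid_file": "/run/%(app)s.pid"}
    copied = copy_with_str_subst(config, {"app": "synapse"})
    # copied == {"log_file": "/var/log/synapse.log", "pid_file": "/run/synapse.pid"}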

View file

@ -170,11 +170,13 @@ class ResourceLimitsServerNotices:
room_id: The room id of the server notices room room_id: The room id of the server notices room
Returns: Returns:
Tuple of:
    Is the room currently blocked

    The list of pinned event IDs that are unrelated to limit blocking
    This list can be used as a convenience in the case where the block
    is to be lifted and the remaining pinned event references need to be
    preserved
""" """
currently_blocked = False currently_blocked = False
pinned_state_event = None pinned_state_event = None
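A hedged sketch of the tuple contract this docstring now describes (the function and variable names below are placeholders, not the Synapse method):

    from typing import List, Tuple

    async def check_room_blocked(room_id: str) -> Tuple[bool, List[str]]:
        # The real handler inspects the pinned events of the server notices
        # room; this stub only illustrates the returned shape.
        currently_blocked = False
        unrelated_pinned_event_ids: List[str] = []
        return currently_blocked, unrelated_pinned_event_ids

    # Callers unpack it as:
    #     blocked, other_pins = await check_room_blocked(room_id)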

Some files were not shown because too many files have changed in this diff.