Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2024-10-01 11:49:51 -04:00)

Commit bb26f5f0a9: Merge remote-tracking branch 'upstream/release-v1.73'
.editorconfig

@@ -4,7 +4,7 @@
 root = true

 # 4 space indentation
-[*.py]
+[*.{py,pyi}]
 indent_style = space
 indent_size = 4
 max_line_length = 88
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored, 45 lines changed)
@@ -74,6 +74,36 @@ body:
         - Debian packages from packages.matrix.org
         - pip (from PyPI)
         - Other (please mention below)
+        - I don't know
+    validations:
+      required: true
+  - type: input
+    id: database
+    attributes:
+      label: Database
+      description: |
+        Are you using SQLite or PostgreSQL? What's the version of your database?
+
+        If PostgreSQL, please also answer the following:
+         - are you using a single PostgreSQL server
+           or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
+         - have you previously ported from SQLite using the Synapse "portdb" script?
+         - have you previously restored from a backup?
+    validations:
+      required: true
+  - type: dropdown
+    id: workers
+    attributes:
+      label: Workers
+      description: |
+        Are you running a single Synapse process, or are you running
+        [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
+      options:
+        - Single process
+        - Multiple workers
+        - I don't know
+    validations:
+      required: true
   - type: textarea
     id: platform
     attributes:
@@ -83,17 +113,28 @@ body:
         e.g. distro, hardware, if it's running in a vm/container, etc.
     validations:
       required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Configuration
+      description: |
+        Do you have any unusual config options turned on? If so, please provide details.
+
+        - Experimental or undocumented features
+        - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
+        - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
+        - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
   - type: textarea
     id: logs
     attributes:
       label: Relevant log output
       description: |
         Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
-        This will be automatically formatted into code, so there is no need for backticks.
+        This will be automatically formatted into code, so there is no need for backticks (`\``).

         Please be careful to remove any personal or private data.

-        **Bug reports are usually very difficult to diagnose without logging.**
+        **Bug reports are usually impossible to diagnose without logging.**
       render: shell
     validations:
       required: true
.github/workflows/latest_deps.yml (vendored, 6 lines changed)
@@ -27,7 +27,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2
@@ -61,7 +61,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2
@@ -134,7 +134,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2
.github/workflows/push_complement_image.yml (vendored, new file, 74 lines)
@@ -0,0 +1,74 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead

name: Store complement-synapse image in ghcr.io
on:
  push:
    branches: [ "master" ]
  schedule:
    - cron: '0 5 * * *'
  workflow_dispatch:
    inputs:
      branch:
        required: true
        default: 'develop'
        type: choice
        options:
          - develop
          - master

# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build and push complement image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout specific branch (debug build)
        uses: actions/checkout@v3
        if: github.event_name == 'workflow_dispatch'
        with:
          ref: ${{ inputs.branch }}
      - name: Checkout clean copy of develop (scheduled build)
        uses: actions/checkout@v3
        if: github.event_name == 'schedule'
        with:
          ref: develop
      - name: Checkout clean copy of master (on-push)
        uses: actions/checkout@v3
        if: github.event_name == 'push'
        with:
          ref: master
      - name: Login to registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Work out labels for complement image
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}/complement-synapse
          tags: |
            type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
            type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
            type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
            type=sha,format=long
      - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
        run: scripts-dev/complement.sh --build-only
      - name: Tag and push generated image
        run: |
          for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
            echo "tag and push $TAG"
            docker tag complement-synapse $TAG
            docker push $TAG
          done
.github/workflows/tests.yml (vendored, 15 lines changed)
@@ -27,6 +27,7 @@ jobs:
          rust:
            - 'rust/**'
            - 'Cargo.toml'
+           - 'Cargo.lock'

  check-sampleconfig:
    runs-on: ubuntu-latest
@@ -102,7 +103,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
          components: clippy
@@ -122,7 +123,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
          components: rustfmt
@@ -184,7 +185,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2
@@ -228,7 +229,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2
@@ -346,7 +347,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2
@@ -489,7 +490,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2
@@ -517,7 +518,7 @@ jobs:
      # There don't seem to be versioned releases of this action per se: for each rust
      # version there is a branch which gets constantly rebased on top of master.
      # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2
.github/workflows/twisted_trunk.yml (vendored, 6 lines changed)
@@ -18,7 +18,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2
@@ -43,7 +43,7 @@ jobs:
      - run: sudo apt-get -qq install xmlsec1

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2
@@ -82,7 +82,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2
CHANGES.md (65 lines changed)
@@ -1,3 +1,68 @@
+Synapse 1.73.0rc1 (2022-11-29)
+==============================
+
+Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+
+Features
+--------
+
+- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
+- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
+- Add support for handling avatars in SSO login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
+- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
+- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
+- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
+- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
+- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
+- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
+- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
+- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/blob/travis/msc/otk-dl-appservice/proposals/3202-encrypted-appservices.md). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
+- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
+
+
+Improved Documentation
+----------------------
+
+- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
+
+
+Internal Changes
+----------------
+
+- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
+- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
+- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
+- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496))
+([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
+- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
+- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
+- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
+- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
+- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
+- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
+- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
+- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
+- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
+- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
+- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
+- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
+- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
+- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
+
+
 Synapse 1.72.0 (2022-11-22)
 ===========================

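As a quick illustration (not part of the commit): the stabilised `/timestamp_to_event` endpoint mentioned in the features above takes `ts` and `dir` query parameters. A minimal Python sketch, where the homeserver URL, room ID and access token are placeholders:

```python
import requests

# Placeholders, not values from this commit.
HOMESERVER = "https://matrix.example.org"
ROOM_ID = "!someroom:example.org"
ACCESS_TOKEN = "<access token>"

# Find the closest event at or after the given millisecond timestamp.
resp = requests.get(
    f"{HOMESERVER}/_matrix/client/v1/rooms/{ROOM_ID}/timestamp_to_event",
    params={"ts": "1669680000000", "dir": "f"},  # "f" = forwards, "b" = backwards
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
print(resp.json())  # per MSC3030: {"event_id": ..., "origin_server_ts": ...}
```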
Cargo.lock (generated, 16 lines changed)
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

 [[package]]
 name = "serde"
-version = "1.0.147"
+version = "1.0.148"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
+checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.147"
+version = "1.0.148"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
+checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -343,9 +343,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.87"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
+checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
 dependencies = [
  "itoa",
  "ryu",
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"

 [[package]]
 name = "syn"
-version = "1.0.102"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
+checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
 dependencies = [
  "proc-macro2",
  "quote",
debian/changelog (vendored, 6 lines changed)
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.73.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Nov 2022 12:28:13 +0000
+
 matrix-synapse-py3 (1.72.0) stable; urgency=medium

   * New Synapse release 1.72.0.

@@ -100,8 +100,6 @@ experimental_features:
   # client-side support for partial state in /send_join responses
   faster_joins: true
   {% endif %}
-  # Enable jump to date endpoint
-  msc3030_enabled: true
   # Filtering /messages by relation type.
   msc3874_enabled: true

@@ -140,6 +140,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
            "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+           "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
        ],
        "shared_extra_conf": {},
@@ -163,6 +164,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_matrix/federation/(v1|v2)/invite/",
            "^/_matrix/federation/(v1|v2)/query_auth/",
            "^/_matrix/federation/(v1|v2)/event_auth/",
+           "^/_matrix/federation/v1/timestamp_to_event/",
            "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
            "^/_matrix/federation/(v1|v2)/user/devices/",
            "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -213,10 +215,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
        "shared_extra_conf": {},
-       "worker_extra_conf": (
-           "worker_main_http_uri: http://127.0.0.1:%d"
-           % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
-       ),
+       "worker_extra_conf": "",
    },
    "account_data": {
        "app": "synapse.app.generic_worker",
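Aside (not part of the commit): the `endpoint_patterns` above are ordinary regular expressions matched against request paths, so the effect of the newly added `timestamp_to_event` entries can be sanity-checked with a few lines of Python. The pattern list below is abridged from the diff and the helper name is made up for illustration:

```python
import re

# Abridged from the worker endpoint_patterns in the diff above.
CLIENT_PATTERNS = [
    r"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
    r"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
]

def routes_to_worker(path: str) -> bool:
    """Return True if the request path matches any worker pattern."""
    return any(re.match(p, path) for p in CLIENT_PATTERNS)

# The stable v1 endpoint added in this release now matches:
assert routes_to_worker("/_matrix/client/v1/rooms/!abc:example.org/timestamp_to_event")
```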
@@ -88,6 +88,28 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.73.0
+
+## Legacy Prometheus metric names have now been removed
+
+Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
+and offered an option to disable them.
+Synapse v1.71.0 disabled legacy Prometheus metric names by default.
+
+This version, v1.73.0, removes those legacy Prometheus metric names entirely.
+This also means that the `enable_legacy_metrics` configuration option has been
+removed; it will no longer be possible to re-enable the legacy metric names.
+
+If you use metrics and have not yet updated your Grafana dashboard(s),
+Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
+to this version.
+Note that the included Grafana dashboard was updated in v1.72.0 to correct some
+metric names which were missed when legacy metrics were disabled by default.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
+for more context.
+
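Aside (not part of the commit): the two legacy conventions being retired here, colons in metric names and counters without a `_total` suffix (per the `enable_legacy_metrics` documentation removed later in this diff), can be sketched mechanically in Python. This is only a rough illustration; real dashboards and alert rules should be checked against the actual new metric names.

```python
# Illustrative only: the two legacy-name conventions that were removed.
def modernise_metric_name(name: str, is_counter: bool = False) -> str:
    name = name.replace(":", "_")          # colons are reserved for recording rules
    if is_counter and not name.endswith("_total"):
        name += "_total"                   # OpenMetrics counter convention
    return name

assert modernise_metric_name("synapse_util_caches_response_cache:hits") == \
    "synapse_util_caches_response_cache_hits"
assert modernise_metric_name("synapse_federation_client_sent_edus", is_counter=True) == \
    "synapse_federation_client_sent_edus_total"
```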
 # Upgrading to v1.72.0

 ## Dropping support for PostgreSQL 10
@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.
 Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.

 ## Making an Admin API request
-For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
+For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
 that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
 reverse proxy. This means you should typically query the Admin API from a terminal on
 the machine which runs Synapse.
@@ -2437,31 +2437,6 @@ Example configuration:
 enable_metrics: true
 ```
 ---
-### `enable_legacy_metrics`
-
-Set to `true` to publish both legacy and non-legacy Prometheus metric names,
-or to `false` to only publish non-legacy Prometheus metric names.
-Defaults to `false`. Has no effect if `enable_metrics` is `false`.
-**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
-
-Legacy metric names include:
-- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
-- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
-
-These legacy metric names are unconventional and not compliant with OpenMetrics standards.
-They are included for backwards compatibility.
-
-Example configuration:
-```yaml
-enable_legacy_metrics: false
-```
-
-See https://github.com/matrix-org/synapse/issues/11106 for context.
-
-*Since v1.67.0.*
-
-**Will be removed in v1.73.0.**
 ---
 ### `sentry`

 Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2993,10 +2968,17 @@ Options for each entry include:

 For the default provider, the following settings are available:

-* subject_claim: name of the claim containing a unique identifier
+* `subject_claim`: name of the claim containing a unique identifier
   for the user. Defaults to 'sub', which OpenID Connect
   compliant providers should provide.

+* `picture_claim`: name of the claim containing an url for the user's profile picture.
+  Defaults to 'picture', which OpenID Connect compliant providers should provide
+  and has to refer to a direct image file such as PNG, JPEG, or GIF image file.
+
+  Currently only supported in monolithic (single-process) server configurations
+  where the media repository runs within the Synapse process.
+
 * `localpart_template`: Jinja2 template for the localpart of the MXID.
   If this is not set, the user will be prompted to choose their
   own username (see the documentation for the `sso_auth_account_details.html`
@@ -135,8 +135,8 @@ In the config file for each worker, you must specify:
   [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
 * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
   with an `http` listener.
-* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
-  the main process (`worker_main_http_uri`).
+* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
+  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.

 For example:

@@ -191,6 +191,7 @@ information.
     ^/_matrix/federation/(v1|v2)/send_leave/
     ^/_matrix/federation/(v1|v2)/invite/
     ^/_matrix/federation/v1/event_auth/
+    ^/_matrix/federation/v1/timestamp_to_event/
     ^/_matrix/federation/v1/exchange_third_party_invite/
     ^/_matrix/federation/v1/user/devices/
     ^/_matrix/key/v2/query
@@ -218,10 +219,10 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
     ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
+    ^/_matrix/client/v1/rooms/.*/timestamp_to_event$
     ^/_matrix/client/(api/v1|r0|v3|unstable)/search$

     # Encryption requests
-    # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
     ^/_matrix/client/(r0|v3|unstable)/keys/query$
     ^/_matrix/client/(r0|v3|unstable)/keys/changes$
     ^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -376,7 +377,7 @@ responsible for
 - persisting them to the DB, and finally
 - updating the events stream.

 Because load is sharded in this way, you *must* restart all worker instances when
 adding or removing event persisters.

 An `event_persister` should not be mistaken for an `event_creator`.
mypy.ini (19 lines changed)
@@ -11,6 +11,7 @@ warn_unused_ignores = True
 local_partial_types = True
 no_implicit_optional = True
 disallow_untyped_defs = True
+strict_equality = True

 files =
   docker/,
@@ -58,11 +59,6 @@ exclude = (?x)
   |tests/server_notices/test_resource_limits_server_notices.py
   |tests/test_state.py
   |tests/test_terms_auth.py
-  |tests/util/caches/test_cached_call.py
-  |tests/util/caches/test_deferred_cache.py
-  |tests/util/caches/test_descriptors.py
-  |tests/util/caches/test_response_cache.py
-  |tests/util/caches/test_ttlcache.py
   |tests/util/test_async_helpers.py
   |tests/util/test_batching_queue.py
   |tests/util/test_dict_cache.py
@@ -117,9 +113,15 @@ disallow_untyped_defs = True
 [mypy-tests.state.test_profile]
 disallow_untyped_defs = True

+[mypy-tests.storage.test_id_generators]
+disallow_untyped_defs = True
+
 [mypy-tests.storage.test_profile]
 disallow_untyped_defs = True

+[mypy-tests.handlers.test_sso]
+disallow_untyped_defs = True
+
 [mypy-tests.storage.test_user_directory]
 disallow_untyped_defs = True

@@ -129,9 +131,14 @@ disallow_untyped_defs = True
 [mypy-tests.federation.transport.test_client]
 disallow_untyped_defs = True

-[mypy-tests.utils]
+[mypy-tests.util.caches.*]
 disallow_untyped_defs = True

+[mypy-tests.util.caches.test_descriptors]
+disallow_untyped_defs = False
+
+[mypy-tests.utils]
+disallow_untyped_defs = True
+
 ;; Dependencies without annotations
 ;; Before ignoring a module, check to see if type stubs are available.
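For readers unfamiliar with the newly enabled `strict_equality` flag (an illustrative example, not from the commit): it makes mypy reject equality comparisons between types that can never overlap.

```python
# With mypy's strict_equality = True, this comparison is flagged:
# error: Non-overlapping equality check (left operand type: "int",
#        right operand type: "str")
def is_answer(value: int) -> bool:
    return value == "42"  # an int can never equal a str
```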
poetry.lock (generated, 39 lines changed)
@@ -663,7 +663,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"

 [[package]]
 name = "phonenumbers"
-version = "8.12.56"
+version = "8.13.0"
 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
 category = "main"
 optional = false
@@ -814,15 +814,15 @@ python-versions = ">=3.6"

 [[package]]
 name = "pygithub"
-version = "1.56"
+version = "1.57"
 description = "Use the full Github API v3"
 category = "dev"
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"

 [package.dependencies]
 deprecated = "*"
-pyjwt = ">=2.0"
+pyjwt = ">=2.4.0"
 pynacl = ">=1.4.0"
 requests = ">=2.14.0"

@@ -1076,7 +1076,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]

 [[package]]
 name = "sentry-sdk"
-version = "1.10.1"
+version = "1.11.0"
 description = "Python client for Sentry (https://sentry.io)"
 category = "main"
 optional = true
@@ -1098,6 +1098,7 @@ fastapi = ["fastapi (>=0.79.0)"]
 flask = ["blinker (>=1.1)", "flask (>=0.11)"]
 httpx = ["httpx (>=0.16.0)"]
+pure-eval = ["asttokens", "executing", "pure-eval"]
 pymongo = ["pymongo (>=3.1)"]
 pyspark = ["pyspark (>=2.4.4)"]
 quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
 rq = ["rq (>=0.6)"]
@@ -1256,11 +1257,11 @@ python-versions = ">= 3.5"

 [[package]]
 name = "towncrier"
-version = "21.9.0"
+version = "22.8.0"
 description = "Building newsfiles for your project."
 category = "dev"
 optional = false
-python-versions = "*"
+python-versions = ">=3.7"

 [package.dependencies]
 click = "*"
@@ -1268,7 +1269,7 @@ click-default-group = "*"
 incremental = "*"
 jinja2 = "*"
 setuptools = "*"
-tomli = {version = "*", markers = "python_version >= \"3.6\""}
+tomli = "*"

 [package.extras]
 dev = ["packaging"]
@@ -1439,7 +1440,7 @@ python-versions = "*"

 [[package]]
 name = "types-pillow"
-version = "9.2.2.1"
+version = "9.3.0.1"
 description = "Typing stubs for Pillow"
 category = "dev"
 optional = false
@@ -2257,8 +2258,8 @@ pathspec = [
     {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
 ]
 phonenumbers = [
-    {file = "phonenumbers-8.12.56-py2.py3-none-any.whl", hash = "sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3"},
-    {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"},
+    {file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"},
+    {file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"},
 ]
 pillow = [
     {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
@@ -2419,8 +2420,8 @@ pyflakes = [
     {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
 ]
 pygithub = [
-    {file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"},
-    {file = "PyGithub-1.56.tar.gz", hash = "sha256:80c6d85cf0f9418ffeb840fd105840af694c4f17e102970badbaf678251f2a01"},
+    {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
+    {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
 ]
 pygments = [
     {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
@@ -2568,8 +2569,8 @@ semantic-version = [
     {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
 ]
 sentry-sdk = [
-    {file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"},
-    {file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"},
+    {file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"},
+    {file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"},
 ]
 service-identity = [
     {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2720,8 +2721,8 @@ tornado = [
     {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
 ]
 towncrier = [
-    {file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
-    {file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
+    {file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"},
+    {file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"},
 ]
 treq = [
     {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
@@ -2808,8 +2809,8 @@ types-opentracing = [
     {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
 ]
 types-pillow = [
-    {file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"},
-    {file = "types_Pillow-9.2.2.1-py3-none-any.whl", hash = "sha256:3a6a871cade8428433a21ef459bb0a65532b87d05f9e836a0664431ce445bdcf"},
+    {file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"},
+    {file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
 ]
 types-psycopg2 = [
     {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.72.0"
+version = "1.73.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -443,9 +443,9 @@ packaging==21.3 ; python_full_version >= "3.7.1" and python_full_version < "4.0.
 parameterized==0.8.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
     --hash=sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c \
     --hash=sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9
-phonenumbers==8.12.56 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
-    --hash=sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3 \
-    --hash=sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868
+phonenumbers==8.13.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
+    --hash=sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4 \
+    --hash=sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0
 pillow==9.3.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
     --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
     --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
@@ -274,6 +274,156 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+    PushRule {
+        rule_id: Cow::Borrowed(
+            "global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
+        ),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
+                pattern_type: None,
+            })),
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed(
+            "global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
+        ),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
+                pattern_type: None,
+            })),
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed(
+            "global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
+        ),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
+                pattern_type: None,
+            })),
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed(
+            "global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
+        ),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
+                pattern_type: None,
+            })),
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed(
+            "global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
+        ),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
+                pattern_type: None,
+            })),
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed(
+            "global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
+        ),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
+                pattern_type: None,
+            })),
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
     PushRule {
         rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
         priority_class: 1,
@@ -302,6 +452,126 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("m.encrypted")),
+                pattern_type: None,
+            })),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("m.message")),
+                pattern_type: None,
+            })),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("m.file")),
+                pattern_type: None,
+            })),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("m.image")),
+                pattern_type: None,
+            })),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("m.video")),
+                pattern_type: None,
+            })),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                // MSC3933: Type changed from template rule - see MSC.
+                pattern: Some(Cow::Borrowed("m.audio")),
+                pattern_type: None,
+            })),
+            // MSC3933: Add condition on top of template rule - see MSC.
+            Condition::Known(KnownCondition::RoomVersionSupports {
+                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+            }),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
     PushRule {
         rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
         priority_class: 1,
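For orientation (not part of the commit): serialised for clients, the first rule of this block would look roughly like the Python literal below. The JSON field names follow the Matrix push-rules spec; the exact rendering is handled by the `IntoPy` implementations elsewhere in this module.

```python
# Rough JSON shape of one MSC3933 underride rule as a client would see it
# (illustrative; the condition "kind" strings come from the serde renames
# in this diff, the rest follows the push-rules spec).
extensible_encrypted_rule = {
    "rule_id": ".org.matrix.msc1767.rule.extensible.encrypted",
    "default": True,
    "enabled": True,
    "conditions": [
        {"kind": "event_match", "key": "type", "pattern": "m.encrypted"},
        {
            "kind": "org.matrix.msc3931.room_version_supports",
            "feature": "org.matrix.msc3932.extensible_events",
        },
    ],
    "actions": ["notify", {"set_tweak": "highlight", "value": False}],
}
```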
@ -12,8 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use crate::push::{PushRule, PushRules};
|
||||
use anyhow::{Context, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use log::warn;
|
||||
@ -29,6 +31,33 @@ use super::{
|
||||
lazy_static! {
|
||||
/// Used to parse the `is` clause in the room member count condition.
|
||||
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
|
||||
|
||||
/// Used to determine which MSC3931 room version feature flags are actually known to
|
||||
/// the push evaluator.
|
||||
static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
|
||||
RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
|
||||
];
|
||||
|
||||
/// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
|
||||
/// declare Extensible Events support ultimately *disable* push rules which do not declare
|
||||
/// *any* MSC3931 room_version_supports condition).
|
||||
static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
|
||||
"global/override/.m.rule.master".to_string(),
|
||||
"global/override/.m.rule.roomnotif".to_string(),
|
||||
"global/content/.m.rule.contains_user_name".to_string(),
|
||||
];
|
||||
}
|
||||
|
||||
enum RoomVersionFeatures {
|
||||
ExtensibleEvents,
|
||||
}
|
||||
|
||||
impl RoomVersionFeatures {
|
||||
fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows running a set of push rules against a particular event.
|
||||
@ -57,6 +86,13 @@ pub struct PushRuleEvaluator {
|
||||
|
||||
/// If msc3664, push rules for related events, is enabled.
|
||||
related_event_match_enabled: bool,
|
||||
|
||||
/// If MSC3931 is applicable, the feature flags for the room version.
|
||||
room_version_feature_flags: Vec<String>,
|
||||
|
||||
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
|
||||
/// flag as MSC1767 (extensible events core).
|
||||
msc3931_enabled: bool,
|
||||
}
|
||||
|
||||
#[pymethods]
|
||||
@ -70,6 +106,8 @@ impl PushRuleEvaluator {
|
||||
notification_power_levels: BTreeMap<String, i64>,
|
||||
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
|
||||
related_event_match_enabled: bool,
|
||||
room_version_feature_flags: Vec<String>,
|
||||
msc3931_enabled: bool,
|
||||
) -> Result<Self, Error> {
|
||||
let body = flattened_keys
|
||||
.get("content.body")
|
||||
@ -84,6 +122,8 @@ impl PushRuleEvaluator {
|
||||
sender_power_level,
|
||||
related_events_flattened,
|
||||
related_event_match_enabled,
|
||||
room_version_feature_flags,
|
||||
msc3931_enabled,
|
||||
})
|
||||
}
|
||||
|
||||
@ -106,7 +146,22 @@ impl PushRuleEvaluator {
|
||||
continue;
|
||||
}
|
||||
|
||||
let rule_id = &push_rule.rule_id().to_string();
|
||||
let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
|
||||
let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
|
||||
let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
|
||||
let mut has_rver_condition = false;
|
||||
|
||||
for condition in push_rule.conditions.iter() {
|
||||
has_rver_condition = has_rver_condition
|
||||
|| match condition {
|
||||
Condition::Known(known) => match known {
|
||||
// per MSC3932, we just need *any* room version condition to match
|
||||
KnownCondition::RoomVersionSupports { feature: _ } => true,
|
||||
_ => false,
|
||||
},
|
||||
_ => false,
|
||||
};
|
||||
match self.match_condition(condition, user_id, display_name) {
|
||||
Ok(true) => {}
|
||||
Ok(false) => continue 'outer,
|
||||
@ -117,6 +172,13 @@ impl PushRuleEvaluator {
|
||||
}
|
||||
}
|
||||
|
||||
// MSC3932: Disable push rules in extensible event-supporting room versions if they
|
||||
// don't describe *any* MSC3931 room version condition, unless the rule is on the
|
||||
// safe list.
|
||||
if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
|
||||
continue;
|
||||
}
|
||||
|
||||
let actions = push_rule
|
||||
.actions
|
||||
.iter()
|
||||
@ -204,6 +266,15 @@ impl PushRuleEvaluator {
|
||||
false
|
||||
}
|
||||
}
|
||||
KnownCondition::RoomVersionSupports { feature } => {
|
||||
if !self.msc3931_enabled {
|
||||
false
|
||||
} else {
|
||||
let flag = feature.to_string();
|
||||
KNOWN_RVER_FLAGS.contains(&flag)
|
||||
&& self.room_version_feature_flags.contains(&flag)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(result)
|
||||
@ -362,9 +433,59 @@ fn push_rule_evaluator() {
|
||||
BTreeMap::new(),
|
||||
BTreeMap::new(),
|
||||
true,
|
||||
vec![],
|
||||
true,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
|
||||
assert_eq!(result.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_requires_room_version_supports_condition() {
|
||||
let mut flattened_keys = BTreeMap::new();
|
||||
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
|
||||
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
|
||||
let evaluator = PushRuleEvaluator::py_new(
|
||||
flattened_keys,
|
||||
10,
|
||||
Some(0),
|
||||
BTreeMap::new(),
|
||||
BTreeMap::new(),
|
||||
false,
|
||||
flags,
|
||||
true,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// first test: are the master and contains_user_name rules excluded from the "requires room
|
||||
// version condition" check?
|
||||
let mut result = evaluator.run(
|
||||
&FilteredPushRules::default(),
|
||||
Some("@bob:example.org"),
|
||||
None,
|
||||
);
|
||||
assert_eq!(result.len(), 3);
|
||||
|
||||
// second test: if an appropriate push rule is in play, does it get handled?
|
||||
let custom_rule = PushRule {
|
||||
rule_id: Cow::from("global/underride/.org.example.extensible"),
|
||||
priority_class: 1, // underride
|
||||
conditions: Cow::from(vec![Condition::Known(
|
||||
KnownCondition::RoomVersionSupports {
|
||||
feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
|
||||
},
|
||||
)]),
|
||||
actions: Cow::from(vec![Action::Notify]),
|
||||
default: false,
|
||||
default_enabled: true,
|
||||
};
|
||||
let rules = PushRules::new(vec![custom_rule]);
|
||||
result = evaluator.run(
|
||||
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert_eq!(result.len(), 1);
|
||||
}
|
||||
|
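To summarise the gating introduced above: in a room version that declares extensible-events support, a rule is skipped unless it is safe-listed or carries at least one room_version_supports condition. A minimal Python restatement of that decision (not the shipped implementation, which is the Rust above; the helper name and simplified condition shape are illustrative):

    # Sketch of the MSC3932 gating above. EXTENSIBLE_EVENTS_FLAG and
    # SAFE_RULE_IDS mirror the Rust statics; conditions are plain dicts here.
    EXTENSIBLE_EVENTS_FLAG = "org.matrix.msc3932.extensible_events"
    SAFE_RULE_IDS = {
        "global/override/.m.rule.master",
        "global/override/.m.rule.roomnotif",
        "global/content/.m.rule.contains_user_name",
    }

    def rule_is_disabled_by_msc3932(rule_id, conditions, room_version_feature_flags):
        """Return True if MSC3932 disables this rule in this room version."""
        # Rooms that do not declare extensible-events support are unaffected.
        if EXTENSIBLE_EVENTS_FLAG not in room_version_feature_flags:
            return False
        # Safe-listed rules keep working regardless.
        if rule_id in SAFE_RULE_IDS:
            return False
        # Per MSC3932, *any* room_version_supports condition opts the rule in.
        return not any(
            c.get("kind") == "org.matrix.msc3931.room_version_supports"
            for c in conditions
        )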
@ -277,6 +277,10 @@ pub enum KnownCondition {
    SenderNotificationPermission {
        key: Cow<'static, str>,
    },
    #[serde(rename = "org.matrix.msc3931.room_version_supports")]
    RoomVersionSupports {
        feature: Cow<'static, str>,
    },
}

impl IntoPy<PyObject> for Condition {
@ -408,6 +412,7 @@ pub struct FilteredPushRules {
    push_rules: PushRules,
    enabled_map: BTreeMap<String, bool>,
    msc3664_enabled: bool,
    msc1767_enabled: bool,
}

#[pymethods]
@ -417,11 +422,13 @@ impl FilteredPushRules {
        push_rules: PushRules,
        enabled_map: BTreeMap<String, bool>,
        msc3664_enabled: bool,
        msc1767_enabled: bool,
    ) -> Self {
        Self {
            push_rules,
            enabled_map,
            msc3664_enabled,
            msc1767_enabled,
        }
    }

@ -446,6 +453,10 @@ impl FilteredPushRules {
                    return false;
                }

                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
                    return false;
                }

                true
            })
            .map(|r| {
@ -491,6 +502,18 @@ fn test_deserialize_unstable_msc3664_condition() {
    ));
}

#[test]
fn test_deserialize_unstable_msc3931_condition() {
    let json =
        r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(
        condition,
        Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
    ));
}

#[test]
fn test_deserialize_custom_condition() {
    let json = r#"{"kind":"custom_tag"}"#;
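For reference, a complete push rule using the unstable condition that the test above deserialises might look like the following (a hand-written illustration of the wire shape, not text taken from the MSC):

    # Illustrative only: a push rule carrying the unstable
    # org.matrix.msc3931.room_version_supports condition.
    import json

    rule = json.loads("""
    {
        "rule_id": "global/underride/.org.example.extensible",
        "conditions": [
            {
                "kind": "org.matrix.msc3931.room_version_supports",
                "feature": "org.matrix.msc3932.extensible_events"
            }
        ],
        "actions": ["notify"]
    }
    """)
    assert rule["conditions"][0]["kind"] == "org.matrix.msc3931.room_version_supports"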
@ -162,9 +162,9 @@ else
    # We only test faster room joins on monoliths, because they are purposefully
    # being developed without worker support to start with.
    #
    # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
    # also only pass with monoliths, currently.
    test_tags="$test_tags,faster_joins,msc2716,msc3030"
    # The tests for importing historical messages (MSC2716) also only pass with monoliths,
    # currently.
    test_tags="$test_tags,faster_joins,msc2716"
fi

@ -46,11 +46,12 @@ import signedjson.key
import signedjson.types
import srvlookup
import yaml
from requests import PreparedRequest, Response
from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool

# uncomment the following to enable debug logging of http requests
# from httplib import HTTPConnection
# from http.client import HTTPConnection
# HTTPConnection.debuglevel = 1


@ -103,6 +104,7 @@ def request(
    destination: str,
    path: str,
    content: Optional[str],
    verify_tls: bool,
) -> requests.Response:
    if method is None:
        if content is None:
@ -141,7 +143,6 @@ def request(
    s.mount("matrix://", MatrixConnectionAdapter())

    headers: Dict[str, str] = {
        "Host": destination,
        "Authorization": authorization_headers[0],
    }

@ -152,7 +153,7 @@ def request(
        method=method,
        url=dest,
        headers=headers,
        verify=False,
        verify=verify_tls,
        data=content,
        stream=True,
    )
@ -202,6 +203,12 @@ def main() -> None:

    parser.add_argument("--body", help="Data to send as the body of the HTTP request")

    parser.add_argument(
        "--insecure",
        action="store_true",
        help="Disable TLS certificate verification",
    )

    parser.add_argument(
        "path", help="request path, including the '/_matrix/federation/...' prefix."
    )
@ -227,6 +234,7 @@ def main() -> None:
        args.destination,
        args.path,
        content=args.body,
        verify_tls=not args.insecure,
    )

    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
@ -254,36 +262,93 @@ def read_args_from_config(args: argparse.Namespace) -> None:


class MatrixConnectionAdapter(HTTPAdapter):
    @staticmethod
    def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
        if s[-1] == "]":
            # ipv6 literal (with no port)
            return s, 8448
    def send(
        self,
        request: PreparedRequest,
        *args: Any,
        **kwargs: Any,
    ) -> Response:
        # overrides the send() method in the base class.

        if ":" in s:
            out = s.rsplit(":", 1)
        # We need to look for .well-known redirects before passing the request up to
        # HTTPAdapter.send().
        assert isinstance(request.url, str)
        parsed = urlparse.urlsplit(request.url)
        server_name = parsed.netloc
        well_known = self._get_well_known(parsed.netloc)

        if well_known:
            server_name = well_known

        # replace the scheme in the uri with https, so that cert verification is done
        # also replace the hostname if we got a .well-known result
        request.url = urlparse.urlunsplit(
            ("https", server_name, parsed.path, parsed.query, parsed.fragment)
        )

        # at this point we also add the host header (otherwise urllib will add one
        # based on the `host` from the connection returned by `get_connection`,
        # which will be wrong if there is an SRV record).
        request.headers["Host"] = server_name

        return super().send(request, *args, **kwargs)

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    ) -> HTTPConnectionPool:
        # overrides the get_connection() method in the base class
        parsed = urlparse.urlsplit(url)
        (host, port, ssl_server_name) = self._lookup(parsed.netloc)
        print(
            f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
        )
        return self.poolmanager.connection_from_host(
            host,
            port=port,
            scheme="https",
            pool_kwargs={"server_hostname": ssl_server_name},
        )

    @staticmethod
    def _lookup(server_name: str) -> Tuple[str, int, str]:
        """
        Do an SRV lookup on a server name and return the host:port to connect to
        Given the server_name (after any .well-known lookup), return the host, port and
        the ssl server name
        """
        if server_name[-1] == "]":
            # ipv6 literal (with no port)
            return server_name, 8448, server_name

        if ":" in server_name:
            # explicit port
            out = server_name.rsplit(":", 1)
            try:
                port = int(out[1])
            except ValueError:
                raise ValueError("Invalid host:port '%s'" % s)
            return out[0], port

        # try a .well-known lookup
        if not skip_well_known:
            well_known = MatrixConnectionAdapter.get_well_known(s)
            if well_known:
                return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
                raise ValueError("Invalid host:port '%s'" % (server_name,))
            return out[0], port, out[0]

        try:
            srv = srvlookup.lookup("matrix", "tcp", s)[0]
            return srv.host, srv.port
            srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
            print(
                f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
                file=sys.stderr,
            )
            return srv.host, srv.port, server_name
        except Exception:
            return s, 8448
            return server_name, 8448, server_name

    @staticmethod
    def get_well_known(server_name: str) -> Optional[str]:
        uri = "https://%s/.well-known/matrix/server" % (server_name,)
        print("fetching %s" % (uri,), file=sys.stderr)
    def _get_well_known(server_name: str) -> Optional[str]:
        if ":" in server_name:
            # explicit port, or ipv6 literal. Either way, no .well-known
            return None

        # TODO: check for ipv4 literals

        uri = f"https://{server_name}/.well-known/matrix/server"
        print(f"fetching {uri}", file=sys.stderr)

        try:
            resp = requests.get(uri)
@ -304,19 +369,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
            print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
            return None

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    ) -> HTTPConnectionPool:
        parsed = urlparse.urlparse(url)

        (host, port) = self.lookup(parsed.netloc)
        netloc = "%s:%d" % (host, port)
        print("Connecting to %s" % (netloc,), file=sys.stderr)
        url = urlparse.urlunparse(
            ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
        )
        return super().get_connection(url, proxies)


if __name__ == "__main__":
    main()
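The rewritten adapter splits discovery into two stages: send() applies any .well-known delegation and rewrites the request URL, then get_connection() resolves the (possibly delegated) name via an explicit port, an SRV record, or the default port 8448. A condensed, illustrative restatement of the combined flow, reusing the methods above (error handling trimmed):

    # Illustrative only: .well-known delegation first, then explicit
    # port / SRV / default 8448, as implemented by the adapter above.
    def resolve(server_name: str):
        delegated = MatrixConnectionAdapter._get_well_known(server_name) or server_name
        # _lookup() returns (host, port, ssl_server_name): an explicit port or
        # IPv6 literal short-circuits; otherwise SRV is tried, then port 8448.
        return MatrixConnectionAdapter._lookup(delegated)

    # e.g. resolve("example.org") consults
    # https://example.org/.well-known/matrix/server and then
    # _matrix._tcp.example.org before falling back to example.org:8448.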
@ -26,7 +26,11 @@ class PushRules:

class FilteredPushRules:
    def __init__(
        self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
        self,
        push_rules: PushRules,
        enabled_map: Dict[str, bool],
        msc3664_enabled: bool,
        msc1767_enabled: bool,
    ): ...
    def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

@ -41,6 +45,8 @@ class PushRuleEvaluator:
        notification_power_levels: Mapping[str, int],
        related_events_flattened: Mapping[str, Mapping[str, str]],
        related_event_match_enabled: bool,
        room_version_feature_flags: list[str],
        msc3931_enabled: bool,
    ): ...
    def run(
        self,
@ -713,7 +713,7 @@ class HttpResponseException(CodeMessageException):
        set to the reason code from the HTTP response.

        Returns:
            SynapseError:
            The error converted to a SynapseError.
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Dict, Optional
from typing import Callable, Dict, List, Optional

import attr

@ -51,6 +51,13 @@ class RoomDisposition:
    UNSTABLE = "unstable"


class PushRuleRoomFlag:
    """Enum for listing possible MSC3931 room version feature flags, for push rules"""

    # MSC3932: Room version supports MSC1767 Extensible Events.
    EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomVersion:
    """An object which describes the unique attributes of a room version."""
@ -91,6 +98,12 @@ class RoomVersion:
    msc3787_knock_restricted_join_rule: bool
    # MSC3667: Enforce integer power levels
    msc3667_int_only_power_levels: bool
    # MSC3931: Adds a push rule condition for "room version feature flags", making
    # some push rules room version dependent. Note that adding a flag to this list
    # is not enough to mark it "supported": the push rule evaluator also needs to
    # support the flag. Unknown flags are ignored by the evaluator, making conditions
    # fail if used.
    msc3931_push_features: List[str]  # values from PushRuleRoomFlag


class RoomVersions:
@ -111,6 +124,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V2 = RoomVersion(
        "2",
@ -129,6 +143,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V3 = RoomVersion(
        "3",
@ -147,6 +162,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V4 = RoomVersion(
        "4",
@ -165,6 +181,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V5 = RoomVersion(
        "5",
@ -183,6 +200,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V6 = RoomVersion(
        "6",
@ -201,6 +219,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@ -219,6 +238,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V7 = RoomVersion(
        "7",
@ -237,6 +257,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V8 = RoomVersion(
        "8",
@ -255,6 +276,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V9 = RoomVersion(
        "9",
@ -273,6 +295,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
@ -291,6 +314,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V10 = RoomVersion(
        "10",
@ -309,6 +333,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=[],
    )
    MSC2716v4 = RoomVersion(
        "org.matrix.msc2716v4",
@ -327,6 +352,27 @@ class RoomVersions:
        msc2716_redactions=True,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    MSC1767v10 = RoomVersion(
        # MSC1767 (Extensible Events) based on room version "10"
        "org.matrix.msc1767.10",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS],
    )
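The per-version flag list declared here is what ultimately reaches the Rust evaluator's room_version_feature_flags argument. An illustrative check against the definitions above:

    # Illustrative assertions: the flags on a RoomVersion feed
    # PushRuleEvaluator's room_version_feature_flags.
    room_version = RoomVersions.MSC1767v10
    assert room_version.msc3931_push_features == [PushRuleRoomFlag.EXTENSIBLE_EVENTS]
    assert PushRuleRoomFlag.EXTENSIBLE_EVENTS == "org.matrix.msc3932.extensible_events"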
@ -266,26 +266,18 @@ def register_start(
    reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))


def listen_metrics(
    bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
) -> None:
def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
    """
    Start Prometheus metrics server.
    """
    from prometheus_client import start_http_server as start_http_server_prometheus

    from synapse.metrics import (
        RegistryProxy,
        start_http_server as start_http_server_legacy,
    )
    from synapse.metrics import RegistryProxy

    for host in bind_addresses:
        logger.info("Starting metrics listener on %s:%d", host, port)
        if enable_legacy_metric_names:
            start_http_server_legacy(port, addr=host, registry=RegistryProxy)
        else:
            _set_prometheus_client_use_created_metrics(False)
            start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
        _set_prometheus_client_use_created_metrics(False)
        start_http_server_prometheus(port, addr=host, registry=RegistryProxy)


def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
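With the legacy exporter removed, the listener reduces to the stock prometheus_client HTTP server. A minimal standalone sketch, assuming only the public prometheus_client API (Synapse additionally substitutes its own RegistryProxy and suppresses the *_created time series):

    # Minimal sketch using only the public prometheus_client API; Synapse
    # additionally passes RegistryProxy and disables *_created metrics.
    from prometheus_client import REGISTRY, start_http_server

    def listen_metrics(bind_addresses, port):
        for host in bind_addresses:
            start_http_server(port, addr=host, registry=REGISTRY)

    listen_metrics(["127.0.0.1"], 9090)  # then scrape http://127.0.0.1:9090/metrics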
@ -14,14 +14,12 @@
# limitations under the License.
import logging
import sys
from typing import Dict, List, Optional, Tuple
from typing import Dict, List

from twisted.internet import address
from twisted.web.resource import Resource

import synapse
import synapse.events
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
    CLIENT_API_PREFIX,
    FEDERATION_PREFIX,
@ -43,8 +41,6 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@ -70,12 +66,12 @@ from synapse.rest.client import (
    versions,
    voip,
)
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
    KeyChangesServlet,
    KeyQueryServlet,
    KeyUploadServlet,
    OneTimeKeyServlet,
)
from synapse.rest.client.register import (
@ -132,107 +128,12 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree

logger = logging.getLogger("synapse.app.generic_worker")


class KeyUploadServlet(RestServlet):
    """An implementation of the `KeyUploadServlet` that responds to read only
    requests, but otherwise proxies through to the master instance.
    """

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs: HomeServer):
        """
        Args:
            hs: server
        """
        super().__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker.worker_main_http_uri

    async def on_POST(
        self, request: SynapseRequest, device_id: Optional[str]
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.

            # Proxy headers from the original request, such as the auth headers
            # (in case the access token is there) and the original IP /
            # User-Agent of the request.
            headers: Dict[bytes, List[bytes]] = {
                header: list(request.requestHeaders.getRawHeaders(header, []))
                for header in (b"Authorization", b"User-Agent")
            }
            # Add the previous hop to the X-Forwarded-For header.
            x_forwarded_for = list(
                request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])
            )
            # we use request.client here, since we want the previous hop, not the
            # original client (as returned by request.getClientAddress()).
            if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
                previous_host = request.client.host.encode("ascii")
                # If the header exists, add to the comma-separated list of the first
                # instance of the header. Otherwise, generate a new header.
                if x_forwarded_for:
                    x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
                    x_forwarded_for.extend(x_forwarded_for[1:])
                else:
                    x_forwarded_for = [previous_host]
            headers[b"X-Forwarded-For"] = x_forwarded_for

            # Replicate the original X-Forwarded-Proto header. Note that
            # XForwardedForRequest overrides isSecure() to give us the original protocol
            # used by the client, as opposed to the protocol used by our upstream proxy
            # - which is what we want here.
            headers[b"X-Forwarded-Proto"] = [
                b"https" if request.isSecure() else b"http"
            ]

            try:
                result = await self.http_client.post_json_get_json(
                    self.main_uri + request.uri.decode("ascii"), body, headers=headers
                )
            except HttpResponseException as e:
                raise e.to_synapse_error() from e
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to talk to master") from e

            return 200, result
        else:
            # Just interested in counts.
            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}


class GenericWorkerSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
@ -419,7 +320,6 @@ class GenericWorkerServer(HomeServer):
                _base.listen_metrics(
                    listener.bind_addresses,
                    listener.port,
                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
                )
            else:
                logger.warning("Unsupported listener type: %s", listener.type)
@ -265,7 +265,6 @@ class SynapseHomeServer(HomeServer):
                _base.listen_metrics(
                    listener.bind_addresses,
                    listener.port,
                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
                )
            else:
                # this shouldn't happen, as the listener type should have been checked
@ -32,9 +32,9 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

# Type for the `device_one_time_key_counts` field in an appservice transaction
# Type for the `device_one_time_keys_count` field in an appservice transaction
#   user ID -> {device ID -> {algorithm -> count}}
TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]

# Type for the `device_unused_fallback_key_types` field in an appservice transaction
#   user ID -> {device ID -> [algorithm]}
@ -376,7 +376,7 @@ class AppServiceTransaction:
        events: List[EventBase],
        ephemeral: List[JsonDict],
        to_device_messages: List[JsonDict],
        one_time_key_counts: TransactionOneTimeKeyCounts,
        one_time_keys_count: TransactionOneTimeKeysCount,
        unused_fallback_keys: TransactionUnusedFallbackKeys,
        device_list_summary: DeviceListUpdates,
    ):
@ -385,7 +385,7 @@ class AppServiceTransaction:
        self.events = events
        self.ephemeral = ephemeral
        self.to_device_messages = to_device_messages
        self.one_time_key_counts = one_time_key_counts
        self.one_time_keys_count = one_time_keys_count
        self.unused_fallback_keys = unused_fallback_keys
        self.device_list_summary = device_list_summary

@ -402,7 +402,7 @@ class AppServiceTransaction:
            events=self.events,
            ephemeral=self.ephemeral,
            to_device_messages=self.to_device_messages,
            one_time_key_counts=self.one_time_key_counts,
            one_time_keys_count=self.one_time_keys_count,
            unused_fallback_keys=self.unused_fallback_keys,
            device_list_summary=self.device_list_summary,
            txn_id=self.id,
@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.appservice import (
    ApplicationService,
    TransactionOneTimeKeyCounts,
    TransactionOneTimeKeysCount,
    TransactionUnusedFallbackKeys,
)
from synapse.events import EventBase
@ -262,7 +262,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        events: List[EventBase],
        ephemeral: List[JsonDict],
        to_device_messages: List[JsonDict],
        one_time_key_counts: TransactionOneTimeKeyCounts,
        one_time_keys_count: TransactionOneTimeKeysCount,
        unused_fallback_keys: TransactionUnusedFallbackKeys,
        device_list_summary: DeviceListUpdates,
        txn_id: Optional[int] = None,
@ -310,10 +310,13 @@ class ApplicationServiceApi(SimpleHttpClient):

        # TODO: Update to stable prefixes once MSC3202 completes FCP merge
        if service.msc3202_transaction_extensions:
            if one_time_key_counts:
            if one_time_keys_count:
                body[
                    "org.matrix.msc3202.device_one_time_key_counts"
                ] = one_time_key_counts
                ] = one_time_keys_count
                body[
                    "org.matrix.msc3202.device_one_time_keys_count"
                ] = one_time_keys_count
            if unused_fallback_keys:
                body[
                    "org.matrix.msc3202.device_unused_fallback_key_types"
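While the rename is in flight, the transaction body carries the same counts under both the old and the new unstable field names, so appservices written against either spelling keep working. A hand-written illustration of the resulting body (example values):

    # Illustration only (example values): the MSC3202 extension now
    # duplicates the counts under the old and new unstable field names.
    body = {
        "events": [],
        "org.matrix.msc3202.device_one_time_key_counts": {
            "@alice:example.org": {"DEVICEID": {"signed_curve25519": 50}}
        },
        "org.matrix.msc3202.device_one_time_keys_count": {
            "@alice:example.org": {"DEVICEID": {"signed_curve25519": 50}}
        },
    }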
@ -64,7 +64,7 @@ from typing import (
from synapse.appservice import (
    ApplicationService,
    ApplicationServiceState,
    TransactionOneTimeKeyCounts,
    TransactionOneTimeKeysCount,
    TransactionUnusedFallbackKeys,
)
from synapse.appservice.api import ApplicationServiceApi
@ -258,7 +258,7 @@ class _ServiceQueuer:
        ):
            return

        one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
        one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
        unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None

        if (
@ -269,7 +269,7 @@ class _ServiceQueuer:
            # for the users which are mentioned in this transaction,
            # as well as the appservice's sender.
            (
                one_time_key_counts,
                one_time_keys_count,
                unused_fallback_keys,
            ) = await self._compute_msc3202_otk_counts_and_fallback_keys(
                service, events, ephemeral, to_device_messages_to_send
@ -281,7 +281,7 @@ class _ServiceQueuer:
            events,
            ephemeral,
            to_device_messages_to_send,
            one_time_key_counts,
            one_time_keys_count,
            unused_fallback_keys,
            device_list_summary,
        )
@ -296,7 +296,7 @@ class _ServiceQueuer:
        events: Iterable[EventBase],
        ephemerals: Iterable[JsonDict],
        to_device_messages: Iterable[JsonDict],
    ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]:
    ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
        """
        Given a list of the events, ephemeral messages and to-device messages,
        - first computes a list of application services users that may have
@ -367,7 +367,7 @@ class _TransactionController:
        events: List[EventBase],
        ephemeral: Optional[List[JsonDict]] = None,
        to_device_messages: Optional[List[JsonDict]] = None,
        one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
        one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
        unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
        device_list_summary: Optional[DeviceListUpdates] = None,
    ) -> None:
@ -380,7 +380,7 @@ class _TransactionController:
            events: The persistent events to include in the transaction.
            ephemeral: The ephemeral events to include in the transaction.
            to_device_messages: The to-device messages to include in the transaction.
            one_time_key_counts: Counts of remaining one-time keys for relevant
            one_time_keys_count: Counts of remaining one-time keys for relevant
                appservice devices in the transaction.
            unused_fallback_keys: Lists of unused fallback keys for relevant
                appservice devices in the transaction.
@ -397,7 +397,7 @@ class _TransactionController:
                events=events,
                ephemeral=ephemeral or [],
                to_device_messages=to_device_messages or [],
                one_time_key_counts=one_time_key_counts or {},
                one_time_keys_count=one_time_keys_count or {},
                unused_fallback_keys=unused_fallback_keys or {},
                device_list_summary=device_list_summary or DeviceListUpdates(),
            )
@ -16,6 +16,7 @@ from typing import Any, Optional

import attr

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict

@ -53,9 +54,6 @@ class ExperimentalConfig(Config):
        # MSC3266 (room summary api)
        self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)

        # MSC3030 (Jump to date API endpoint)
        self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)

        # MSC2409 (this setting only relates to optionally sending to-device messages).
        # Presence, typing and read receipt EDUs are already sent to application services that
        # have opted in to receive them. If enabled, this adds to-device messages to that list.
@ -131,3 +129,10 @@ class ExperimentalConfig(Config):

        # MSC3912: Relation-based redactions.
        self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)

        # MSC1767 and friends: Extensible Events
        self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
        if self.msc1767_enabled:
            # Enable room version (and thus applicable push rules from MSC3931/3932)
            version_id = RoomVersions.MSC1767v10.identifier
            KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10
@ -317,10 +317,9 @@ def setup_logging(
    Set up the logging subsystem.

    Args:
        config (LoggingConfig | synapse.config.worker.WorkerConfig):
            configuration data
        config: configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
        use_worker_options: True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.
@ -43,8 +43,6 @@ class MetricsConfig(Config):
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        self.enable_metrics = config.get("enable_metrics", False)

        self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)

        self.report_stats = config.get("report_stats", None)
        self.report_stats_endpoint = config.get(
            "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
@ -150,8 +150,5 @@ class RatelimitConfig(Config):

        self.rc_third_party_invite = RatelimitSettings(
            config.get("rc_third_party_invite", {}),
            defaults={
                "per_second": self.rc_message.per_second,
                "burst_count": self.rc_message.burst_count,
            },
            defaults={"per_second": 0.0025, "burst_count": 5},
        )
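The new defaults decouple third-party invites from the message ratelimit: a burst of 5 invites, refilling at 0.0025 per second, works out to roughly one invite every 400 seconds.

    # Quick arithmetic on the new defaults.
    per_second, burst_count = 0.0025, 5
    assert 1 / per_second == 400.0  # one invite refills about every 400s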
@ -29,20 +29,6 @@ from ._base import (
)
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def

_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main
synapse process before they can be run in a separate worker.

Please add ``send_federation: false`` to the main config
"""

_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
The start_pushers config option must be disabled in the main
synapse process before they can be run in a separate worker.

Please add ``start_pushers: false`` to the main config
"""

_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
@ -162,7 +148,13 @@ class WorkerConfig(Config):
        self.worker_name = config.get("worker_name", self.worker_app)
        self.instance_name = self.worker_name or "master"

        # FIXME: Remove this check after a suitable amount of time.
        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
        if self.worker_main_http_uri is not None:
            logger.warning(
                "The config option worker_main_http_uri is unused since Synapse 1.73. "
                "It can be safely removed from your configuration."
            )

        # This option is really only here to support `--manhole` command line
        # argument.
@ -176,40 +168,12 @@ class WorkerConfig(Config):
            )
        )

        # Handle federation sender configuration.
        #
        # There are two ways of configuring which instances handle federation
        # sending:
        #   1. The old way where "send_federation" is set to false and running a
        #      `synapse.app.federation_sender` worker app.
        #   2. Specifying the workers sending federation in
        #      `federation_sender_instances`.
        #

        send_federation = config.get("send_federation", True)

        federation_sender_instances = config.get("federation_sender_instances")
        if federation_sender_instances is None:
            # Default to an empty list, which means "another, unknown, worker is
            # responsible for it".
            federation_sender_instances = []

            # If no federation sender instances are set we check if
            # `send_federation` is set, which means use master
            if send_federation:
                federation_sender_instances = ["master"]

            if self.worker_app == "synapse.app.federation_sender":
                if send_federation:
                    # If we're running federation senders, and not using
                    # `federation_sender_instances`, then we should have
                    # explicitly set `send_federation` to false.
                    raise ConfigError(
                        _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
                    )

                federation_sender_instances = [self.worker_name]

        federation_sender_instances = self._worker_names_performing_this_duty(
            config,
            "send_federation",
            "synapse.app.federation_sender",
            "federation_sender_instances",
        )
        self.send_federation = self.instance_name in federation_sender_instances
        self.federation_shard_config = ShardedWorkerHandlingConfig(
            federation_sender_instances
@ -276,27 +240,12 @@ class WorkerConfig(Config):
        )

        # Handle sharded push
        start_pushers = config.get("start_pushers", True)
        pusher_instances = config.get("pusher_instances")
        if pusher_instances is None:
            # Default to an empty list, which means "another, unknown, worker is
            # responsible for it".
            pusher_instances = []

            # If no pushers instances are set we check if `start_pushers` is
            # set, which means use master
            if start_pushers:
                pusher_instances = ["master"]

            if self.worker_app == "synapse.app.pusher":
                if start_pushers:
                    # If we're running pushers, and not using
                    # `pusher_instances`, then we should have explicitly set
                    # `start_pushers` to false.
                    raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)

                pusher_instances = [self.instance_name]

        pusher_instances = self._worker_names_performing_this_duty(
            config,
            "start_pushers",
            "synapse.app.pusher",
            "pusher_instances",
        )
        self.start_pushers = self.instance_name in pusher_instances
        self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)

@ -419,6 +368,64 @@ class WorkerConfig(Config):
        # (By this point, these are either the same value or only one is not None.)
        return bool(new_option_should_run_here or legacy_option_should_run_here)

    def _worker_names_performing_this_duty(
        self,
        config: Dict[str, Any],
        legacy_option_name: str,
        legacy_app_name: str,
        modern_instance_list_name: str,
    ) -> List[str]:
        """
        Retrieves the names of the workers handling a given duty, by either legacy
        option or instance list.

        There are two ways of configuring which instances handle a given duty, e.g.
        for configuring pushers:

          1. The old way where "start_pushers" is set to false and running a
             `synapse.app.pusher` worker app.
          2. Specifying the workers handling this duty in `pusher_instances`.

        Args:
            config: settings read from yaml.
            legacy_option_name: the old way of enabling options. e.g. 'start_pushers'
            legacy_app_name: The historical app name. e.g. 'synapse.app.pusher'
            modern_instance_list_name: the string name of the new instance_list. e.g.
                'pusher_instances'

        Returns:
            A list of worker instance names handling the given duty.
        """

        legacy_option = config.get(legacy_option_name, True)

        worker_instances = config.get(modern_instance_list_name)
        if worker_instances is None:
            # Default to an empty list, which means "another, unknown, worker is
            # responsible for it".
            worker_instances = []

            # If no worker instances are set we check if the legacy option
            # is set, which means use the main process.
            if legacy_option:
                worker_instances = ["master"]

            if self.worker_app == legacy_app_name:
                if legacy_option:
                    # If we're using `legacy_app_name`, and not using
                    # `modern_instance_list_name`, then we should have
                    # explicitly set `legacy_option_name` to false.
                    raise ConfigError(
                        f"The '{legacy_option_name}' config option must be disabled in "
                        "the main synapse process before they can be run in a separate "
                        "worker.\n"
                        f"Please add `{legacy_option_name}: false` to the main config.\n",
                    )

                worker_instances = [self.worker_name]

        return worker_instances

    def read_arguments(self, args: argparse.Namespace) -> None:
        # We support a bunch of command line arguments that override options in
        # the config. A lot of these options have a worker_* prefix when running
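A runnable restatement of the helper's decision table may be useful (a hypothetical standalone function; the real method additionally raises ConfigError when a legacy worker app is run with the legacy option still enabled):

    # Hypothetical standalone re-statement of the decision table above.
    def worker_names_performing_this_duty(config, legacy_option_name, instance_list_name):
        instances = config.get(instance_list_name)
        if instances is None:
            # No explicit list: the legacy flag (default True) means "master".
            instances = ["master"] if config.get(legacy_option_name, True) else []
        return instances

    assert worker_names_performing_this_duty(
        {"pusher_instances": ["pusher1"]}, "start_pushers", "pusher_instances"
    ) == ["pusher1"]
    assert worker_names_performing_this_duty(
        {}, "start_pushers", "pusher_instances"
    ) == ["master"]
    assert worker_names_performing_this_duty(
        {"start_pushers": False}, "start_pushers", "pusher_instances"
    ) == []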
@ -213,7 +213,7 @@ class Keyring:

    def verify_json_objects_for_server(
        self, server_and_json: Iterable[Tuple[str, dict, int]]
    ) -> List[defer.Deferred]:
    ) -> List["defer.Deferred[None]"]:
        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

@ -226,10 +226,9 @@ class Keyring:
            valid.

        Returns:
            List<Deferred[None]>: for each input triplet, a deferred indicating success
                or failure to verify each json object's signature for the given
                server_name. The deferreds run their callbacks in the sentinel
                logcontext.
            For each input triplet, a deferred indicating success or failure to
                verify each json object's signature for the given server_name. The
                deferreds run their callbacks in the sentinel logcontext.
        """
        return [
            run_in_background(
@ -858,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
                response = await self.client.get_json(
                    destination=server_name,
                    path="/_matrix/key/v2/server/"
                    + urllib.parse.quote(requested_key_id),
                    + urllib.parse.quote(requested_key_id, safe=""),
                    ignore_backoff=True,
                    # we only give the remote server 10s to respond. It should be an
                    # easy request to handle, so if it doesn't reply within 10s, it's
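The safe="" argument matters because urllib.parse.quote treats "/" as safe by default, so a key ID containing a slash would previously have corrupted the request path:

    # Why safe="" matters for key IDs containing "/".
    from urllib.parse import quote

    assert quote("ed25519:a/b") == "ed25519%3Aa/b"             # default: "/" kept
    assert quote("ed25519:a/b", safe="") == "ed25519%3Aa%2Fb"  # fully escaped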
@ -597,8 +597,7 @@ def _event_type_from_format_version(
        format_version: The event format version

    Returns:
        type: A type that can be initialized as per the initializer of
            `FrozenEvent`
        A type that can be initialized as per the initializer of `FrozenEvent`
    """

    if format_version == EventFormatVersions.ROOM_V1_V2:
@ -128,6 +128,7 @@ class EventBuilder:
            state_filter=StateFilter.from_types(
                auth_types_for_event(self.room_version, self)
            ),
            await_full_state=False,
        )
        auth_event_ids = self._event_auth_handler.compute_auth_events(
            self, state_ids
@ -1691,9 +1691,19 @@ class FederationClient(FederationBase):
                # to return events on *both* sides of the timestamp to
                # help reconcile the gap faster.
                _timestamp_to_event_from_destination,
                # Since this endpoint is new, we should try other servers before giving up.
                # We can safely remove this in a year (remove after 2023-11-16).
                failover_on_unknown_endpoint=True,
            )
            return timestamp_to_event_response
        except SynapseError:
        except SynapseError as e:
            logger.warn(
                "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
                room_id,
                timestamp,
                direction,
                e,
            )
            return None

    async def _timestamp_to_event_from_destination(
@ -434,7 +434,23 @@ class FederationSender(AbstractFederationSender):
                    # If there are no prev event IDs then the state is empty
                    # and so no remote servers in the room
                    destinations = set()
                else:

                if destinations is None:
                    # During partial join we use the set of servers that we got
                    # when beginning the join. It's still possible that we send
                    # events to servers that left the room in the meantime, but
                    # we consider that an acceptable risk since it is only our own
                    # events that we leak and not other servers' ones.
                    partial_state_destinations = (
                        await self.store.get_partial_state_servers_at_join(
                            event.room_id
                        )
                    )

                    if len(partial_state_destinations) > 0:
                        destinations = partial_state_destinations

                if destinations is None:
                    # We check the external cache for the destinations, which is
                    # stored per state group.
@ -35,7 +35,7 @@ from synapse.logging import issue9533_logger
|
||||
from synapse.logging.opentracing import SynapseTags, set_tag
|
||||
from synapse.metrics import sent_transactions_counter
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.types import ReadReceipt
|
||||
from synapse.types import JsonDict, ReadReceipt
|
||||
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
|
||||
from synapse.visibility import filter_events_for_server
|
||||
|
||||
@ -136,8 +136,11 @@ class PerDestinationQueue:
|
||||
# destination
|
||||
self._pending_presence: Dict[str, UserPresenceState] = {}
|
||||
|
||||
# room_id -> receipt_type -> user_id -> receipt_dict
|
||||
self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {}
|
||||
# List of room_id -> receipt_type -> user_id -> receipt_dict,
|
||||
#
|
||||
# Each receipt can only have a single receipt per
|
||||
# (room ID, receipt type, user ID, thread ID) tuple.
|
||||
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
|
||||
self._rrs_pending_flush = False
|
||||
|
||||
# stream_id of last successfully sent to-device message.
|
||||
@ -202,17 +205,53 @@ class PerDestinationQueue:
|
||||
Args:
|
||||
receipt: receipt to be queued
|
||||
"""
|
||||
self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
|
||||
receipt.receipt_type, {}
|
||||
)[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
|
||||
serialized_receipt: JsonDict = {
|
||||
"event_ids": receipt.event_ids,
|
||||
"data": receipt.data,
|
||||
}
|
||||
if receipt.thread_id is not None:
|
||||
serialized_receipt["data"]["thread_id"] = receipt.thread_id
|
||||
|
||||
# Find which EDU to add this receipt to. There's three situations depending
|
||||
# on the (room ID, receipt type, user, thread ID) tuple:
|
||||
#
|
||||
# 1. If it fully matches, clobber the information.
|
||||
# 2. If it is missing, add the information.
|
||||
# 3. If the subset tuple of (room ID, receipt type, user) matches, check
|
||||
# the next EDU (or add a new EDU).
|
||||
for edu in self._pending_receipt_edus:
|
||||
receipt_content = edu.setdefault(receipt.room_id, {}).setdefault(
|
||||
receipt.receipt_type, {}
|
||||
)
|
||||
# If this room ID, receipt type, user ID is not in this EDU, OR if
|
||||
# the full tuple matches, use the current EDU.
|
||||
if (
|
||||
receipt.user_id not in receipt_content
|
||||
or receipt_content[receipt.user_id].get("thread_id")
|
||||
== receipt.thread_id
|
||||
):
|
||||
receipt_content[receipt.user_id] = serialized_receipt
|
||||
break
|
||||
|
||||
# If no matching EDU was found, create a new one.
|
||||
else:
|
||||
self._pending_receipt_edus.append(
|
||||
{
|
||||
receipt.room_id: {
|
||||
receipt.receipt_type: {receipt.user_id: serialized_receipt}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
def flush_read_receipts_for_room(self, room_id: str) -> None:
|
||||
# if we don't have any read-receipts for this room, it may be that we've already
|
||||
# sent them out, so we don't need to flush.
|
||||
if room_id not in self._pending_rrs:
|
||||
return
|
||||
self._rrs_pending_flush = True
|
||||
self.attempt_new_transaction()
|
||||
# If there are any pending receipts for this room then force-flush them
|
||||
# in a new transaction.
|
||||
for edu in self._pending_receipt_edus:
|
||||
if room_id in edu:
|
||||
self._rrs_pending_flush = True
|
||||
self.attempt_new_transaction()
|
||||
# No use in checking remaining EDUs if the room was found.
|
||||
break
|
||||
|
||||
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
|
||||
self._pending_edus_keyed[(edu.edu_type, key)] = edu
|
||||
@ -351,7 +390,7 @@ class PerDestinationQueue:
|
||||
self._pending_edus = []
|
||||
self._pending_edus_keyed = {}
|
||||
self._pending_presence = {}
|
||||
self._pending_rrs = {}
|
||||
self._pending_receipt_edus = []
|
||||
|
||||
self._start_catching_up()
|
||||
except FederationDeniedError as e:
|
||||
@ -505,6 +544,7 @@ class PerDestinationQueue:
|
||||
new_pdus = await filter_events_for_server(
|
||||
self._storage_controllers,
|
||||
self._destination,
|
||||
self._server_name,
|
||||
new_pdus,
|
||||
redact=False,
|
||||
)
|
||||
@ -542,22 +582,27 @@ class PerDestinationQueue:
|
||||
self._destination, last_successful_stream_ordering
|
||||
)
|
||||
|
||||
def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
|
||||
if not self._pending_rrs:
|
||||
def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
|
||||
if not self._pending_receipt_edus:
|
||||
return
|
||||
if not force_flush and not self._rrs_pending_flush:
|
||||
# not yet time for this lot
|
||||
return
|
||||
|
||||
edu = Edu(
|
||||
origin=self._server_name,
|
||||
destination=self._destination,
|
||||
edu_type=EduTypes.RECEIPT,
|
||||
content=self._pending_rrs,
|
||||
)
|
||||
self._pending_rrs = {}
|
||||
self._rrs_pending_flush = False
|
||||
yield edu
|
||||
# Send at most limit EDUs for receipts.
|
||||
for content in self._pending_receipt_edus[:limit]:
|
||||
yield Edu(
|
||||
origin=self._server_name,
|
||||
destination=self._destination,
|
||||
edu_type=EduTypes.RECEIPT,
|
||||
content=content,
|
||||
)
|
||||
self._pending_receipt_edus = self._pending_receipt_edus[limit:]
|
||||
|
||||
# If there are still pending read-receipts, don't reset the pending flush
|
||||
# flag.
|
||||
if not self._pending_receipt_edus:
|
||||
self._rrs_pending_flush = False
|
||||
|
||||
def _pop_pending_edus(self, limit: int) -> List[Edu]:
|
||||
pending_edus = self._pending_edus
|
||||
@ -644,40 +689,20 @@ class _TransactionQueueManager:
|
||||
async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
|
||||
# First we calculate the EDUs we want to send, if any.
|
||||
|
||||
# We start by fetching device related EDUs, i.e device updates and to
|
||||
# device messages. We have to keep 2 free slots for presence and rr_edus.
device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
# There's a maximum number of EDUs that can be sent with a transaction,
# generally device updates and to-device messages get priority, but we
# want to ensure that there's room for some other EDUs as well.
#
# This is done by:
#
# * Add a presence EDU, if one exists.
# * Add up-to a small limit of read receipt EDUs.
# * Add to-device EDUs, but leave some space for device list updates.
# * Add device list updates EDUs.
# * If there's any remaining room, add other EDUs.
pending_edus = []

# We prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
    to_device_edus,
    device_stream_id,
) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)

if to_device_edus:
    self._device_stream_id = device_stream_id
else:
    self.queue._last_device_stream_id = device_stream_id

device_edu_limit -= len(to_device_edus)

device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
    device_edu_limit
)

if device_update_edus:
    self._device_list_id = dev_list_id
else:
    self.queue._last_device_list_stream_id = dev_list_id

pending_edus = device_update_edus + to_device_edus

# Now add the read receipt EDU.
pending_edus.extend(self.queue._get_rr_edus(force_flush=False))

# And presence EDU.
# Add presence EDU.
if self.queue._pending_presence:
    pending_edus.append(
        Edu(
@ -696,16 +721,47 @@ class _TransactionQueueManager:
        )
    self.queue._pending_presence = {}

# Finally add any other types of EDUs if there is room.
pending_edus.extend(
    self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
# Add read receipt EDUs.
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)

# Next, prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
    to_device_edus,
    device_stream_id,
) = await self.queue._get_to_device_message_edus(edu_limit - 10)

if to_device_edus:
    self._device_stream_id = device_stream_id
else:
    self.queue._last_device_stream_id = device_stream_id

pending_edus.extend(to_device_edus)
edu_limit -= len(to_device_edus)

# Add device list update EDUs.
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
    edu_limit
)
while (
    len(pending_edus) < MAX_EDUS_PER_TRANSACTION
    and self.queue._pending_edus_keyed
):

if device_update_edus:
    self._device_list_id = dev_list_id
else:
    self.queue._last_device_list_stream_id = dev_list_id

pending_edus.extend(device_update_edus)
edu_limit -= len(device_update_edus)

# Finally add any other types of EDUs if there is room.
other_edus = self.queue._pop_pending_edus(edu_limit)
pending_edus.extend(other_edus)
edu_limit -= len(other_edus)
while edu_limit > 0 and self.queue._pending_edus_keyed:
    _, val = self.queue._pending_edus_keyed.popitem()
    pending_edus.append(val)
    edu_limit -= 1

# Now we look for any PDUs to send, by getting up to 50 PDUs from the
# queue
@ -716,8 +772,10 @@ class _TransactionQueueManager:

# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
    pending_edus.extend(self.queue._get_rr_edus(force_flush=True))
if edu_limit:
    pending_edus.extend(
        self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
    )

if self._pdus:
    self._last_stream_ordering = self._pdus[
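The hunk above replaces the old fixed reservation with a running `edu_limit` budget that is decremented as each class of EDU is added, so receipts, to-device messages, device list updates and "other" EDUs share a single cap. A minimal, self-contained sketch of that budgeting scheme follows; `MAX_EDUS`, the argument names and the helper are illustrative stand-ins, not Synapse's actual API:

# Sketch of the budgeted EDU selection described in the hunk above.
from typing import List

MAX_EDUS = 100  # assumed per-transaction cap, for illustration only

def build_edu_list(
    receipts: List[str],
    to_device: List[str],
    device_updates: List[str],
    other: List[str],
) -> List[str]:
    pending: List[str] = []

    # Read receipts first, but only a handful (limit=5 in the hunk).
    pending.extend(receipts[:5])
    budget = MAX_EDUS - len(pending)

    # To-device messages next, keeping ~10 slots spare so device list
    # updates can still trickle out.
    take = max(budget - 10, 0)
    pending.extend(to_device[:take])
    budget -= min(len(to_device), take)

    # Device list updates get whatever remains of the reserved space.
    pending.extend(device_updates[:budget])
    budget -= min(len(device_updates), budget)

    # Finally, any other EDUs fill the remaining room.
    pending.extend(other[:budget])
    return pending

# e.g. build_edu_list(["rr1"], ["m%d" % i for i in range(200)], ["dev1"], [])
# never returns more than MAX_EDUS entries.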
@ -185,9 +185,8 @@ class TransportLayerClient:
Raises:
    Various exceptions when the request fails
"""
path = _create_path(
    FEDERATION_UNSTABLE_PREFIX,
    "/org.matrix.msc3030/timestamp_to_event/%s",
path = _create_v1_path(
    "/timestamp_to_event/%s",
    room_id,
)

@ -280,12 +279,11 @@ class TransportLayerClient:
Note that this does not append any events to any graphs.

Args:
    destination (str): address of remote homeserver
    room_id (str): room to join/leave
    user_id (str): user to be joined/left
    membership (str): one of join/leave
    params (dict[str, str|Iterable[str]]): Query parameters to include in the
        request.
    destination: address of remote homeserver
    room_id: room to join/leave
    user_id: user to be joined/left
    membership: one of join/leave
    params: Query parameters to include in the request.

Returns:
    Succeeds when we get a 2xx HTTP response. The result
@ -25,7 +25,6 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
    FEDERATION_SERVLET_CLASSES,
    FederationAccountStatusServlet,
    FederationTimestampLookupServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@ -291,13 +290,6 @@ def register_servlets(
)

for servletclass in SERVLET_GROUPS[servlet_group]:
    # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
    if (
        servletclass == FederationTimestampLookupServlet
        and not hs.config.experimental.msc3030_enabled
    ):
        continue

    # Only allow the `/account_status` servlet if msc3720 is enabled
    if (
        servletclass == FederationAccountStatusServlet
@ -224,10 +224,10 @@ class BaseFederationServlet:

With arguments:

    origin (unicode|None): The authenticated server_name of the calling server,
    origin (str|None): The authenticated server_name of the calling server,
        unless REQUIRE_AUTH is set to False and authentication failed.

    content (unicode|None): decoded json body of the request. None if the
    content (str|None): decoded json body of the request. None if the
        request was a GET.

    query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
@ -218,14 +218,13 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.

GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
GET /_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
{
    "event_id": ...
}
"""

PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"

async def on_GET(
    self,
@ -16,6 +16,7 @@ import logging
from typing import TYPE_CHECKING, Optional

from synapse.api.errors import SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Codes, Requester, UserID, create_requester

@ -76,6 +77,9 @@ class DeactivateAccountHandler:
    True if identity server supports removing threepids, otherwise False.
"""

# This can only be called on the main process.
assert isinstance(self._device_handler, DeviceHandler)

# Check if this user can be deactivated
if not await self._third_party_rules.check_can_deactivate_user(
    user_id, by_admin
@ -65,6 +65,8 @@ DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000


class DeviceWorkerHandler:
    device_list_updater: "DeviceListWorkerUpdater"

    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()
        self.hs = hs
@ -76,6 +78,8 @@ class DeviceWorkerHandler:
        self.server_name = hs.hostname
        self._msc3852_enabled = hs.config.experimental.msc3852_enabled

        self.device_list_updater = DeviceListWorkerUpdater(hs)

    @trace
    async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
        """
@ -99,6 +103,19 @@ class DeviceWorkerHandler:
        log_kv(device_map)
        return devices

    async def get_dehydrated_device(
        self, user_id: str
    ) -> Optional[Tuple[str, JsonDict]]:
        """Retrieve the information for a dehydrated device.

        Args:
            user_id: the user whose dehydrated device we are looking for
        Returns:
            a tuple whose first item is the device ID, and the second item is
            the dehydrated device information
        """
        return await self.store.get_dehydrated_device(user_id)

    @trace
    async def get_device(self, user_id: str, device_id: str) -> JsonDict:
        """Retrieve the given device
@ -127,7 +144,7 @@ class DeviceWorkerHandler:
    @cancellable
    async def get_device_changes_in_shared_rooms(
        self, user_id: str, room_ids: Collection[str], from_token: StreamToken
    ) -> Collection[str]:
    ) -> Set[str]:
        """Get the set of users whose devices have changed who share a room with
        the given user.
        """
@ -320,6 +337,8 @@ class DeviceWorkerHandler:


class DeviceHandler(DeviceWorkerHandler):
    device_list_updater: "DeviceListUpdater"

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

@ -402,6 +421,9 @@ class DeviceHandler(DeviceWorkerHandler):

        self._check_device_name_length(initial_device_display_name)

        # Prune the user's device list if they already have a lot of devices.
        await self._prune_too_many_devices(user_id)

        if device_id is not None:
            new_device = await self.store.store_device(
                user_id=user_id,
@ -433,6 +455,14 @@ class DeviceHandler(DeviceWorkerHandler):

        raise errors.StoreError(500, "Couldn't generate a device ID.")

    async def _prune_too_many_devices(self, user_id: str) -> None:
        """Delete any excess old devices this user may have."""
        device_ids = await self.store.check_too_many_devices_for_user(user_id)
        if not device_ids:
            return

        await self.delete_devices(user_id, device_ids)

    async def _delete_stale_devices(self) -> None:
        """Background task that deletes devices which haven't been accessed for more than
        a configured time period.
@ -462,7 +492,7 @@ class DeviceHandler(DeviceWorkerHandler):
        device_ids = [d for d in device_ids if d != except_device_id]
        await self.delete_devices(user_id, device_ids)

    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
    async def delete_devices(self, user_id: str, device_ids: Collection[str]) -> None:
        """Delete several devices

        Args:
@ -606,19 +636,6 @@ class DeviceHandler(DeviceWorkerHandler):
        await self.delete_devices(user_id, [old_device_id])
        return device_id

    async def get_dehydrated_device(
        self, user_id: str
    ) -> Optional[Tuple[str, JsonDict]]:
        """Retrieve the information for a dehydrated device.

        Args:
            user_id: the user whose dehydrated device we are looking for
        Returns:
            a tuple whose first item is the device ID, and the second item is
            the dehydrated device information
        """
        return await self.store.get_dehydrated_device(user_id)

    async def rehydrate_device(
        self, user_id: str, access_token: str, device_id: str
    ) -> dict:
@ -682,13 +699,33 @@ class DeviceHandler(DeviceWorkerHandler):
        hosts_already_sent_to: Set[str] = set()

        try:
            stream_id, room_id = await self.store.get_device_change_last_converted_pos()

            while True:
                self._handle_new_device_update_new_data = False
                rows = await self.store.get_uncoverted_outbound_room_pokes()
                max_stream_id = self.store.get_device_stream_token()
                rows = await self.store.get_uncoverted_outbound_room_pokes(
                    stream_id, room_id
                )
                if not rows:
                    # If the DB returned nothing then there is nothing left to
                    # do, *unless* a new device list update happened during the
                    # DB query.

                    # Advance `(stream_id, room_id)`.
                    # `max_stream_id` comes from *before* the query for unconverted
                    # rows, which means that any unconverted rows must have a larger
                    # stream ID.
                    if max_stream_id > stream_id:
                        stream_id, room_id = max_stream_id, ""
                        await self.store.set_device_change_last_converted_pos(
                            stream_id, room_id
                        )
                    else:
                        assert max_stream_id == stream_id
                        # Avoid moving `room_id` backwards.
                        pass

                    if self._handle_new_device_update_new_data:
                        continue
                    else:
@ -718,7 +755,6 @@ class DeviceHandler(DeviceWorkerHandler):
                        user_id=user_id,
                        device_id=device_id,
                        room_id=room_id,
                        stream_id=stream_id,
                        hosts=hosts,
                        context=opentracing_context,
                    )
@ -752,6 +788,12 @@ class DeviceHandler(DeviceWorkerHandler):
                    hosts_already_sent_to.update(hosts)
                    current_stream_id = stream_id

                # Advance `(stream_id, room_id)`.
                _, _, room_id, stream_id, _ = rows[-1]
                await self.store.set_device_change_last_converted_pos(
                    stream_id, room_id
                )

        finally:
            self._handle_new_device_update_is_processing = False

@ -834,7 +876,6 @@ class DeviceHandler(DeviceWorkerHandler):
                user_id=user_id,
                device_id=device_id,
                room_id=room_id,
                stream_id=None,
                hosts=potentially_changed_hosts,
                context=None,
            )
@ -858,7 +899,36 @@ def _update_device_from_client_ips(
    )


class DeviceListUpdater:
class DeviceListWorkerUpdater:
    "Handles incoming device list updates from federation and contacts the main process over replication"

    def __init__(self, hs: "HomeServer"):
        from synapse.replication.http.devices import (
            ReplicationUserDevicesResyncRestServlet,
        )

        self._user_device_resync_client = (
            ReplicationUserDevicesResyncRestServlet.make_client(hs)
        )

    async def user_device_resync(
        self, user_id: str, mark_failed_as_stale: bool = True
    ) -> Optional[JsonDict]:
        """Fetches all devices for a user and updates the device cache with them.

        Args:
            user_id: The user's id whose device_list will be updated.
            mark_failed_as_stale: Whether to mark the user's device list as stale
                if the attempt to resync failed.
        Returns:
            A dict with device info as under the "devices" in the result of this
            request:
            https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
        """
        return await self._user_device_resync_client(user_id=user_id)


class DeviceListUpdater(DeviceListWorkerUpdater):
    "Handles incoming device list updates from federation and updates the DB"

    def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):
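The background converter above now persists a `(stream_id, room_id)` position and resumes from it, advancing the checkpoint past `rows[-1]` after each batch. A standalone sketch of that resume-from-checkpoint pattern, with the Synapse storage methods replaced by an in-memory stand-in:

# Sketch of the checkpointed conversion loop; FakeStore is illustrative.
from typing import List, Tuple

class FakeStore:
    def __init__(self, rows: List[Tuple[int, str]]):
        self.rows = rows          # (stream_id, room_id) pokes to convert
        self.pos = (0, "")        # persisted checkpoint

    def get_last_converted_pos(self) -> Tuple[int, str]:
        return self.pos

    def get_unconverted(self, stream_id: int, room_id: str, limit: int = 50):
        # Only rows strictly after the checkpoint are still unconverted.
        return [r for r in self.rows if r > (stream_id, room_id)][:limit]

    def set_last_converted_pos(self, stream_id: int, room_id: str) -> None:
        self.pos = (stream_id, room_id)

def convert_all(store: FakeStore) -> int:
    converted = 0
    stream_id, room_id = store.get_last_converted_pos()
    while True:
        rows = store.get_unconverted(stream_id, room_id)
        if not rows:
            break
        converted += len(rows)
        # Advance the checkpoint past the last processed row, exactly as
        # the handler does with `rows[-1]` before looping again.
        stream_id, room_id = rows[-1]
        store.set_last_converted_pos(stream_id, room_id)
    return converted

store = FakeStore([(1, "!a"), (2, "!b"), (3, "!c")])
assert convert_all(store) == 3 and store.get_last_converted_pos() == (3, "!c")

Persisting the position means a restart resumes where it left off instead of rescanning the whole device-change stream.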
@ -27,9 +27,9 @@ from twisted.internet import defer

from synapse.api.constants import EduTypes
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import (
    JsonDict,
    UserID,
@ -56,27 +56,23 @@ class E2eKeysHandler:
        self.is_mine = hs.is_mine
        self.clock = hs.get_clock()

        self._edu_updater = SigningKeyEduUpdater(hs, self)

        federation_registry = hs.get_federation_registry()

        self._is_master = hs.config.worker.worker_app is None
        if not self._is_master:
            self._user_device_resync_client = (
                ReplicationUserDevicesResyncRestServlet.make_client(hs)
            )
        else:
        is_master = hs.config.worker.worker_app is None
        if is_master:
            edu_updater = SigningKeyEduUpdater(hs)

            # Only register this edu handler on master as it requires writing
            # device updates to the db
            federation_registry.register_edu_handler(
                EduTypes.SIGNING_KEY_UPDATE,
                self._edu_updater.incoming_signing_key_update,
                edu_updater.incoming_signing_key_update,
            )
            # also handle the unstable version
            # FIXME: remove this when enough servers have upgraded
            federation_registry.register_edu_handler(
                EduTypes.UNSTABLE_SIGNING_KEY_UPDATE,
                self._edu_updater.incoming_signing_key_update,
                edu_updater.incoming_signing_key_update,
            )

        # doesn't really work as part of the generic query API, because the
@ -319,14 +315,13 @@ class E2eKeysHandler:
            # probably be tracking their device lists. However, we haven't
            # done an initial sync on the device list so we do it now.
            try:
                if self._is_master:
                    resync_results = await self.device_handler.device_list_updater.user_device_resync(
                resync_results = (
                    await self.device_handler.device_list_updater.user_device_resync(
                        user_id
                    )
                else:
                    resync_results = await self._user_device_resync_client(
                        user_id=user_id
                    )
                )
                if resync_results is None:
                    raise ValueError("Device resync failed")

                # Add the device keys to the results.
                user_devices = resync_results["devices"]
@ -605,6 +600,8 @@ class E2eKeysHandler:
    async def upload_keys_for_user(
        self, user_id: str, device_id: str, keys: JsonDict
    ) -> JsonDict:
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)

        time_now = self.clock.time_msec()

@ -732,6 +729,8 @@ class E2eKeysHandler:
            user_id: the user uploading the keys
            keys: the signing keys
        """
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)

        # if a master key is uploaded, then check it. Otherwise, load the
        # stored master key, to check signatures on other keys
@ -823,6 +822,9 @@ class E2eKeysHandler:
        Raises:
            SynapseError: if the signatures dict is not valid.
        """
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)

        failures = {}

        # signatures to be stored. Each item will be a SignatureListItem
@ -870,7 +872,7 @@ class E2eKeysHandler:
        - signatures of the user's master key by the user's devices.

        Args:
            user_id (string): the user uploading the keys
            user_id: the user uploading the keys
            signatures (dict[string, dict]): map of devices to signed keys

        Returns:
@ -1200,6 +1202,9 @@ class E2eKeysHandler:
            A tuple of the retrieved key content, the key's ID and the matching VerifyKey.
            If the key cannot be retrieved, all values in the tuple will instead be None.
        """
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)

        try:
            remote_result = await self.federation.query_user_devices(
                user.domain, user.to_string()
@ -1396,11 +1401,14 @@ class SignatureListItem:
class SigningKeyEduUpdater:
    """Handles incoming signing key updates from federation and updates the DB"""

    def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler):
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main
        self.federation = hs.get_federation_client()
        self.clock = hs.get_clock()
        self.e2e_keys_handler = e2e_keys_handler

        device_handler = hs.get_device_handler()
        assert isinstance(device_handler, DeviceHandler)
        self._device_handler = device_handler

        self._remote_edu_linearizer = Linearizer(name="remote_signing_key")

@ -1445,9 +1453,6 @@ class SigningKeyEduUpdater:
            user_id: the user whose updates we are processing
        """

        device_handler = self.e2e_keys_handler.device_handler
        device_list_updater = device_handler.device_list_updater

        async with self._remote_edu_linearizer.queue(user_id):
            pending_updates = self._pending_updates.pop(user_id, [])
            if not pending_updates:
@ -1459,13 +1464,11 @@ class SigningKeyEduUpdater:
            logger.info("pending updates: %r", pending_updates)

            for master_key, self_signing_key in pending_updates:
                new_device_ids = (
                    await device_list_updater.process_cross_signing_key_update(
                        user_id,
                        master_key,
                        self_signing_key,
                    )
                new_device_ids = await self._device_handler.device_list_updater.process_cross_signing_key_update(
                    user_id,
                    master_key,
                    self_signing_key,
                )
                device_ids = device_ids + new_device_ids

            await device_handler.notify_device_update(user_id, device_ids)
            await self._device_handler.notify_device_update(user_id, device_ids)
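The refactor above pushes the "am I on the main process?" branch out of the call sites: a worker-safe base class forwards resyncs over replication, and the main-process subclass does the DB write. A hedged sketch of that split, with illustrative class and method names rather than Synapse's actual ones:

# Sketch of the worker/master split; all names here are stand-ins.
class WorkerUpdater:
    """On workers: forward device list resyncs to the main process."""

    def __init__(self, replication_client):
        self._client = replication_client

    async def user_device_resync(self, user_id: str):
        # Proxy the request over replication HTTP; no local DB writes.
        return await self._client(user_id=user_id)

class MasterUpdater(WorkerUpdater):
    """On the main process: resync locally and write to the DB."""

    def __init__(self, store, federation):
        # The master does the work itself, so it needs no replication client.
        self._store = store
        self._federation = federation

    async def user_device_resync(self, user_id: str):
        devices = await self._federation.query_user_devices(user_id)
        await self._store.update_device_cache(user_id, devices)
        return devices

Call sites can then hold a single `device_list_updater` attribute and call `user_device_resync()` without checking `worker_app` themselves, which is exactly what the `E2eKeysHandler` hunk above now does.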
@ -377,8 +377,9 @@ class E2eRoomKeysHandler:
"""Deletes a given version of the user's e2e_room_keys backup

Args:
    user_id(str): the user whose current backup version we're deleting
    version(str): the version id of the backup being deleted
    user_id: the user whose current backup version we're deleting
    version: Optional. the version ID of the backup version we're deleting
        If missing, we delete the current backup version info.
Raises:
    NotFoundError: if this backup version doesn't exist
"""
@ -45,6 +45,7 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"):
    self._clock = hs.get_clock()
    self._store = hs.get_datastores().main
    self._state_storage_controller = hs.get_storage_controllers().state
    self._server_name = hs.hostname

async def check_auth_rules_from_context(
@ -179,17 +180,22 @@ class EventAuthHandler:
    this function may return an incorrect result as we are not able to fully
    track server membership in a room without full state.
    """
    if not allow_partial_state_rooms and await self._store.is_partial_state_room(
        room_id
    ):
        raise AuthError(
            403,
            "Unable to authorise you right now; room is partial-stated here.",
            errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
        )

    if not await self.is_host_in_room(room_id, host):
        raise AuthError(403, "Host not in room.")
    if await self._store.is_partial_state_room(room_id):
        if allow_partial_state_rooms:
            current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation(
                room_id
            )
            if host not in current_hosts:
                raise AuthError(403, "Host not in room (partial-state approx).")
        else:
            raise AuthError(
                403,
                "Unable to authorise you right now; room is partial-stated here.",
                errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
            )
    else:
        if not await self.is_host_in_room(room_id, host):
            raise AuthError(403, "Host not in room.")

async def check_restricted_join_rules(
    self,
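The rewritten check above distinguishes three cases: full-state rooms (exact membership check), partial-state rooms where the caller opts in (approximate host list), and partial-state rooms where it does not (refuse). A condensed sketch of that decision table; `full_hosts`/`approx_hosts` stand in for the storage lookups and `AuthError` is simplified to a plain exception:

# Condensed sketch of the partial-state authorisation logic above.
from typing import Set

class AuthError(Exception):
    pass

def assert_host_in_room(
    host: str,
    is_partial: bool,
    full_hosts: Set[str],
    approx_hosts: Set[str],
    allow_partial_state_rooms: bool,
) -> None:
    if is_partial:
        if allow_partial_state_rooms:
            # Use the (possibly over-broad) approximation of current hosts.
            if host not in approx_hosts:
                raise AuthError("Host not in room (partial-state approx).")
        else:
            # Refuse outright: we cannot answer reliably yet.
            raise AuthError("Unable to authorise; room is partial-stated.")
    else:
        if host not in full_hosts:
            raise AuthError("Host not in room.")

# e.g. backfill passes allow_partial_state_rooms=True, because its response
# is later filtered down to locally-originated events anyway.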
@ -379,6 +379,7 @@ class FederationHandler:
filtered_extremities = await filter_events_for_server(
    self._storage_controllers,
    self.server_name,
    self.server_name,
    events_to_check,
    redact=False,
    check_history_visibility_only=True,
@ -1231,7 +1232,9 @@ class FederationHandler:
async def on_backfill_request(
    self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
    await self._event_auth_handler.assert_host_in_room(room_id, origin)
    # We allow partially joined rooms since in this case we are filtering out
    # non-local events in `filter_events_for_server`.
    await self._event_auth_handler.assert_host_in_room(room_id, origin, True)

    # Synapse asks for 100 events per backfill request. Do not allow more.
    limit = min(limit, 100)
@ -1252,7 +1255,7 @@ class FederationHandler:
    )

    events = await filter_events_for_server(
        self._storage_controllers, origin, events
        self._storage_controllers, origin, self.server_name, events
    )

    return events
@ -1283,7 +1286,7 @@ class FederationHandler:
    await self._event_auth_handler.assert_host_in_room(event.room_id, origin)

    events = await filter_events_for_server(
        self._storage_controllers, origin, [event]
        self._storage_controllers, origin, self.server_name, [event]
    )
    event = events[0]
    return event
@ -1296,7 +1299,9 @@ class FederationHandler:
    latest_events: List[str],
    limit: int,
) -> List[EventBase]:
    await self._event_auth_handler.assert_host_in_room(room_id, origin)
    # We allow partially joined rooms since in this case we are filtering out
    # non-local events in `filter_events_for_server`.
    await self._event_auth_handler.assert_host_in_room(room_id, origin, True)

    # Only allow up to 20 events to be retrieved per request.
    limit = min(limit, 20)
@ -1309,7 +1314,7 @@ class FederationHandler:
    )

    missing_events = await filter_events_for_server(
        self._storage_controllers, origin, missing_events
        self._storage_controllers, origin, self.server_name, missing_events
    )

    return missing_events
@ -1596,8 +1601,8 @@ class FederationHandler:
    Fetch the complexity of a remote room over federation.

    Args:
        remote_room_hosts (list[str]): The remote servers to ask.
        room_id (str): The room ID to ask about.
        remote_room_hosts: The remote servers to ask.
        room_id: The room ID to ask about.

    Returns:
        Dict contains the complexity
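Each call site above now passes both the requesting server (`origin`) and the local server name to `filter_events_for_server`. Going by the hunk comments, the extra argument lets the filter serve partially joined rooms by only handing out events that originated locally. A toy sketch of that idea, with the event model reduced to a `(sender_domain, event_id)` pair and the visibility rules omitted:

# Illustrative sketch only; the real function also applies per-server
# history visibility checks.
from typing import List, Tuple

Event = Tuple[str, str]  # (sender domain, event id)

def filter_events_for_server(
    requesting_server: str,
    local_server_name: str,
    events: List[Event],
    room_is_partial_state: bool,
) -> List[Event]:
    if room_is_partial_state:
        # In a partially joined room we cannot check history visibility,
        # so only return events that originated on this server.
        events = [e for e in events if e[0] == local_server_name]
    return events

evs = [("example.org", "$1"), ("other.net", "$2")]
assert filter_events_for_server("remote.com", "example.org", evs, True) == [
    ("example.org", "$1")
]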
@ -711,7 +711,7 @@ class IdentityHandler:
    inviter_display_name: The current display name of the
        inviter.
    inviter_avatar_url: The URL of the inviter's avatar.
    id_access_token (str): The access token to authenticate to the identity
    id_access_token: The access token to authenticate to the identity
        server with

Returns:
@ -1137,11 +1137,13 @@ class EventCreationHandler:
)
state_events = await self.store.get_events_as_list(state_event_ids)
# Create a StateMap[str]
state_map = {(e.type, e.state_key): e.event_id for e in state_events}
current_state_ids = {
    (e.type, e.state_key): e.event_id for e in state_events
}
# Actually strip down and only use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events(
    event=temp_event,
    current_state_ids=state_map,
    current_state_ids=current_state_ids,
    for_verification=False,
)
@ -787,7 +787,7 @@ class OidcProvider:
    Must include an ``access_token`` field.

Returns:
    UserInfo: an object representing the user.
    an object representing the user.
"""
logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata()
@ -1435,6 +1435,7 @@ class UserAttributeDict(TypedDict):
localpart: Optional[str]
confirm_localpart: bool
display_name: Optional[str]
picture: Optional[str]  # may be omitted by older `OidcMappingProviders`
emails: List[str]


@ -1520,6 +1521,7 @@ env.filters.update(
@attr.s(slots=True, frozen=True, auto_attribs=True)
class JinjaOidcMappingConfig:
    subject_claim: str
    picture_claim: str
    localpart_template: Optional[Template]
    display_name_template: Optional[Template]
    email_template: Optional[Template]
@ -1539,6 +1541,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
    @staticmethod
    def parse_config(config: dict) -> JinjaOidcMappingConfig:
        subject_claim = config.get("subject_claim", "sub")
        picture_claim = config.get("picture_claim", "picture")

        def parse_template_config(option_name: str) -> Optional[Template]:
            if option_name not in config:
@ -1572,6 +1575,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):

        return JinjaOidcMappingConfig(
            subject_claim=subject_claim,
            picture_claim=picture_claim,
            localpart_template=localpart_template,
            display_name_template=display_name_template,
            email_template=email_template,
@ -1611,10 +1615,13 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
        if email:
            emails.append(email)

        picture = userinfo.get("picture")

        return UserAttributeDict(
            localpart=localpart,
            display_name=display_name,
            emails=emails,
            picture=picture,
            confirm_localpart=self._config.confirm_localpart,
        )
@ -448,6 +448,12 @@ class PaginationHandler:

if pagin_config.from_token:
    from_token = pagin_config.from_token
elif pagin_config.direction == "f":
    from_token = (
        await self.hs.get_event_sources().get_start_token_for_pagination(
            room_id
        )
    )
else:
    from_token = (
        await self.hs.get_event_sources().get_current_token_for_pagination(
@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC):
"""Get the current presence state for multiple users.

Returns:
    dict: `user_id` -> `UserPresenceState`
    A mapping of `user_id` -> `UserPresenceState`
"""
states = {}
missing = []
@ -478,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
    return _NullContextManager()

prev_state = await self.current_state_for_user(user_id)
if prev_state != PresenceState.BUSY:
if prev_state.state != PresenceState.BUSY:
    # We set state here but pass ignore_status_msg = True as we don't want to
    # cause the status message to be cleared.
    # Note that this causes last_active_ts to be incremented which is not
@ -92,7 +92,6 @@ class ReceiptsHandler:
    continue

# Check if these receipts apply to a thread.
thread_id = None
data = user_values.get("data", {})
thread_id = data.get("thread_id")
# If the thread ID is invalid, consider it missing.
@ -38,6 +38,7 @@ from synapse.api.errors import (
)
from synapse.appservice import ApplicationService
from synapse.config.server import is_threepid_reserved
from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
@ -848,6 +849,9 @@ class RegistrationHandler:
refresh_token = None
refresh_token_id = None

# This can only run on the main process.
assert isinstance(self.device_handler, DeviceHandler)

registered_device_id = await self.device_handler.check_device_registered(
    user_id,
    device_id,
@ -13,17 +13,19 @@
# limitations under the License.
import enum
import logging
from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple
from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional

import attr

from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamToken, UserID
from synapse.types import JsonDict, Requester, UserID
from synapse.util.async_helpers import gather_results
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:
@ -172,40 +174,6 @@ class RelationsHandler:

        return return_value

    async def get_relations_for_event(
        self,
        event_id: str,
        event: EventBase,
        room_id: str,
        relation_type: str,
        ignored_users: FrozenSet[str] = frozenset(),
    ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
        """Get a list of events which relate to an event, ordered by topological ordering.

        Args:
            event_id: Fetch events that relate to this event ID.
            event: The matching EventBase to event_id.
            room_id: The room the event belongs to.
            relation_type: The type of relation.
            ignored_users: The users ignored by the requesting user.

        Returns:
            List of event IDs that match relations requested. The rows are of
            the form `{"event_id": "..."}`.
        """

        # Call the underlying storage method, which is cached.
        related_events, next_token = await self._main_store.get_relations_for_event(
            event_id, event, room_id, relation_type, direction="f"
        )

        # Filter out ignored users and convert to the expected format.
        related_events = [
            event for event in related_events if event.sender not in ignored_users
        ]

        return related_events, next_token
    async def redact_events_related_to(
        self,
        requester: Requester,
@ -259,51 +227,107 @@ class RelationsHandler:
                e.msg,
            )

    async def get_annotations_for_event(
        self,
        event_id: str,
        room_id: str,
        limit: int = 5,
        ignored_users: FrozenSet[str] = frozenset(),
    ) -> List[JsonDict]:
        """Get a list of annotations on the event, grouped by event type and
    async def get_annotations_for_events(
        self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
    ) -> Dict[str, List[JsonDict]]:
        """Get a list of annotations to the given events, grouped by event type and
        aggregation key, sorted by count.

        This is used e.g. to get the what and how many reactions have happend
        This is used e.g. to get the what and how many reactions have happened
        on an event.

        Args:
            event_id: Fetch events that relate to this event ID.
            room_id: The room the event belongs to.
            limit: Only fetch the `limit` groups.
            event_ids: Fetch events that relate to these event IDs.
            ignored_users: The users ignored by the requesting user.

        Returns:
            List of groups of annotations that match. Each row is a dict with
            `type`, `key` and `count` fields.
            A map of event IDs to a list of groups of annotations that match.
            Each entry is a dict with `type`, `key` and `count` fields.
        """
        # Get the base results for all users.
        full_results = await self._main_store.get_aggregation_groups_for_event(
            event_id, room_id, limit
        full_results = await self._main_store.get_aggregation_groups_for_events(
            event_ids
        )

        # Avoid additional logic if there are no ignored users.
        if not ignored_users:
            return {
                event_id: results
                for event_id, results in full_results.items()
                if results
            }

        # Then subtract off the results for any ignored users.
        ignored_results = await self._main_store.get_aggregation_groups_for_users(
            event_id, room_id, limit, ignored_users
            [event_id for event_id, results in full_results.items() if results],
            ignored_users,
        )

        filtered_results = []
        for result in full_results:
            key = (result["type"], result["key"])
            if key in ignored_results:
                result = result.copy()
                result["count"] -= ignored_results[key]
                if result["count"] <= 0:
                    continue
            filtered_results.append(result)
        filtered_results = {}
        for event_id, results in full_results.items():
            # If no annotations, skip.
            if not results:
                continue

            # If there are not ignored results for this event, copy verbatim.
            if event_id not in ignored_results:
                filtered_results[event_id] = results
                continue

            # Otherwise, subtract out the ignored results.
            event_ignored_results = ignored_results[event_id]
            for result in results:
                key = (result["type"], result["key"])
                if key in event_ignored_results:
                    # Ensure to not modify the cache.
                    result = result.copy()
                    result["count"] -= event_ignored_results[key]
                    if result["count"] <= 0:
                        continue
                filtered_results.setdefault(event_id, []).append(result)

        return filtered_results
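The core of the hunk above is subtracting the per-`(type, key)` counts contributed by ignored users from the cached aggregation rows, without mutating the cache. A standalone sketch of that subtraction, using the same row shape (`type`, `key`, `count`):

# Standalone sketch of the ignored-user subtraction performed above.
from typing import Dict, List, Tuple

def subtract_ignored(
    results: List[dict],
    ignored_counts: Dict[Tuple[str, str], int],
) -> List[dict]:
    filtered = []
    for result in results:
        key = (result["type"], result["key"])
        if key in ignored_counts:
            result = result.copy()  # never modify the cached row in place
            result["count"] -= ignored_counts[key]
            if result["count"] <= 0:
                continue
        filtered.append(result)
    return filtered

rows = [{"type": "m.reaction", "key": "+1", "count": 3}]
assert subtract_ignored(rows, {("m.reaction", "+1"): 3}) == []
assert subtract_ignored(rows, {("m.reaction", "+1"): 1}) == [
    {"type": "m.reaction", "key": "+1", "count": 2}
]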
    async def get_references_for_events(
        self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
    ) -> Dict[str, List[_RelatedEvent]]:
        """Get a list of references to the given events.

        Args:
            event_ids: Fetch events that relate to this event ID.
            ignored_users: The users ignored by the requesting user.

        Returns:
            A map of event IDs to a list related events.
        """

        related_events = await self._main_store.get_references_for_events(event_ids)

        # Avoid additional logic if there are no ignored users.
        if not ignored_users:
            return {
                event_id: results
                for event_id, results in related_events.items()
                if results
            }

        # Filter out ignored users.
        results = {}
        for event_id, events in related_events.items():
            # If no references, skip.
            if not events:
                continue

            # Filter ignored users out.
            events = [event for event in events if event.sender not in ignored_users]
            # If there are no events left, skip this event.
            if not events:
                continue

            results[event_id] = events

        return results
    async def _get_threads_for_events(
        self,
        events_by_id: Dict[str, EventBase],
@ -366,59 +390,66 @@ class RelationsHandler:
        results = {}

        for event_id, summary in summaries.items():
            if summary:
                thread_count, latest_thread_event = summary
            # If no thread, skip.
            if not summary:
                continue

            # Subtract off the count of any ignored users.
            for ignored_user in ignored_users:
                thread_count -= ignored_results.get((event_id, ignored_user), 0)
            thread_count, latest_thread_event = summary

            # This is gnarly, but if the latest event is from an ignored user,
            # attempt to find one that isn't from an ignored user.
            if latest_thread_event.sender in ignored_users:
                room_id = latest_thread_event.room_id
            # Subtract off the count of any ignored users.
            for ignored_user in ignored_users:
                thread_count -= ignored_results.get((event_id, ignored_user), 0)

            # If the root event is not found, something went wrong, do
            # not include a summary of the thread.
            event = await self._event_handler.get_event(user, room_id, event_id)
            if event is None:
                continue
            # This is gnarly, but if the latest event is from an ignored user,
            # attempt to find one that isn't from an ignored user.
            if latest_thread_event.sender in ignored_users:
                room_id = latest_thread_event.room_id

                potential_events, _ = await self.get_relations_for_event(
                    event_id,
                    event,
                    room_id,
                    RelationTypes.THREAD,
                    ignored_users,
                )
                # If the root event is not found, something went wrong, do
                # not include a summary of the thread.
                event = await self._event_handler.get_event(user, room_id, event_id)
                if event is None:
                    continue

                # If all found events are from ignored users, do not include
                # a summary of the thread.
                if not potential_events:
                    continue

                # The *last* event returned is the one that is cared about.
                event = await self._event_handler.get_event(
                    user, room_id, potential_events[-1].event_id
                )
                # It is unexpected that the event will not exist.
                if event is None:
                    logger.warning(
                        "Unable to fetch latest event in a thread with event ID: %s",
                        potential_events[-1].event_id,
                    )
                    continue
                latest_thread_event = event

            results[event_id] = _ThreadAggregation(
                latest_event=latest_thread_event,
                count=thread_count,
                # If there's a thread summary it must also exist in the
                # participated dictionary.
                current_user_participated=events_by_id[event_id].sender == user_id
                or participated[event_id],
                # Attempt to find another event to use as the latest event.
                potential_events, _ = await self._main_store.get_relations_for_event(
                    event_id, event, room_id, RelationTypes.THREAD, direction="f"
                )

                # Filter out ignored users.
                potential_events = [
                    event
                    for event in potential_events
                    if event.sender not in ignored_users
                ]

                # If all found events are from ignored users, do not include
                # a summary of the thread.
                if not potential_events:
                    continue

                # The *last* event returned is the one that is cared about.
                event = await self._event_handler.get_event(
                    user, room_id, potential_events[-1].event_id
                )
                # It is unexpected that the event will not exist.
                if event is None:
                    logger.warning(
                        "Unable to fetch latest event in a thread with event ID: %s",
                        potential_events[-1].event_id,
                    )
                    continue
                latest_thread_event = event

            results[event_id] = _ThreadAggregation(
                latest_event=latest_thread_event,
                count=thread_count,
                # If there's a thread summary it must also exist in the
                # participated dictionary.
                current_user_participated=events_by_id[event_id].sender == user_id
                or participated[event_id],
            )

        return results

    @trace
@ -496,49 +527,56 @@ class RelationsHandler:
        # (as that is what makes it part of the thread).
        relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD

        # Fetch other relations per event.
        for event in events_by_id.values():
            # Fetch any annotations (ie, reactions) to bundle with this event.
            annotations = await self.get_annotations_for_event(
                event.event_id, event.room_id, ignored_users=ignored_users
        async def _fetch_annotations() -> None:
            """Fetch any annotations (ie, reactions) to bundle with this event."""
            annotations_by_event_id = await self.get_annotations_for_events(
                events_by_id.keys(), ignored_users=ignored_users
            )
            if annotations:
                results.setdefault(
                    event.event_id, BundledAggregations()
                ).annotations = {"chunk": annotations}
            for event_id, annotations in annotations_by_event_id.items():
                if annotations:
                    results.setdefault(event_id, BundledAggregations()).annotations = {
                        "chunk": annotations
                    }

            # Fetch any references to bundle with this event.
            references, next_token = await self.get_relations_for_event(
                event.event_id,
                event,
                event.room_id,
                RelationTypes.REFERENCE,
                ignored_users=ignored_users,
        async def _fetch_references() -> None:
            """Fetch any references to bundle with this event."""
            references_by_event_id = await self.get_references_for_events(
                events_by_id.keys(), ignored_users=ignored_users
            )
            if references:
                aggregations = results.setdefault(event.event_id, BundledAggregations())
                aggregations.references = {
                    "chunk": [{"event_id": ev.event_id} for ev in references]
                }
            for event_id, references in references_by_event_id.items():
                if references:
                    results.setdefault(event_id, BundledAggregations()).references = {
                        "chunk": [{"event_id": ev.event_id} for ev in references]
                    }

            if next_token:
                aggregations.references["next_batch"] = await next_token.to_string(
                    self._main_store
                )
        async def _fetch_edits() -> None:
            """
            Fetch any edits (but not for redacted events).

        # Fetch any edits (but not for redacted events).
        #
        # Note that there is no use in limiting edits by ignored users since the
        # parent event should be ignored in the first place if the user is ignored.
        edits = await self._main_store.get_applicable_edits(
            [
                event_id
                for event_id, event in events_by_id.items()
                if not event.internal_metadata.is_redacted()
            ]
            Note that there is no use in limiting edits by ignored users since the
            parent event should be ignored in the first place if the user is ignored.
            """
            edits = await self._main_store.get_applicable_edits(
                [
                    event_id
                    for event_id, event in events_by_id.items()
                    if not event.internal_metadata.is_redacted()
                ]
            )
            for event_id, edit in edits.items():
                results.setdefault(event_id, BundledAggregations()).replace = edit

        # Parallelize the calls for annotations, references, and edits since they
        # are unrelated.
        await make_deferred_yieldable(
            gather_results(
                (
                    run_in_background(_fetch_annotations),
                    run_in_background(_fetch_references),
                    run_in_background(_fetch_edits),
                )
            )
        )
        for event_id, edit in edits.items():
            results.setdefault(event_id, BundledAggregations()).replace = edit

        return results

@ -571,7 +609,7 @@ class RelationsHandler:
            room_id, requester, allow_departed_users=True
        )

        # Note that ignored users are not passed into get_relations_for_event
        # Note that ignored users are not passed into get_threads
        # below. Ignored users are handled in filter_events_for_client (and by
        # not passing them in here we should get a better cache hit rate).
        thread_roots, next_batch = await self._main_store.get_threads(
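The bundled-aggregation refactor above wraps the three independent fetches in inner coroutines and runs them concurrently via Synapse's `run_in_background`/`gather_results` helpers. The same shape in plain asyncio, since those Twisted-based helpers are project-specific; the fetchers here are dummies:

# Illustrative asyncio equivalent of the parallel fetch pattern above.
import asyncio

async def demo() -> dict:
    results: dict = {}

    async def _fetch_annotations() -> None:
        await asyncio.sleep(0)  # stands in for a DB call
        results["annotations"] = {"chunk": []}

    async def _fetch_references() -> None:
        await asyncio.sleep(0)
        results["references"] = {"chunk": []}

    async def _fetch_edits() -> None:
        await asyncio.sleep(0)
        results["edits"] = {}

    # The three calls are independent, so run them in parallel instead of
    # sequentially per event as the old per-event loop did.
    await asyncio.gather(_fetch_annotations(), _fetch_references(), _fetch_edits())
    return results

assert asyncio.run(demo()) == {
    "annotations": {"chunk": []},
    "references": {"chunk": []},
    "edits": {},
}

Batching by `events_by_id.keys()` plus parallelising the unrelated queries is what turns the old per-event round trips into a constant number of storage calls.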
@ -441,7 +441,7 @@ class DefaultSamlMappingProvider:
    client_redirect_url: where the client wants to redirect to

Returns:
    dict: A dict containing new user attributes. Possible keys:
    A dict containing new user attributes. Possible keys:
        * mxid_localpart (str): Required. The localpart of the user's mxid
        * displayname (str): The displayname of the user
        * emails (list[str]): Any emails for the user
@ -483,7 +483,7 @@ class DefaultSamlMappingProvider:
Args:
    config: A dictionary containing configuration options for this provider
Returns:
    SamlConfig: A custom config object for this module
    A custom config object for this module
"""
# Parse config options and use defaults where necessary
mxid_source_attribute = config.get("mxid_source_attribute", "uid")
@ -15,6 +15,7 @@ import logging
from typing import TYPE_CHECKING, Optional

from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.types import Requester

if TYPE_CHECKING:
@ -29,7 +30,10 @@ class SetPasswordHandler:
def __init__(self, hs: "HomeServer"):
    self.store = hs.get_datastores().main
    self._auth_handler = hs.get_auth_handler()
    self._device_handler = hs.get_device_handler()
    # This can only be instantiated on the main process.
    device_handler = hs.get_device_handler()
    assert isinstance(device_handler, DeviceHandler)
    self._device_handler = device_handler

async def set_password(
    self,
@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import hashlib
import io
import logging
from typing import (
    TYPE_CHECKING,
@ -37,6 +39,7 @@ from twisted.web.server import Request
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError
from synapse.config.sso import SsoAttributeRequirement
from synapse.handlers.device import DeviceHandler
from synapse.handlers.register import init_counters_for_auth_provider
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http import get_request_user_agent
@ -137,6 +140,7 @@ class UserAttributes:
    localpart: Optional[str]
    confirm_localpart: bool = False
    display_name: Optional[str] = None
    picture: Optional[str] = None
    emails: Collection[str] = attr.Factory(list)


@ -195,6 +199,10 @@ class SsoHandler:
        self._error_template = hs.config.sso.sso_error_template
        self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
        self._profile_handler = hs.get_profile_handler()
        self._media_repo = (
            hs.get_media_repository() if hs.config.media.can_load_media_repo else None
        )
        self._http_client = hs.get_proxied_blacklisted_http_client()

        # The following template is shown after a successful user interactive
        # authentication session. It tells the user they can close the window.
@ -494,6 +502,8 @@ class SsoHandler:
            await self._profile_handler.set_displayname(
                user_id_obj, requester, attributes.display_name, True
            )
        if attributes.picture:
            await self.set_avatar(user_id, attributes.picture)

        await self._auth_handler.complete_sso_login(
            user_id,
@ -702,8 +712,110 @@ class SsoHandler:
        await self._store.record_user_external_id(
            auth_provider_id, remote_user_id, registered_user_id
        )

        # Set avatar, if available
        if attributes.picture:
            await self.set_avatar(registered_user_id, attributes.picture)

        return registered_user_id

    async def set_avatar(self, user_id: str, picture_https_url: str) -> bool:
        """Set avatar of the user.

        This downloads the image file from the URL provided, stores that in
        the media repository and then sets the avatar on the user's profile.

        It can detect if the same image is being saved again and bails early by storing
        the hash of the file in the `upload_name` of the avatar image.

        Currently, it only supports server configurations which run the media repository
        within the same process.

        It silently fails and logs a warning by raising an exception and catching it
        internally if:
        * it is unable to fetch the image itself (non 200 status code) or
        * the image supplied is bigger than max allowed size or
        * the image type is not one of the allowed image types.

        Args:
            user_id: matrix user ID in the form @localpart:domain as a string.

            picture_https_url: HTTPS url for the picture image file.

        Returns: `True` if the user's avatar has been successfully set to the image at
            `picture_https_url`.
        """
        if self._media_repo is None:
            logger.info(
                "failed to set user avatar because out-of-process media repositories "
                "are not supported yet "
            )
            return False

        try:
            uid = UserID.from_string(user_id)

            def is_allowed_mime_type(content_type: str) -> bool:
                if (
                    self._profile_handler.allowed_avatar_mimetypes
                    and content_type
                    not in self._profile_handler.allowed_avatar_mimetypes
                ):
                    return False
                return True

            # download picture, enforcing size limit & mime type check
            picture = io.BytesIO()

            content_length, headers, uri, code = await self._http_client.get_file(
                url=picture_https_url,
                output_stream=picture,
                max_size=self._profile_handler.max_avatar_size,
                is_allowed_content_type=is_allowed_mime_type,
            )

            if code != 200:
                raise Exception(
                    "GET request to download sso avatar image returned {}".format(code)
                )

            # upload name includes hash of the image file's content so that we can
            # easily check if it requires an update or not, the next time user logs in
            upload_name = "sso_avatar_" + hashlib.sha256(picture.read()).hexdigest()

            # bail if user already has the same avatar
            profile = await self._profile_handler.get_profile(user_id)
            if profile["avatar_url"] is not None:
                server_name = profile["avatar_url"].split("/")[-2]
                media_id = profile["avatar_url"].split("/")[-1]
                if server_name == self._server_name:
                    media = await self._media_repo.store.get_local_media(media_id)
                    if media is not None and upload_name == media["upload_name"]:
                        logger.info("skipping saving the user avatar")
                        return True

            # store it in media repository
            avatar_mxc_url = await self._media_repo.create_content(
                media_type=headers[b"Content-Type"][0].decode("utf-8"),
                upload_name=upload_name,
                content=picture,
                content_length=content_length,
                auth_user=uid,
            )

            # save it as user avatar
            await self._profile_handler.set_avatar_url(
                uid,
                create_requester(uid),
                str(avatar_mxc_url),
            )

            logger.info("successfully saved the user avatar")
            return True
        except Exception:
            logger.warning("failed to save the user avatar")
            return False

    async def complete_sso_ui_auth_request(
        self,
        auth_provider_id: str,
@ -1035,6 +1147,8 @@ class SsoHandler:
    ) -> None:
        """Revoke any devices and in-flight logins tied to a provider session.

        Can only be called from the main process.

        Args:
            auth_provider_id: A unique identifier for this SSO provider, e.g.
                "oidc" or "saml".
@ -1042,6 +1156,12 @@ class SsoHandler:
            expected_user_id: The user we're expecting to logout. If set, it will ignore
                sessions belonging to other users and log an error.
        """

        # It is expected that this is the main process.
        assert isinstance(
            self._device_handler, DeviceHandler
        ), "revoking SSO sessions can only be called on the main process"

        # Invalidate any running user-mapping sessions
        to_delete = []
        for session_id, session in self._username_mapping_sessions.items():
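The key trick in `set_avatar` above is that the SHA-256 of the image content is embedded in the media `upload_name`, so "is this the same avatar as last login?" reduces to a string comparison against the stored media row. A small sketch of that dedupe check in isolation:

# Sketch of the hash-based avatar dedupe used above; the helper names
# are illustrative, only the "sso_avatar_" + sha256 scheme comes from
# the hunk itself.
import hashlib
from typing import Optional

def upload_name_for(image_bytes: bytes) -> str:
    return "sso_avatar_" + hashlib.sha256(image_bytes).hexdigest()

def needs_update(image_bytes: bytes, current_upload_name: Optional[str]) -> bool:
    """True if the avatar must be re-downloaded and re-uploaded."""
    return current_upload_name != upload_name_for(image_bytes)

pic = b"\x89PNG..."
name = upload_name_for(pic)
assert not needs_update(pic, name)   # same image: skip the upload
assert needs_update(b"other", name)  # new image: store it and set it

This avoids re-uploading an identical image to the media repository on every SSO login.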
@ -1425,14 +1425,14 @@ class SyncHandler:

logger.debug("Fetching OTK data")
device_id = sync_config.device_id
one_time_key_counts: JsonDict = {}
one_time_keys_count: JsonDict = {}
unused_fallback_key_types: List[str] = []
if device_id:
    # TODO: We should have a way to let clients differentiate between the states of:
    # * no change in OTK count since the provided since token
    # * the server has zero OTKs left for this device
    # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
    one_time_key_counts = await self.store.count_e2e_one_time_keys(
    one_time_keys_count = await self.store.count_e2e_one_time_keys(
        user_id, device_id
    )
    unused_fallback_key_types = (
@ -1462,7 +1462,7 @@ class SyncHandler:
    archived=sync_result_builder.archived,
    to_device=sync_result_builder.to_device,
    device_lists=device_lists,
    device_one_time_keys_count=one_time_key_counts,
    device_one_time_keys_count=one_time_keys_count,
    device_unused_fallback_key_types=unused_fallback_key_types,
    next_batch=sync_result_builder.now_token,
)
@ -45,8 +45,7 @@ class AdditionalResource(DirectServeJsonResource):

Args:
    hs: homeserver
    handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
        function to be called to handle the request.
    handler: function to be called to handle the request.
"""
super().__init__()
self._handler = handler
@ -155,11 +155,10 @@ class MatrixFederationAgent:
    a file for a file upload). Or None if the request is to have
    no body.
Returns:
    Deferred[twisted.web.iweb.IResponse]:
        fires when the header of the response has been received (regardless of the
        response status code). Fails if there is any problem which prevents that
        response from being received (including problems that prevent the request
        from being sent).
    A deferred which fires when the header of the response has been received
    (regardless of the response status code). Fails if there is any problem
    which prevents that response from being received (including problems that
    prevent the request from being sent).
"""
# We use urlparse as that will set `port` to None if there is no
# explicit port.
@ -951,8 +951,7 @@ class MatrixFederationHttpClient:

    args: query params
Returns:
    dict|list: Succeeds when we get a 2xx HTTP response. The
        result will be the decoded JSON body.
    Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.

Raises:
    HttpResponseException: If we get an HTTP response code >= 300
@ -34,7 +34,7 @@ from twisted.web.client import (
)
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse

from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
@ -134,7 +134,7 @@ class ProxyAgent(_AgentBase):
    uri: bytes,
    headers: Optional[Headers] = None,
    bodyProducer: Optional[IBodyProducer] = None,
) -> defer.Deferred:
) -> "defer.Deferred[IResponse]":
    """
    Issue a request to the server indicated by the given uri.

@ -157,17 +157,17 @@ class ProxyAgent(_AgentBase):
        a file upload). Or, None if the request is to have no body.

    Returns:
        Deferred[IResponse]: completes when the header of the response has
            been received (regardless of the response status code).
        A deferred which completes when the header of the response has
        been received (regardless of the response status code).

        Can fail with:
            SchemeNotSupported: if the uri is not http or https
    Can fail with:
        SchemeNotSupported: if the uri is not http or https

            twisted.internet.error.TimeoutError if the server we are connecting
            to (proxy or destination) does not accept a connection before
            connectTimeout.
        twisted.internet.error.TimeoutError if the server we are connecting
        to (proxy or destination) does not accept a connection before
        connectTimeout.

            ... other things too.
        ... other things too.
    """
    uri = uri.strip()
    if not _VALID_URI.match(uri):
@ -267,7 +267,7 @@ class HttpServer(Protocol):
|
||||
request. The first argument will be the request object and
|
||||
subsequent arguments will be any matched groups from the regex.
|
||||
This should return either tuple of (code, response), or None.
|
||||
servlet_classname (str): The name of the handler to be used in prometheus
|
||||
servlet_classname: The name of the handler to be used in prometheus
|
||||
and opentracing logs.
|
||||
"""
|
||||
|
||||
|
@ -400,7 +400,7 @@ class SynapseRequest(Request):
|
||||
be sure to call finished_processing.
|
||||
|
||||
Args:
|
||||
servlet_name (str): the name of the servlet which will be
|
||||
servlet_name: the name of the servlet which will be
|
||||
processing this request. This is used in the metrics.
|
||||
|
||||
It is possible to update this afterwards by updating
|
||||
|
@ -117,8 +117,7 @@ class ContextResourceUsage:
|
||||
"""Create a new ContextResourceUsage
|
||||
|
||||
Args:
|
||||
copy_from (ContextResourceUsage|None): if not None, an object to
|
||||
copy stats from
|
||||
copy_from: if not None, an object to copy stats from
|
||||
"""
|
||||
if copy_from is None:
|
||||
self.reset()
|
||||
@ -162,7 +161,7 @@ class ContextResourceUsage:
|
||||
"""Add another ContextResourceUsage's stats to this one's.
|
||||
|
||||
Args:
|
||||
other (ContextResourceUsage): the other resource usage object
|
||||
other: the other resource usage object
|
||||
"""
|
||||
self.ru_utime += other.ru_utime
|
||||
self.ru_stime += other.ru_stime
|
||||
@ -342,7 +341,7 @@ class LoggingContext:
|
||||
called directly.
|
||||
|
||||
Returns:
|
||||
LoggingContext: the current logging context
|
||||
The current logging context
|
||||
"""
|
||||
warnings.warn(
|
||||
"synapse.logging.context.LoggingContext.current_context() is deprecated "
|
||||
@ -362,7 +361,8 @@ class LoggingContext:
|
||||
called directly.
|
||||
|
||||
Args:
|
||||
context(LoggingContext): The context to activate.
|
||||
context: The context to activate.
|
||||
|
||||
Returns:
|
||||
The context that was previously active
|
||||
"""
|
||||
@ -474,8 +474,7 @@ class LoggingContext:
|
||||
"""Get resources used by this logcontext so far.
|
||||
|
||||
Returns:
|
||||
ContextResourceUsage: a *copy* of the object tracking resource
|
||||
usage so far
|
||||
A *copy* of the object tracking resource usage so far
|
||||
"""
|
||||
# we always return a copy, for consistency
|
||||
res = self._resource_usage.copy()
|
||||
@ -663,7 +662,8 @@ def current_context() -> LoggingContextOrSentinel:
|
||||
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
|
||||
"""Set the current logging context in thread local storage
|
||||
Args:
|
||||
context(LoggingContext): The context to activate.
|
||||
context: The context to activate.
|
||||
|
||||
Returns:
|
||||
The context that was previously active
|
||||
"""
|
||||
@ -700,7 +700,7 @@ def nested_logging_context(suffix: str) -> LoggingContext:
|
||||
suffix: suffix to add to the parent context's 'name'.
|
||||
|
||||
Returns:
|
||||
LoggingContext: new logging context.
|
||||
A new logging context.
|
||||
"""
|
||||
curr_context = current_context()
|
||||
if not curr_context:
|
||||
@ -898,20 +898,19 @@ def defer_to_thread(
|
||||
on it.
|
||||
|
||||
Args:
|
||||
reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
|
||||
the Deferred will be invoked, and whose threadpool we should use for the
|
||||
function.
|
||||
reactor: The reactor in whose main thread the Deferred will be invoked,
|
||||
and whose threadpool we should use for the function.
|
||||
|
||||
Normally this will be hs.get_reactor().
|
||||
|
||||
f (callable): The function to call.
|
||||
f: The function to call.
|
||||
|
||||
args: positional arguments to pass to f.
|
||||
|
||||
kwargs: keyword arguments to pass to f.
|
||||
|
||||
Returns:
|
||||
Deferred: A Deferred which fires a callback with the result of `f`, or an
|
||||
A Deferred which fires a callback with the result of `f`, or an
|
||||
errback if `f` throws an exception.
|
||||
"""
|
||||
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
|
||||
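For illustration, a minimal sketch of calling defer_to_thread as documented above; the blocking function is hypothetical, and the global Twisted reactor stands in for hs.get_reactor():

# Illustrative sketch only: run a blocking function via defer_to_thread.
import hashlib

from twisted.internet import reactor  # stand-in for hs.get_reactor()

def blocking_hash(data: bytes) -> str:
    # Deliberately blocking work that must not run on the reactor thread.
    return hashlib.sha256(data).hexdigest()

d = defer_to_thread(reactor, blocking_hash, b"payload")
d.addCallback(lambda digest: print("hashed:", digest))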
@ -939,20 +938,20 @@ def defer_to_threadpool(
on it.

Args:
reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
the Deferred will be invoked. Normally this will be hs.get_reactor().
reactor: The reactor in whose main thread the Deferred will be invoked.
Normally this will be hs.get_reactor().

threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
running `f`. Normally this will be hs.get_reactor().getThreadPool().
threadpool: The threadpool to use for running `f`. Normally this will be
hs.get_reactor().getThreadPool().

f (callable): The function to call.
f: The function to call.

args: positional arguments to pass to f.

kwargs: keyword arguments to pass to f.

Returns:
Deferred: A Deferred which fires a callback with the result of `f`, or an
A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()

@ -721,7 +721,7 @@ def inject_header_dict(
destination: address of entity receiving the span context. Must be given unless
check_destination is False. The context will only be injected if the
destination matches the opentracing whitelist
check_destination (bool): If false, destination will be ignored and the context
check_destination: If false, destination will be ignored and the context
will always be injected.

Note:
@ -780,7 +780,7 @@ def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str
destination: the name of the remote server.

Returns:
dict: the active span's context if opentracing is enabled, otherwise empty.
the active span's context if opentracing is enabled, otherwise empty.
"""

if destination and not whitelisted_homeserver(destination):

@ -47,11 +47,7 @@ from twisted.python.threadpool import ThreadPool
# This module is imported for its side effects; flake8 needn't warn that it's unused.
import synapse.metrics._reactor_metrics  # noqa: F401
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
from synapse.metrics._legacy_exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
from synapse.metrics._types import Collector
from synapse.util import SYNAPSE_VERSION

@ -474,7 +470,6 @@ __all__ = [
"Collector",
"MetricsResource",
"generate_latest",
"start_http_server",
"LaterGauge",
"InFlightGauge",
"GaugeBucketCollector",

@ -1,288 +0,0 @@
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This code is based off `prometheus_client/exposition.py` from version 0.7.1.

Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""
import logging
import math
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from typing import Any, Dict, List, Type, Union
from urllib.parse import parse_qs, urlparse

from prometheus_client import REGISTRY, CollectorRegistry
from prometheus_client.core import Sample

from twisted.web.resource import Resource
from twisted.web.server import Request

logger = logging.getLogger(__name__)
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"


def floatToGoString(d: Union[int, float]) -> str:
d = float(d)
if d == math.inf:
return "+Inf"
elif d == -math.inf:
return "-Inf"
elif math.isnan(d):
return "NaN"
else:
s = repr(d)
dot = s.find(".")
# Go switches to exponents sooner than Python.
# We only need to care about positive values for le/quantile.
if d > 0 and dot > 6:
mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
return f"{mantissa}e+0{dot - 1}"
return s
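Concretely (illustrative inputs, not from the diff), the helper above renders values like this:

# floatToGoString examples, assuming the function above:
assert floatToGoString(float("inf")) == "+Inf"
assert floatToGoString(0.5) == "0.5"
assert floatToGoString(10000000.0) == "1e+07"  # Go-style exponent switch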


def sample_line(line: Sample, name: str) -> str:
if line.labels:
labelstr = "{{{0}}}".format(
",".join(
[
'{}="{}"'.format(
k,
v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
)
for k, v in sorted(line.labels.items())
]
)
)
else:
labelstr = ""
timestamp = ""
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = f" {int(float(line.timestamp) * 1000):d}"
return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)


# Mapping from new metric names to legacy metric names.
# We translate these back to their old names when exposing them through our
# legacy vendored exporter.
# Only this legacy exposition module applies these name changes.
LEGACY_METRIC_NAMES = {
"synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
"synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
"synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
"synapse_util_caches_cache": "synapse_util_caches_cache:total",
"synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
"synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
"synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
"synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
"synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
"synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
"synapse_admin_mau_current": "synapse_admin_mau:current",
"synapse_admin_mau_max": "synapse_admin_mau:max",
"synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
}
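To make the mapping concrete, a hedged illustration of the dual emission it drives (the sample value is made up):

# Illustrative only: one gauge is exposed under both names by the exporter below.
# New-style sample:      synapse_admin_mau_current 5.0
# Legacy alias emitted:  synapse_admin_mau:current 5.0
legacy = LEGACY_METRIC_NAMES.get("synapse_admin_mau_current")
assert legacy == "synapse_admin_mau:current"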


def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
"""
Generate metrics in legacy format. Modern metrics are generated directly
by prometheus-client.
"""

output = []

for metric in registry.collect():
if not metric.samples:
# No samples, don't bother.
continue

# Translate to legacy metric name if it has one.
mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
mnewname = metric.name
mtype = metric.type

# OpenMetrics -> Prometheus
if mtype == "counter":
mnewname = mnewname + "_total"
elif mtype == "info":
mtype = "gauge"
mnewname = mnewname + "_info"
elif mtype == "stateset":
mtype = "gauge"
elif mtype == "gaugehistogram":
mtype = "histogram"
elif mtype == "unknown":
mtype = "untyped"

# Output in the old format for compatibility.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname} {mtype}\n")

om_samples: Dict[str, List[str]] = {}
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == mname + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
# (these come from gaugehistograms which don't get renamed,
# so no need to faff with mnewname)
om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
break
else:
newname = s.name.replace(mnewname, mname)
if ":" in newname and newname.endswith("_total"):
newname = newname[: -len("_total")]
output.append(sample_line(s, newname))

for suffix, lines in sorted(om_samples.items()):
if emit_help:
output.append(
"# HELP {}{} {}\n".format(
mname,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname}{suffix} gauge\n")
output.extend(lines)

# Get rid of the weird colon things while we're at it
if mtype == "counter":
mnewname = mnewname.replace(":total", "")
mnewname = mnewname.replace(":", "_")

if mname == mnewname:
continue

# Also output in the new format, if it's different.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mnewname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mnewname} {mtype}\n")

for s in metric.samples:
# Get rid of the OpenMetrics specific samples (we should already have
# dealt with them above anyway.)
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == mname + suffix:
break
else:
sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
output.append(
sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
)

return "".join(output).encode("utf-8")


class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""

registry = REGISTRY

def do_GET(self) -> None:
registry = self.registry
params = parse_qs(urlparse(self.path).query)

if "help" in params:
emit_help = True
else:
emit_help = False

try:
output = generate_latest(registry, emit_help=emit_help)
except Exception:
self.send_error(500, "error generating metric output")
raise
try:
self.send_response(200)
self.send_header("Content-Type", CONTENT_TYPE_LATEST)
self.send_header("Content-Length", str(len(output)))
self.end_headers()
self.wfile.write(output)
except BrokenPipeError as e:
logger.warning(
"BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
)

def log_message(self, format: str, *args: Any) -> None:
"""Log nothing."""

@classmethod
def factory(cls, registry: CollectorRegistry) -> Type:
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).

# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
return MyMetricsHandler


class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""

# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True


def start_http_server(
port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
) -> None:
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()


class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""

isLeaf = True

def __init__(self, registry: CollectorRegistry = REGISTRY):
self.registry = registry

def render_GET(self, request: Request) -> bytes:
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response
38
synapse/metrics/_twisted_exposition.py
Normal file
@ -0,0 +1,38 @@
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from prometheus_client import REGISTRY, CollectorRegistry, generate_latest

from twisted.web.resource import Resource
from twisted.web.server import Request

CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"


class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""

isLeaf = True

def __init__(self, registry: CollectorRegistry = REGISTRY):
self.registry = registry

def render_GET(self, request: Request) -> bytes:
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response
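A hedged usage sketch for the new module (the port number and standalone Site are assumptions for illustration; within Synapse the resource is mounted by the homeserver's metrics listener):

# Illustrative sketch: serving MetricsResource on its own Twisted site.
from twisted.internet import reactor
from twisted.web.server import Site

from synapse.metrics._twisted_exposition import MetricsResource

reactor.listenTCP(9000, Site(MetricsResource()))  # defaults to the global REGISTRY
reactor.run()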
@ -54,7 +54,9 @@ class CommonUsageMetricsManager:

async def setup(self) -> None:
"""Keep the gauges for common usage metrics up to date."""
await self._update_gauges()
run_as_background_process(
desc="common_usage_metrics_update_gauges", func=self._update_gauges
)
self._clock.looping_call(
run_as_background_process,
5 * 60 * 1000,

@ -86,6 +86,7 @@ from synapse.handlers.auth import (
ON_LOGGED_OUT_CALLBACK,
AuthHandler,
)
from synapse.handlers.device import DeviceHandler
from synapse.handlers.push_rules import RuleSpec, check_actions
from synapse.http.client import SimpleHttpClient
from synapse.http.server import (
@ -207,6 +208,7 @@ class ModuleApi:
self._registration_handler = hs.get_registration_handler()
self._send_email_handler = hs.get_send_email_handler()
self._push_rules_handler = hs.get_push_rules_handler()
self._device_handler = hs.get_device_handler()
self.custom_template_dir = hs.config.server.custom_template_directory

try:
@ -784,10 +786,12 @@ class ModuleApi:
) -> Generator["defer.Deferred[Any]", Any, None]:
"""Invalidate an access token for a user

Can only be called from the main process.

Added in Synapse v0.25.0.

Args:
access_token(str): access token
access_token: access token

Returns:
twisted.internet.defer.Deferred - resolves once the access token
@ -796,6 +800,10 @@ class ModuleApi:
Raises:
synapse.api.errors.AuthError: the access token is invalid
"""
assert isinstance(
self._device_handler, DeviceHandler
), "invalidate_access_token can only be called on the main process"

# see if the access token corresponds to a device
user_info = yield defer.ensureDeferred(
self._auth.get_user_by_access_token(access_token)
@ -805,7 +813,7 @@ class ModuleApi:
if device_id:
# delete the device, which will also delete its access tokens
yield defer.ensureDeferred(
self._hs.get_device_handler().delete_devices(user_id, [device_id])
self._device_handler.delete_devices(user_id, [device_id])
)
else:
# no associated device. Just delete the access token.
@ -832,7 +840,7 @@ class ModuleApi:
**kwargs: named args to be passed to func

Returns:
Deferred[object]: result of func
Result of func
"""
# type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred(
@ -924,8 +932,7 @@ class ModuleApi:
to represent 'any') of the room state to acquire.

Returns:
twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
The filtered state events in the room.
The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._storage_controllers.state.get_current_state_ids(

@ -29,6 +29,7 @@ from typing import (
from prometheus_client import Counter

from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes, EventContentFields
from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion
from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
@ -339,13 +340,19 @@ class BulkPushRuleEvaluator:
for user_id, level in notification_levels.items():
notification_levels[user_id] = int(level)

room_version_features = event.room_version.msc3931_push_features
if not room_version_features:
room_version_features = []

evaluator = PushRuleEvaluator(
_flatten_dict(event),
_flatten_dict(event, room_version=event.room_version),
room_member_count,
sender_power_level,
notification_levels,
related_events,
self._related_event_match_enabled,
room_version_features,
self.hs.config.experimental.msc1767_enabled,  # MSC3931 flag
)

users = rules_by_user.keys()
@ -421,6 +428,7 @@ StateGroup = Union[object, int]

def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
room_version: Optional[RoomVersion] = None,
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
@ -432,6 +440,31 @@ def _flatten_dict(
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
elif isinstance(value, Mapping):
# do not set `room_version` due to recursion considerations below
_flatten_dict(value, prefix=(prefix + [key]), result=result)

# `room_version` should only ever be set when looking at the top level of an event
if (
room_version is not None
and PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features
and isinstance(d, EventBase)
):
# Room supports extensible events: replace `content.body` with the plain text
# representation from `m.markup`, as per MSC1767.
markup = d.get("content").get("m.markup")
if room_version.identifier.startswith("org.matrix.msc1767."):
markup = d.get("content").get("org.matrix.msc1767.markup")
if markup is not None and isinstance(markup, list):
text = ""
for rep in markup:
if not isinstance(rep, dict):
# invalid markup - skip all processing
break
if rep.get("mimetype", "text/plain") == "text/plain":
rep_text = rep.get("body")
if rep_text is not None and isinstance(rep_text, str):
text = rep_text.lower()
break
result["content.body"] = text

return result

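For illustration, the flattening above applied to a plain mapping (so room_version is not involved); the event content here is made up:

# Illustrative input/output for _flatten_dict (hypothetical event content):
event = {"content": {"body": "Hello", "m.relates_to": {"rel_type": "m.thread"}}}
assert _flatten_dict(event) == {
    "content.body": "hello",  # string values are lowercased
    "content.m.relates_to.rel_type": "m.thread",  # nested keys joined with "."
}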
@ -153,7 +153,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
argument list.

Returns:
dict: If POST/PUT request then dictionary must be JSON serialisable,
If POST/PUT request then dictionary must be JSON serialisable,
otherwise must be appropriate for adding as query args.
"""
return {}

@ -13,11 +13,12 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Tuple
from typing import TYPE_CHECKING, Optional, Tuple

from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

@ -62,7 +63,12 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)

self.device_list_updater = hs.get_device_handler().device_list_updater
from synapse.handlers.device import DeviceHandler

handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_list_updater = handler.device_list_updater

self.store = hs.get_datastores().main
self.clock = hs.get_clock()

@ -72,11 +78,77 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):

async def _handle_request(  # type: ignore[override]
self, request: Request, user_id: str
) -> Tuple[int, JsonDict]:
) -> Tuple[int, Optional[JsonDict]]:
user_devices = await self.device_list_updater.user_device_resync(user_id)

return 200, user_devices


class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
"""Ask master to upload keys for the user and send them out over federation to
update other servers.

For now, only the master is permitted to handle key upload requests;
any worker can handle key query requests (since they're read-only).

Calls to e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
the main process to accomplish this.

Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
Request format (borrowed and expanded from KeyUploadServlet):

POST /_synapse/replication/upload_keys_for_user

{
"user_id": "<user_id>",
"device_id": "<device_id>",
"keys": {
....this part can be found in KeyUploadServlet in rest/client/keys.py....
}
}

Response is equivalent to `/_matrix/client/v3/keys/upload` found in KeyUploadServlet

"""

NAME = "upload_keys_for_user"
PATH_ARGS = ()
CACHE = False

def __init__(self, hs: "HomeServer"):
super().__init__(hs)

self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.store = hs.get_datastores().main
self.clock = hs.get_clock()

@staticmethod
async def _serialize_payload(  # type: ignore[override]
user_id: str, device_id: str, keys: JsonDict
) -> JsonDict:

return {
"user_id": user_id,
"device_id": device_id,
"keys": keys,
}

async def _handle_request(  # type: ignore[override]
self, request: Request
) -> Tuple[int, JsonDict]:
content = parse_json_object_from_request(request)

user_id = content["user_id"]
device_id = content["device_id"]
keys = content["keys"]

results = await self.e2e_keys_handler.upload_keys_for_user(
user_id, device_id, keys
)

return 200, results


def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
ReplicationUploadKeysForUserRestServlet(hs).register(http_server)

@ -1,13 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@ -1,13 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@ -1,50 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple

from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id


class SlavedIdTracker(AbstractStreamIdTracker):
"""Tracks the "current" stream ID of a stream with a single writer.

See `AbstractStreamIdTracker` for more details.

Note that this class does not work correctly when there are multiple
writers.
"""

def __init__(
self,
db_conn: LoggingDatabaseConnection,
table: str,
column: str,
extra_tables: Optional[List[Tuple[str, str]]] = None,
step: int = 1,
):
self.step = step
self._current = _load_current_id(db_conn, table, column, step)
if extra_tables:
for table, column in extra_tables:
self.advance(None, _load_current_id(db_conn, table, column))

def advance(self, instance_name: Optional[str], new_id: int) -> None:
self._current = (max if self.step > 0 else min)(self._current, new_id)

def get_current_token(self) -> int:
return self._current

def get_current_token_for_writer(self, instance_name: str) -> int:
return self.get_current_token()
@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
self._parse_and_dispatch_line(line)

def _parse_and_dispatch_line(self, line: bytes) -> None:
if line.strip() == "":
if line.strip() == b"":
# Ignore blank lines
return


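The one-character fix above matters because, in Python 3, bytes and str never compare equal; a quick illustration:

line = b"  "
assert line.strip() == b""             # the fixed check matches blank byte lines
assert (line.strip() == "") is False   # the old str comparison never matched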
@ -238,6 +238,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Register all the admin servlets.
"""
# Admin servlets aren't registered on workers.
if hs.config.worker.worker_app is not None:
return

register_servlets_for_client_rest_resource(hs, http_server)
BlockRoomRestServlet(hs).register(http_server)
ListRoomRestServlet(hs).register(http_server)
@ -254,9 +258,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
UsersRestServletV2(hs).register(http_server)
DeviceRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
DeleteDevicesRestServlet(hs).register(http_server)
UserMediaStatisticsRestServlet(hs).register(http_server)
EventReportDetailRestServlet(hs).register(http_server)
EventReportsRestServlet(hs).register(http_server)
@ -280,12 +281,13 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserByExternalId(hs).register(http_server)
UserByThreePid(hs).register(http_server)

# Some servlets only get registered for the main process.
if hs.config.worker.worker_app is None:
SendServerNoticeServlet(hs).register(http_server)
BackgroundUpdateEnabledRestServlet(hs).register(http_server)
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
DeviceRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
DeleteDevicesRestServlet(hs).register(http_server)
SendServerNoticeServlet(hs).register(http_server)
BackgroundUpdateEnabledRestServlet(hs).register(http_server)
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)


def register_servlets_for_client_rest_resource(
@ -294,9 +296,11 @@ def register_servlets_for_client_rest_resource(
"""Register only the servlets which need to be exposed on /_matrix/client/xxx"""
WhoisRestServlet(hs).register(http_server)
PurgeHistoryStatusRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server)
PurgeHistoryRestServlet(hs).register(http_server)
ResetPasswordRestServlet(hs).register(http_server)
# The following resources can only be run on the main process.
if hs.config.worker.worker_app is None:
DeactivateAccountRestServlet(hs).register(http_server)
ResetPasswordRestServlet(hs).register(http_server)
SearchUsersRestServlet(hs).register(http_server)
UserRegisterServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server)

@ -16,6 +16,7 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple

from synapse.api.errors import NotFoundError, SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
@ -43,7 +44,9 @@ class DeviceRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.store = hs.get_datastores().main
self.is_mine = hs.is_mine

@ -112,7 +115,9 @@ class DevicesRestServlet(RestServlet):

def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.store = hs.get_datastores().main
self.is_mine = hs.is_mine

@ -143,7 +148,9 @@ class DeleteDevicesRestServlet(RestServlet):

def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.store = hs.get_datastores().main
self.is_mine = hs.is_mine


@ -903,8 +903,9 @@ class PushersRestServlet(RestServlet):
@user:server/pushers

Returns:
pushers: Dictionary containing pushers information.
total: Number of pushers in dictionary `pushers`.
A dictionary with keys:
pushers: Dictionary containing pushers information.
total: Number of pushers in dictionary `pushers`.
"""

PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")

@ -20,6 +20,7 @@ from pydantic import Extra, StrictStr

from synapse.api import errors
from synapse.api.errors import NotFoundError
from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@ -80,7 +81,9 @@ class DeleteDevicesRestServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.auth_handler = hs.get_auth_handler()

class PostBody(RequestBodyModel):
@ -125,7 +128,9 @@ class DeviceRestServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.auth_handler = hs.get_auth_handler()
self._msc3852_enabled = hs.config.experimental.msc3852_enabled

@ -256,7 +261,9 @@ class DehydratedDeviceServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler

async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@ -313,7 +320,9 @@ class ClaimDehydratedDeviceServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler

class PostBody(RequestBodyModel):
device_id: StrictStr

@ -27,6 +27,7 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag
from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.types import JsonDict, StreamToken
from synapse.util.cancellation import cancellable
@ -43,24 +44,48 @@ class KeyUploadServlet(RestServlet):
Content-Type: application/json

{
"device_keys": {
"user_id": "<user_id>",
"device_id": "<device_id>",
"valid_until_ts": <millisecond_timestamp>,
"algorithms": [
"m.olm.curve25519-aes-sha2",
]
"keys": {
"<algorithm>:<device_id>": "<key_base64>",
"device_keys": {
"user_id": "<user_id>",
"device_id": "<device_id>",
"valid_until_ts": <millisecond_timestamp>,
"algorithms": [
"m.olm.curve25519-aes-sha2",
]
"keys": {
"<algorithm>:<device_id>": "<key_base64>",
},
"signatures:" {
"<user_id>" {
"<algorithm>:<device_id>": "<signature_base64>"
}
}
},
"fallback_keys": {
"<algorithm>:<device_id>": "<key_base64>",
"signed_<algorithm>:<device_id>": {
"fallback": true,
"key": "<key_base64>",
"signatures": {
"<user_id>": {
"<algorithm>:<device_id>": "<key_base64>"
}
}
}
}
"one_time_keys": {
"<algorithm>:<key_id>": "<key_base64>"
},
"signatures:" {
"<user_id>" {
"<algorithm>:<device_id>": "<signature_base64>"
} } },
"one_time_keys": {
"<algorithm>:<key_id>": "<key_base64>"
},
}

response, e.g.:

{
"one_time_key_counts": {
"curve25519": 10,
"signed_curve25519": 20
}
}

"""

PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
@ -71,6 +96,13 @@ class KeyUploadServlet(RestServlet):
self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.device_handler = hs.get_device_handler()

if hs.config.worker.worker_app is None:
# if main process
self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
else:
# then a worker
self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)

async def on_POST(
self, request: SynapseRequest, device_id: Optional[str]
) -> Tuple[int, JsonDict]:
@ -109,8 +141,8 @@ class KeyUploadServlet(RestServlet):
400, "To upload keys, you must pass device_id when authenticating"
)

result = await self.e2e_keys_handler.upload_keys_for_user(
user_id, device_id, body
result = await self.key_uploader(
user_id=user_id, device_id=device_id, keys=body
)
return 200, result

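For illustration, a self-contained toy sketch of the worker/main dispatch pattern those hunks introduce (every name below is a hypothetical stand-in, not a Synapse API):

# Illustrative sketch only, under the assumption that both callables share a signature.
import asyncio
from typing import Any, Dict

async def local_upload(user_id: str, device_id: str, keys: Dict[str, Any]) -> Dict[str, Any]:
    # Stands in for e2e_keys_handler.upload_keys_for_user on the main process.
    return {"one_time_key_counts": {"signed_curve25519": len(keys)}}

async def replicated_upload(user_id: str, device_id: str, keys: Dict[str, Any]) -> Dict[str, Any]:
    # Stands in for the replication client, which would POST
    # {"user_id", "device_id", "keys"} to the main process and relay its response.
    return await local_upload(user_id, device_id, keys)

async def main() -> None:
    is_main_process = False  # i.e. whether worker_app is unset in the config
    key_uploader = local_upload if is_main_process else replicated_upload
    result = await key_uploader(user_id="@alice:example.org", device_id="DEV1", keys={"k": "v"})
    print(result)

asyncio.run(main())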
@ -350,7 +350,7 @@ class LoginRestServlet(RestServlet):
auth_provider_session_id: The session ID got during login from the SSO IdP.

Returns:
result: Dictionary of account information after successful login.
Dictionary of account information after successful login.
"""

# Before we actually log them in we check if they've already logged in

@ -15,6 +15,7 @@
import logging
from typing import TYPE_CHECKING, Tuple

from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
@ -34,7 +35,9 @@ class LogoutRestServlet(RestServlet):
super().__init__()
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self._device_handler = handler

async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)
@ -59,7 +62,9 @@ class LogoutAllRestServlet(RestServlet):
super().__init__()
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self._device_handler = handler

async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)

@ -1288,17 +1288,14 @@ class TimestampLookupRestServlet(RestServlet):
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.

GET /_matrix/client/unstable/org.matrix.msc3030/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
GET /_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
{
"event_id": ...
}
"""

PATTERNS = (
re.compile(
"^/_matrix/client/unstable/org.matrix.msc3030"
"/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"
),
re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"),
)

def __init__(self, hs: "HomeServer"):
@ -1425,8 +1422,7 @@ def register_servlets(
RoomAliasListServlet(hs).register(http_server)
SearchRestServlet(hs).register(http_server)
RoomCreateRestServlet(hs).register(http_server)
if hs.config.experimental.msc3030_enabled:
TimestampLookupRestServlet(hs).register(http_server)
TimestampLookupRestServlet(hs).register(http_server)

# Some servlets only get registered for the main process.
if not is_worker:

@ -101,8 +101,6 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3827.stable": True,
# Adds support for importing historical messages as per MSC2716
"org.matrix.msc2716": self.config.experimental.msc2716_enabled,
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
"org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440.
"org.matrix.msc3440.stable": True,  # TODO: remove when "v1.3" is added above
# Support for thread read receipts & notification counts.

@ -344,8 +344,8 @@ class MediaRepository:
download from remote server.

Args:
server_name (str): Remote server_name where the media originated.
media_id (str): The media ID of the content (as defined by the
server_name: Remote server_name where the media originated.
media_id: The media ID of the content (as defined by the
remote server).

Returns:

@ -138,7 +138,7 @@ class Thumbnailer:
"""Rescales the image to the given dimensions.

Returns:
BytesIO: the bytes of the encoded image ready to be written to disk
The bytes of the encoded image ready to be written to disk
"""
with self._resize(width, height) as scaled:
return self._encode_image(scaled, output_type)
@ -155,7 +155,7 @@ class Thumbnailer:
max_height: The largest possible height.

Returns:
BytesIO: the bytes of the encoded image ready to be written to disk
The bytes of the encoded image ready to be written to disk
"""
if width * self.height > height * self.width:
scaled_width = width

@ -510,7 +510,7 @@ class HomeServer(metaclass=abc.ABCMeta):
)

@cache_in_self
def get_device_handler(self):
def get_device_handler(self) -> DeviceWorkerHandler:
if self.config.worker.worker_app:
return DeviceWorkerHandler(self)
else:

@ -113,9 +113,8 @@ def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
"""Deep-copy a structure, carrying out string substitutions on any strings

Args:
x (object): structure to be copied
substitutions (object): substitutions to be made - passed into the
string '%' operator
x: structure to be copied
substitutions: substitutions to be made - passed into the string '%' operator

Returns:
copy of x

@ -170,11 +170,13 @@ class ResourceLimitsServerNotices:
room_id: The room id of the server notices room

Returns:
bool: Is the room currently blocked
list: The list of pinned event IDs that are unrelated to limit blocking
This list can be used as a convenience in the case where the block
is to be lifted and the remaining pinned event references need to be
preserved
Tuple of:
Is the room currently blocked

The list of pinned event IDs that are unrelated to limit blocking
This list can be used as a convenience in the case where the block
is to be lifted and the remaining pinned event references need to be
preserved
"""
currently_blocked = False
pinned_state_event = None

Some files were not shown because too many files have changed in this diff.