Merge remote-tracking branch 'upstream/release-v1.74'

Tulir Asokan 2022-12-13 21:15:23 +02:00
commit 53d6ac569d
195 changed files with 4152 additions and 2239 deletions


@ -21,7 +21,7 @@ endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
-go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
+go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template


@ -208,7 +208,7 @@ jobs:
steps:
- uses: actions/checkout@v3
-- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+- uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:


@ -109,7 +109,29 @@ jobs:
components: clippy
- uses: Swatinem/rust-cache@v2
-- run: cargo clippy
+- run: cargo clippy -- -D warnings
# We also lint against a nightly rustc so that we can lint the benchmark
# suite, which requires a nightly compiler.
lint-clippy-nightly:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: nightly-2022-12-01
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --all-features -- -D warnings
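For reference, the nightly clippy job added above can also be reproduced outside CI; a rough local sketch (assuming `rustup` is installed and reusing the toolchain pinned in the workflow):

```sh
# Install the pinned nightly toolchain together with clippy, then lint with all features enabled.
rustup toolchain install nightly-2022-12-01 --component clippy
cargo +nightly-2022-12-01 clippy --all-features -- -D warnings
```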
lint-rustfmt:
runs-on: ubuntu-latest
@ -175,8 +197,12 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
# 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
# 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
run: |
docker run -d -p 5432:5432 \
--tmpfs /var/lib/postgres:rw,size=6144m \
--mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
@ -198,10 +224,10 @@ jobs:
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
-- run: poetry run trial --jobs=2 tests
+- run: poetry run trial --jobs=6 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
-SYNAPSE_POSTGRES_HOST: localhost
+SYNAPSE_POSTGRES_HOST: /var/run/postgresql
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
@ -270,7 +296,7 @@ jobs:
python-version: '3.7'
extras: "all test"
-- run: poetry run trial -j2 tests
+- run: poetry run trial -j6 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}


@ -174,7 +174,7 @@ jobs:
steps:
- uses: actions/checkout@v3
-- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+- uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:


@ -1,3 +1,74 @@
Synapse 1.74.0rc1 (2022-12-13)
==============================
Features
--------
- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))
Bugfixes
--------
- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
- Suppress a spurious warning when `POST /rooms/<room_id>/<membership>/`, `POST /join/<room_id_or_alias`, or the unspecced `PUT /join/<room_id_or_alias>/<txn_id>` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))
Improved Documentation
----------------------
- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))
Internal Changes
----------------
- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))
Synapse 1.73.0 (2022-12-06)
===========================
@ -23,7 +94,7 @@ Features
- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
-- Adds support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))

Cargo.lock

@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.148"
+version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
+checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.148"
+version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
+checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e"
dependencies = [
"proc-macro2",
"quote",

debian/changelog

@ -1,3 +1,11 @@
matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium
* New dependency on libicu-dev to provide improved results for user
search.
* New Synapse release 1.74.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Dec 2022 13:30:01 +0000
matrix-synapse-py3 (1.73.0) stable; urgency=medium
* New Synapse release 1.73.0.

debian/control

@ -8,6 +8,8 @@ Build-Depends:
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
libicu-dev,
pkg-config,
lsb-release,
python3-dev,
python3,


@ -43,7 +43,7 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
-build-essential cargo git libffi-dev libssl-dev \
+build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
@ -97,6 +97,8 @@ RUN \
zlib1g-dev \
git \
curl \
libicu-dev \
pkg-config \
&& rm -rf /var/lib/apt/lists/*


@ -84,6 +84,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
# Install rust and ensure it's in the PATH


@ -1,6 +1,7 @@
# syntax=docker/dockerfile:1
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
@ -23,7 +24,7 @@ FROM debian:bullseye-slim AS deps_base
FROM redis:6-bullseye AS redis_base
# now build the final image, based on the the regular Synapse docker image
-FROM matrixdotorg/synapse:$SYNAPSE_VERSION
+FROM $FROM
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.


@ -7,8 +7,9 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+FROM $FROM
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.


@ -0,0 +1,75 @@
# syntax=docker/dockerfile:1
# This dockerfile builds an editable install of Synapse.
#
# Used by `complement.sh`. Not suitable for production use.
ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
zlib1g-dev \
git \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
&& rm -rf /var/lib/apt/lists/*
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# Make a base copy of the editable source tree, so that we have something to
# install and build now — even though it's going to be covered up by a mount
# at runtime.
COPY synapse /editable-src/synapse/
COPY rust /editable-src/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/
RUN pip install poetry
RUN poetry config virtualenvs.create false
RUN cd /editable-src && poetry install --extras all
# Make copies of useful things for inspection:
# - the Rust module (must be copied to the editable source tree before startup)
# - poetry.lock is useful for checking if dependencies have changed.
RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
RUN cp /editable-src/poetry.lock /poetry.lock.bak
### Extra setup from original Dockerfile
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
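A possible invocation of the editable Complement workflow that this Dockerfile backs (a sketch only; it assumes a Complement checkout at `../complement` and uses the `--editable` flag that this release adds to `complement.sh`):

```sh
# Point complement.sh at a local Complement checkout and reuse the editable install between runs.
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh --editable
```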


@ -590,3 +590,44 @@ oidc_providers:
display_name_template: "{{ user.first_name }} {{ user.last_name }}"
email_template: "{{ user.email }}"
```
### Mastodon
[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.
The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using CURL.
This example assumes that:
* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
* Synapse will be registered as an app named `my_synapse_app`.
Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
```sh
curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
```
You should receive a response similar to the following. Make sure to save it.
```json
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
```yaml
oidc_providers:
- idp_id: my_mastodon
idp_name: "Mastodon Instance Example"
discover: false
issuer: "https://your.mastodon.instance.url/@admin"
client_id: "someclientid_123"
client_secret: "someclientsecret_123"
authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
token_endpoint: "https://your.mastodon.instance.url/oauth/token"
userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
scopes: ["read"]
user_mapping_provider:
config:
subject_claim: "id"
```
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.


@ -1,6 +1,7 @@
# Using Postgres
-Synapse supports PostgreSQL versions 10 or later.
+The minimum supported version of PostgreSQL is determined by the [Dependency
+Deprecation Policy](deprecation_policy.md).
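As a quick way to see whether an existing server already satisfies that policy, something like the following should work (a sketch, assuming a Debian-style install where the `postgres` superuser can connect locally):

```sh
# Ask the running server which version it is.
sudo -u postgres psql -c 'SHOW server_version;'
```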
## Install postgres client libraries


@ -84,7 +84,9 @@ file when you upgrade the Debian package to a later version.
##### Downstream Debian packages
-Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
+Andrej Shadura maintains a
+[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
+the Debian repositories.
For `bookworm` and `sid`, it can be installed simply with:
```sh
@ -100,23 +102,27 @@ for information on how to use backports.
##### Downstream Ubuntu packages
We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
+at this time, as they are [old and suffer from known security vulnerabilities](
https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
).
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
#### Fedora
-Synapse is in the Fedora repositories as `matrix-synapse`:
+Synapse is in the Fedora repositories as
[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):
```sh
sudo dnf install matrix-synapse
```
-Oleg Girko provides Fedora RPMs at
+Additionally, Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
#### OpenSUSE
-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+Synapse is in the OpenSUSE repositories as
[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):
```sh
sudo zypper install matrix-synapse
@ -151,7 +157,8 @@ sudo pip install py-bcrypt
#### Void Linux
-Synapse can be found in the void repositories as 'synapse':
+Synapse can be found in the void repositories as
['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):
```sh
xbps-install -Su


@ -38,7 +38,7 @@ As an example, here is the relevant section of the config file for `matrix.org`.
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
turn_user_lifetime: 86400000
-turn_allow_guests: True
+turn_allow_guests: true
After updating the homeserver configuration, you must restart synapse:


@ -79,7 +79,7 @@ Here we can see that the request has been tagged with `GET-37`. (The tag depends
grep 'GET-37' homeserver.log
```
-If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code).
+If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).
What do all those fields in the 'Processed' line mean?


@ -858,7 +858,7 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.
The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history but in general caution is advised when enabling this
@ -2501,32 +2501,53 @@ Config settings related to the client/server API
---
### `room_prejoin_state`
-Controls for the state that is shared with users who receive an invite
-to a room. By default, the following state event types are shared with users who
-receive invites to the room:
-- m.room.join_rules
-- m.room.canonical_alias
-- m.room.avatar
-- m.room.encryption
-- m.room.name
-- m.room.create
-- m.room.topic
+This setting controls the state that is shared with users upon receiving an
+invite to a room, or in reply to a knock on a room. By default, the following
+state events are shared with users:
+- `m.room.join_rules`
+- `m.room.canonical_alias`
+- `m.room.avatar`
+- `m.room.encryption`
+- `m.room.name`
+- `m.room.create`
+- `m.room.topic`
To change the default behavior, use the following sub-options:
-* `disable_default_event_types`: set to true to disable the above defaults. If this
-is enabled, only the event types listed in `additional_event_types` are shared.
-Defaults to false.
-* `additional_event_types`: Additional state event types to share with users when they are invited
-to a room. By default, this list is empty (so only the default event types are shared).
+* `disable_default_event_types`: boolean. Set to `true` to disable the above
+defaults. If this is enabled, only the event types listed in
+`additional_event_types` are shared. Defaults to `false`.
+* `additional_event_types`: A list of additional state events to include in the
+events to be shared. By default, this list is empty (so only the default event
+types are shared).
Each entry in this list should be either a single string or a list of two
strings.
* A standalone string `t` represents all events with type `t` (i.e.
with no restrictions on state keys).
* A pair of strings `[t, s]` represents a single event with type `t` and
state key `s`. The same type can appear in two entries with different state
keys: in this situation, both state keys are included in prejoin state.
Example configuration:
```yaml
room_prejoin_state:
-disable_default_event_types: true
+disable_default_event_types: false
additional_event_types:
-- org.example.custom.event.type
-- m.room.join_rules
+# Share all events of type `org.example.custom.event.typeA`
+- org.example.custom.event.typeA
# Share only events of type `org.example.custom.event.typeB` whose
# state_key is "foo"
- ["org.example.custom.event.typeB", "foo"]
# Share only events of type `org.example.custom.event.typeC` whose
# state_key is "bar" or "baz"
- ["org.example.custom.event.typeC", "bar"]
- ["org.example.custom.event.typeC", "baz"]
```
*Changed in Synapse 1.74:* admins can filter the events in prejoin state based
on their state key.
---
### `track_puppeted_user_ips`
@ -3003,7 +3024,7 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
Defaults to `false`.
@ -3355,6 +3376,10 @@ Configuration settings related to push notifications
This setting defines options for push notifications.
This option has a number of sub-options. They are as follows:
* `enabled`: Enables or disables push notification calculation. Note, disabling this will also
stop unread counts being calculated for rooms. This mode of operation is intended
for homeservers which may only have bots or appservice users connected, or are otherwise
not interested in push/unread counters. This is enabled by default.
* `include_content`: Clients requesting push notifications can either have the body of
the message sent in the notification poke along with other details
like the sender, or just the event ID and room ID (`event_id_only`).
@ -3375,6 +3400,7 @@ This option has a number of sub-options. They are as follows:
Example configuration:
```yaml
push:
enabled: true
include_content: false
group_unread_count_by_room: false
```
@ -3420,7 +3446,7 @@ This option has the following sub-options:
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
These indexes are built the first time Synapse starts; admins can
manually trigger a rebuild via the API following the instructions
[for running background updates](../administration/admin_api/background_updates.md#run),
@ -3679,7 +3705,7 @@ As a result, the worker configuration is divided into two parts.
1. The first part (in this section of the manual) defines which shardable tasks
are delegated to privileged workers. This allows unprivileged workers to make
-request a privileged worker to act on their behalf.
+requests to a privileged worker to act on their behalf.
1. [The second part](#individual-worker-configuration)
controls the behaviour of individual workers in isolation.
@ -3691,7 +3717,7 @@ For guidance on setting up workers, see the [worker documentation](../../workers
A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.
The default, this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.
Example configuration:
@ -3701,6 +3727,8 @@ worker_replication_secret: "secret_secret"
---
### `start_pushers`
Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
@ -3711,25 +3739,30 @@ start_pushers: false
---
### `pusher_instances`
-It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
-in which case the work is balanced across them. Use this setting to list the pushers by
-[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
-restarted after changing this option.
-If no or only one pusher worker is configured, this setting is not necessary.
-The main process will send out push notifications by default if you do not disable
-it by setting [`start_pushers: false`](#start_pushers).
-Example configuration:
+It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal)
+and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to
+a `pusher_instances` map. Doing so will remove handling of this function from the main
+process. Multiple workers can be added to this map, in which case the work is balanced
+across them. Ensure the main process and all pusher workers are restarted after changing
+this option.
+Example configuration for a single worker:
+```yaml
+pusher_instances:
+- pusher_worker1
+```
+And for multiple workers:
```yaml
-start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
```
---
### `send_federation`
Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
Controls sending of outbound federation transactions on the main process.
Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
Defaults to `true`.
@ -3741,29 +3774,36 @@ send_federation: false
---
### `federation_sender_instances`
-It is possible to run multiple
-[federation sender worker](../../workers.md#synapseappfederation_sender), in which
-case the work is balanced across them. Use this setting to list the senders.
+It is possible to scale the processes that handle sending outbound federation requests
+by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to
+a `federation_sender_instances` map. Doing so will remove handling of this function from
+the main process. Multiple workers can be added to this map, in which case the work is
+balanced across them.
-This configuration setting must be shared between all federation sender workers, and if
-changed all federation sender workers must be stopped at the same time and then
-started, to ensure that all instances are running with the same config (otherwise
+This configuration setting must be shared between all workers handling federation
+sending, and if changed all federation sender workers must be stopped at the same time
+and then started, to ensure that all instances are running with the same config (otherwise
events may be dropped).
-Example configuration:
+Example configuration for a single worker:
```yaml
-send_federation: false
federation_sender_instances:
- federation_sender1
```
+And for multiple workers:
+```yaml
+federation_sender_instances:
+- federation_sender1
+- federation_sender2
+```
---
### `instance_map`
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)
Example configuration:
@ -3897,8 +3937,8 @@ worker_replication_http_tls: true
---
### `worker_listeners`
A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.
Workers declared in [`stream_writers`](#stream_writers) will need to include a
@ -3917,7 +3957,7 @@ worker_listeners:
### `worker_daemonize`
Specifies whether the worker should be started as a daemon process.
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.
Defaults to `false`.
@ -3929,11 +3969,11 @@ worker_daemonize: true
---
### `worker_pid_file`
When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".
This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.
See also the [`pid_file` option](#pid_file) option for the main Synapse process.
@ -3983,4 +4023,3 @@ background_updates:
min_batch_size: 10
default_batch_size: 50
```


@ -505,6 +505,9 @@ worker application type.
### `synapse.app.pusher`
It is likely this option will be deprecated in the future and is not recommended for new
installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).
Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
@ -543,6 +546,9 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
It is likely this option will be deprecated in the future and not recommended for
new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).
Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
@ -639,7 +645,9 @@ equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
* `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
* `synapse.app.pusher`
* `synapse.app.synchrotron`


@ -12,6 +12,7 @@ local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True
warn_redundant_casts = True
files =
docker/,
@ -59,16 +60,6 @@ exclude = (?x)
|tests/server_notices/test_resource_limits_server_notices.py
|tests/test_state.py
|tests/test_terms_auth.py
|tests/util/test_async_helpers.py
|tests/util/test_batching_queue.py
|tests/util/test_dict_cache.py
|tests/util/test_expiring_cache.py
|tests/util/test_file_consumer.py
|tests/util/test_linearizer.py
|tests/util/test_logcontext.py
|tests/util/test_lrucache.py
|tests/util/test_rwlock.py
|tests/util/test_wheel_timer.py
)$
[mypy-synapse.federation.transport.client]
@ -98,6 +89,15 @@ disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False
[mypy-tests.config.test_api]
disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True
[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
@ -107,28 +107,19 @@ disallow_untyped_defs = True
[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True
-[mypy-tests.test_server]
+[mypy-tests.rest.*]
disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
-[mypy-tests.storage.test_id_generators]
+[mypy-tests.storage.*]
disallow_untyped_defs = True
-[mypy-tests.storage.test_profile]
+[mypy-tests.test_server]
disallow_untyped_defs = True
-[mypy-tests.handlers.test_sso]
+[mypy-tests.types.*]
disallow_untyped_defs = True
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True
[mypy-tests.rest.*]
disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True
[mypy-tests.util.caches.*] [mypy-tests.util.caches.*]
@ -137,6 +128,9 @@ disallow_untyped_defs = True
[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
[mypy-tests.util.*]
disallow_untyped_defs = True
[mypy-tests.utils]
disallow_untyped_defs = True

poetry.lock

@ -13,8 +13,8 @@ tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900
tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
-name = "Authlib"
+name = "authlib"
-version = "1.1.0"
+version = "1.2.0"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
category = "main"
optional = true
@ -106,11 +106,11 @@ frozendict = ["frozendict (>=1.0)"]
[[package]]
name = "certifi"
-version = "2021.10.8"
+version = "2022.12.7"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[[package]]
name = "cffi"
@ -186,7 +186,7 @@ python-versions = "*"
[[package]]
name = "cryptography"
-version = "38.0.3"
+version = "38.0.4"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
@ -260,7 +260,7 @@ pyflakes = ">=2.5.0,<2.6.0"
[[package]]
name = "flake8-bugbear"
-version = "22.10.27"
+version = "22.12.6"
description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
category = "dev"
optional = false
@ -452,7 +452,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.17.0"
+version = "4.17.3"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@ -633,14 +633,11 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
[[package]]
name = "packaging"
-version = "21.3"
+version = "22.0"
description = "Core utilities for Python packages"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
[[package]]
name = "parameterized"
@ -663,7 +660,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[[package]]
name = "phonenumbers"
-version = "8.13.0"
+version = "8.13.2"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
@ -837,6 +834,14 @@ category = "dev"
optional = false
python-versions = ">=3.5"
[[package]]
name = "pyicu"
version = "2.10.2"
description = "Python extension wrapping the ICU C++ API"
category = "main"
optional = true
python-versions = "*"
[[package]]
name = "pyjwt"
version = "2.4.0"
@ -888,30 +893,19 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
-version = "22.0.0"
+version = "22.1.0"
description = "Python wrapper module around the OpenSSL library"
category = "main"
optional = false
python-versions = ">=3.6"
[package.dependencies]
-cryptography = ">=35.0"
+cryptography = ">=38.0.0,<39"
[package.extras]
-docs = ["sphinx", "sphinx-rtd-theme"]
+docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
test = ["flaky", "pretend", "pytest (>=3.0.1)"]
[[package]]
name = "pyparsing"
version = "3.0.7"
description = "Python parsing module"
category = "main"
optional = false
python-versions = ">=3.6"
[package.extras]
diagrams = ["jinja2", "railroad-diagrams"]
[[package]]
name = "pyrsistent"
version = "0.18.1"
@ -1076,7 +1070,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.11.0"
+version = "1.11.1"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@ -1295,7 +1289,7 @@ docs = ["sphinx (>=1.4.8)"]
[[package]]
name = "twine"
-version = "4.0.1"
+version = "4.0.2"
description = "Collection of utilities for publishing packages on PyPI"
category = "dev"
optional = false
@ -1380,7 +1374,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-bleach"
-version = "5.0.3"
+version = "5.0.3.1"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@ -1440,7 +1434,7 @@ python-versions = "*"
[[package]]
name = "types-pillow"
-version = "9.3.0.1"
+version = "9.3.0.4"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@ -1448,7 +1442,7 @@ python-versions = "*"
[[package]]
name = "types-psycopg2"
-version = "2.9.21.1"
+version = "2.9.21.2"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
@ -1475,7 +1469,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.28.11.2"
+version = "2.28.11.5"
description = "Typing stubs for requests"
category = "dev"
optional = false
@ -1486,7 +1480,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
-version = "65.5.0.3"
+version = "65.6.0.1"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@ -1622,7 +1616,7 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras] [extras]
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler"] all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"]
cache-memory = ["Pympler"] cache-memory = ["Pympler"]
jwt = ["authlib"] jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
@ -1635,20 +1629,21 @@ sentry = ["sentry-sdk"]
systemd = ["systemd-python"] systemd = ["systemd-python"]
test = ["parameterized", "idna"] test = ["parameterized", "idna"]
url-preview = ["lxml"] url-preview = ["lxml"]
user-search = ["pyicu"]
[metadata] [metadata]
lock-version = "1.1" lock-version = "1.1"
python-versions = "^3.7.1" python-versions = "^3.7.1"
content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0" content-hash = "f20007013f33bc35a01e412c48adc62a936030f3074e06286674c5ad7f44d300"
[metadata.files] [metadata.files]
attrs = [ attrs = [
{file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
{file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
] ]
Authlib = [ authlib = [
{file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"}, {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"},
{file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"}, {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"},
] ]
automat = [ automat = [
{file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"}, {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
@ -1709,8 +1704,8 @@ canonicaljson = [
{file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"}, {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
] ]
certifi = [ certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
{file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
] ]
cffi = [ cffi = [
{file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"},
@ -1788,32 +1783,32 @@ constantly = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"}, {file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
] ]
cryptography = [ cryptography = [
{file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320"}, {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"},
{file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722"}, {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"},
{file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f"}, {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"},
{file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828"}, {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"},
{file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959"}, {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"},
{file = "cryptography-38.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2"}, {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"},
{file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c"}, {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"},
{file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0"}, {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"},
{file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748"}, {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"},
{file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146"}, {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"},
{file = "cryptography-38.0.3-cp36-abi3-win32.whl", hash = "sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0"}, {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"},
{file = "cryptography-38.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220"}, {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"},
{file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd"}, {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"},
{file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55"}, {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"},
{file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b"}, {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"},
{file = "cryptography-38.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36"}, {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"},
{file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d"}, {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"},
{file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7"}, {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"},
{file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249"}, {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"},
{file = "cryptography-38.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50"}, {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"},
{file = "cryptography-38.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0"}, {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"},
{file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8"}, {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"},
{file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436"}, {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"},
{file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548"}, {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"},
{file = "cryptography-38.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a"}, {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"},
{file = "cryptography-38.0.3.tar.gz", hash = "sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd"}, {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"},
] ]
defusedxml = [ defusedxml = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
@ -1836,8 +1831,8 @@ flake8 = [
{file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"}, {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
] ]
flake8-bugbear = [ flake8-bugbear = [
{file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"}, {file = "flake8-bugbear-22.12.6.tar.gz", hash = "sha256:4cdb2c06e229971104443ae293e75e64c6107798229202fbe4f4091427a30ac0"},
{file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"}, {file = "flake8_bugbear-22.12.6-py3-none-any.whl", hash = "sha256:b69a510634f8a9c298dfda2b18a8036455e6b19ecac4fe582e4d7a0abfa50a30"},
] ]
flake8-comprehensions = [ flake8-comprehensions = [
{file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"}, {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
@ -2013,8 +2008,8 @@ jinja2 = [
{file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
] ]
jsonschema = [ jsonschema = [
{file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"}, {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
{file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"}, {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
] ]
keyring = [ keyring = [
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"}, {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
@ -2246,8 +2241,8 @@ opentracing = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
] ]
packaging = [ packaging = [
{file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"},
{file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"},
] ]
parameterized = [ parameterized = [
{file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"}, {file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"},
@ -2258,8 +2253,8 @@ pathspec = [
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
] ]
phonenumbers = [ phonenumbers = [
{file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"}, {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"},
{file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"}, {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"},
] ]
pillow = [ pillow = [
{file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"}, {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
@ -2427,6 +2422,9 @@ pygments = [
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
{file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
] ]
pyicu = [
{file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"},
]
pyjwt = [ pyjwt = [
{file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
{file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
@ -2452,12 +2450,8 @@ pynacl = [
{file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
] ]
pyopenssl = [ pyopenssl = [
{file = "pyOpenSSL-22.0.0-py2.py3-none-any.whl", hash = "sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0"}, {file = "pyOpenSSL-22.1.0-py3-none-any.whl", hash = "sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e"},
{file = "pyOpenSSL-22.0.0.tar.gz", hash = "sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf"}, {file = "pyOpenSSL-22.1.0.tar.gz", hash = "sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968"},
]
pyparsing = [
{file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
{file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"},
] ]
pyrsistent = [ pyrsistent = [
{file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
@ -2569,8 +2563,8 @@ semantic-version = [
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
] ]
sentry-sdk = [ sentry-sdk = [
{file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"}, {file = "sentry-sdk-1.11.1.tar.gz", hash = "sha256:675f6279b6bb1fea09fd61751061f9a90dca3b5929ef631dd50dc8b3aeb245e9"},
{file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"}, {file = "sentry_sdk-1.11.1-py2.py3-none-any.whl", hash = "sha256:8b4ff696c0bdcceb3f70bbb87a57ba84fd3168b1332d493fcd16c137f709578c"},
] ]
service-identity = [ service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@ -2729,8 +2723,8 @@ treq = [
{file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"}, {file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
] ]
twine = [ twine = [
{file = "twine-4.0.1-py3-none-any.whl", hash = "sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e"}, {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"},
{file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"}, {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"},
] ]
twisted = [ twisted = [
{file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"}, {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
@ -2781,8 +2775,8 @@ typed-ast = [
{file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
] ]
types-bleach = [ types-bleach = [
{file = "types-bleach-5.0.3.tar.gz", hash = "sha256:f7b3df8278efe176d9670d0f063a66c866c77577f71f54b9c7a320e31b1a7bbd"}, {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"},
{file = "types_bleach-5.0.3-py3-none-any.whl", hash = "sha256:5931525d03571f36b2bb40210c34b662c4d26c8fd6f2b1e1e83fe4d2d2fd63c7"}, {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"},
] ]
types-commonmark = [ types-commonmark = [
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"}, {file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
@ -2809,12 +2803,12 @@ types-opentracing = [
{file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
] ]
types-pillow = [ types-pillow = [
{file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"}, {file = "types-Pillow-9.3.0.4.tar.gz", hash = "sha256:c18d466dc18550d96b8b4a279ff94f0cbad696825b5ad55466604f1daf5709de"},
{file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"}, {file = "types_Pillow-9.3.0.4-py3-none-any.whl", hash = "sha256:98b8484ff343676f6f7051682a6cfd26896e993e86b3ce9badfa0ec8750f5405"},
] ]
types-psycopg2 = [ types-psycopg2 = [
{file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"}, {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"},
{file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"}, {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"},
] ]
types-pyopenssl = [ types-pyopenssl = [
{file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"}, {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
@ -2825,12 +2819,12 @@ types-pyyaml = [
{file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"}, {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
] ]
types-requests = [ types-requests = [
{file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"}, {file = "types-requests-2.28.11.5.tar.gz", hash = "sha256:a7df37cc6fb6187a84097da951f8e21d335448aa2501a6b0a39cbd1d7ca9ee2a"},
{file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"}, {file = "types_requests-2.28.11.5-py3-none-any.whl", hash = "sha256:091d4a5a33c1b4f20d8b1b952aa8fa27a6e767c44c3cf65e56580df0b05fd8a9"},
] ]
types-setuptools = [ types-setuptools = [
{file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"}, {file = "types-setuptools-65.6.0.1.tar.gz", hash = "sha256:a03cf72f336929c9405f485dd90baef31a401776675f785f69a5a519f0b099ca"},
{file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"}, {file = "types_setuptools-65.6.0.1-py3-none-any.whl", hash = "sha256:c957599502195ab98e90f0560466fa963f6a23373905e6d4e1772dbfaf1e44b7"},
] ]
types-urllib3 = [ types-urllib3 = [
{file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},

View file

@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry] [tool.poetry]
name = "matrix-synapse" name = "matrix-synapse"
version = "1.73.0" version = "1.74.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol" description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0" license = "Apache-2.0"
@ -141,7 +141,8 @@ pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7" pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7" bcrypt = ">=3.1.7"
Pillow = ">=5.4.0" Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4" # We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
sortedcontainers = ">=1.5.2"
pymacaroons = ">=0.13.0" pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2" msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0" phonenumbers = ">=8.2.0"
@ -207,6 +208,7 @@ hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true } Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true } parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true } idna = { version = ">=2.5", optional = true }
pyicu = { version = ">=2.10.2", optional = true }
[tool.poetry.extras] [tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified # NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
@ -229,6 +231,10 @@ redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option. # Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"] cache-memory = ["pympler"]
test = ["parameterized", "idna"] test = ["parameterized", "idna"]
# Allows for better search for international characters in the user directory. This
# requires libicu's development headers installed on the system (e.g. libicu-dev on
# Debian-based distributions).
user-search = ["pyicu"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want # The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations: # to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
@ -260,6 +266,8 @@ all = [
"txredisapi", "hiredis", "txredisapi", "hiredis",
# cache-memory # cache-memory
"pympler", "pympler",
# improved user search
"pyicu",
# omitted: # omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job # - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement # - systemd: this is a system-based requirement
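The raised sortedcontainers bound a few lines above is there because Synapse relies on SortedDict.peekitem(). A minimal sketch of that call, with illustrative keys and values:

    from sortedcontainers import SortedDict

    ordering = SortedDict({3: "c", 1: "a", 2: "b"})
    # peekitem() returns the (key, value) pair at an index without removing it;
    # it was added in sortedcontainers 1.5.2, hence the new lower bound.
    assert ordering.peekitem(0) == (1, "a")     # smallest key
    assert ordering.peekitem(-1) == (3, "c")    # largest key (the default index)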

View file

@ -1,9 +1,9 @@
attrs==22.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ attrs==22.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \ --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \
--hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c
authlib==1.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ authlib==1.2.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891 \ --hash=sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a \
--hash=sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523 --hash=sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d
automat==22.10.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ automat==22.10.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180 \ --hash=sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180 \
--hash=sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e --hash=sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e
@ -35,9 +35,9 @@ bleach==5.0.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0"
canonicaljson==1.6.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ canonicaljson==1.6.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48 \ --hash=sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48 \
--hash=sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f --hash=sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f
certifi==2021.10.8 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ certifi==2022.12.7 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872 \ --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
--hash=sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569 --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
cffi==1.15.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ cffi==1.15.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3 \ --hash=sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3 \
--hash=sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2 \ --hash=sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2 \
@ -95,33 +95,33 @@ charset-normalizer==2.0.12 ; python_full_version >= "3.7.1" and python_full_vers
constantly==15.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ constantly==15.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35 \ --hash=sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35 \
--hash=sha256:dd2fa9d6b1a51a83f0d7dd76293d734046aa176e384bf6e33b7e44880eb37c5d --hash=sha256:dd2fa9d6b1a51a83f0d7dd76293d734046aa176e384bf6e33b7e44880eb37c5d
cryptography==38.0.3 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ cryptography==38.0.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d \ --hash=sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd \
--hash=sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd \ --hash=sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db \
--hash=sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146 \ --hash=sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290 \
--hash=sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7 \ --hash=sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744 \
--hash=sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436 \ --hash=sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb \
--hash=sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0 \ --hash=sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d \
--hash=sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828 \ --hash=sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70 \
--hash=sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b \ --hash=sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b \
--hash=sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55 \ --hash=sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876 \
--hash=sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36 \ --hash=sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083 \
--hash=sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50 \ --hash=sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6 \
--hash=sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2 \ --hash=sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1 \
--hash=sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a \ --hash=sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00 \
--hash=sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8 \ --hash=sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b \
--hash=sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0 \ --hash=sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b \
--hash=sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548 \ --hash=sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285 \
--hash=sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320 \ --hash=sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9 \
--hash=sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748 \ --hash=sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0 \
--hash=sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249 \ --hash=sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d \
--hash=sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959 \ --hash=sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2 \
--hash=sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f \ --hash=sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8 \
--hash=sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0 \ --hash=sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee \
--hash=sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd \ --hash=sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b \
--hash=sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220 \ --hash=sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7 \
--hash=sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c \ --hash=sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353 \
--hash=sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722 --hash=sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c
frozendict==2.3.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ frozendict==2.3.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78 \ --hash=sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78 \
--hash=sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90 \ --hash=sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90 \
@ -263,9 +263,9 @@ incremental==21.3.0 ; python_full_version >= "3.7.1" and python_full_version < "
jinja2==3.1.2 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ jinja2==3.1.2 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
--hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
jsonschema==4.17.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ jsonschema==4.17.3 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d \ --hash=sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d \
--hash=sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248 --hash=sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6
lxml==4.9.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ lxml==4.9.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318 \ --hash=sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318 \
--hash=sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c \ --hash=sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c \
@ -437,15 +437,15 @@ msgpack==1.0.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0
netaddr==0.8.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ netaddr==0.8.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac \ --hash=sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac \
--hash=sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243 --hash=sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243
packaging==21.3 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ packaging==22.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ --hash=sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3 \
--hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 --hash=sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3
parameterized==0.8.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ parameterized==0.8.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c \ --hash=sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c \
--hash=sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9 --hash=sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9
phonenumbers==8.13.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ phonenumbers==8.13.2 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4 \ --hash=sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a \
--hash=sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0 --hash=sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf
pillow==9.3.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ pillow==9.3.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \ --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
--hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \ --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
@ -592,12 +592,9 @@ pynacl==1.5.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0"
--hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \ --hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \
--hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \ --hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \
--hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543 --hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543
pyopenssl==22.0.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ pyopenssl==22.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf \ --hash=sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968 \
--hash=sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0 --hash=sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e
pyparsing==3.0.7 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea \
--hash=sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484
pyrsistent==0.18.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \ pyrsistent==0.18.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c \ --hash=sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c \
--hash=sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc \ --hash=sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc \

View file

@ -33,10 +33,12 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new( let eval = PushRuleEvaluator::py_new(
flattened_keys, flattened_keys,
10, 10,
0, Some(0),
Default::default(), Default::default(),
Default::default(), Default::default(),
true, true,
vec![],
false,
) )
.unwrap(); .unwrap();
@ -67,10 +69,12 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new( let eval = PushRuleEvaluator::py_new(
flattened_keys, flattened_keys,
10, 10,
0, Some(0),
Default::default(), Default::default(),
Default::default(), Default::default(),
true, true,
vec![],
false,
) )
.unwrap(); .unwrap();
@ -101,10 +105,12 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new( let eval = PushRuleEvaluator::py_new(
flattened_keys, flattened_keys,
10, 10,
0, Some(0),
Default::default(), Default::default(),
Default::default(), Default::default(),
true, true,
vec![],
false,
) )
.unwrap(); .unwrap();
@ -135,10 +141,12 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new( let eval = PushRuleEvaluator::py_new(
flattened_keys, flattened_keys,
10, 10,
0, Some(0),
Default::default(), Default::default(),
Default::default(), Default::default(),
true, true,
vec![],
false,
) )
.unwrap(); .unwrap();

View file

@ -12,10 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use std::borrow::Cow;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use crate::push::{PushRule, PushRules};
use anyhow::{Context, Error}; use anyhow::{Context, Error};
use lazy_static::lazy_static; use lazy_static::lazy_static;
use log::warn; use log::warn;
@ -98,6 +96,7 @@ pub struct PushRuleEvaluator {
#[pymethods] #[pymethods]
impl PushRuleEvaluator { impl PushRuleEvaluator {
/// Create a new `PushRuleEvaluator`. See struct docstring for details. /// Create a new `PushRuleEvaluator`. See struct docstring for details.
#[allow(clippy::too_many_arguments)]
#[new] #[new]
pub fn py_new( pub fn py_new(
flattened_keys: BTreeMap<String, String>, flattened_keys: BTreeMap<String, String>,
@ -153,15 +152,12 @@ impl PushRuleEvaluator {
let mut has_rver_condition = false; let mut has_rver_condition = false;
for condition in push_rule.conditions.iter() { for condition in push_rule.conditions.iter() {
has_rver_condition = has_rver_condition has_rver_condition |= matches!(
|| match condition { condition,
Condition::Known(known) => match known { // per MSC3932, we just need *any* room version condition to match
// per MSC3932, we just need *any* room version condition to match Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
KnownCondition::RoomVersionSupports { feature: _ } => true, );
_ => false,
},
_ => false,
};
match self.match_condition(condition, user_id, display_name) { match self.match_condition(condition, user_id, display_name) {
Ok(true) => {} Ok(true) => {}
Ok(false) => continue 'outer, Ok(false) => continue 'outer,
@ -444,6 +440,10 @@ fn push_rule_evaluator() {
#[test] #[test]
fn test_requires_room_version_supports_condition() { fn test_requires_room_version_supports_condition() {
use std::borrow::Cow;
use crate::push::{PushRule, PushRules};
let mut flattened_keys = BTreeMap::new(); let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];

View file

@ -53,6 +53,12 @@ Run the complement test suite on Synapse.
Only build the Docker images. Don't actually run Complement. Only build the Docker images. Don't actually run Complement.
Conflicts with -f/--fast. Conflicts with -f/--fast.
-e, --editable
Use an editable build of Synapse, rebuilding the image if necessary.
This is suitable for use in development where a fast turn-around time
is important.
It is not suitable for use in CI, since the editable environment may be impure.
For help on arguments to 'go test', run 'go help testflag'. For help on arguments to 'go test', run 'go help testflag'.
EOF EOF
} }
@ -73,6 +79,9 @@ while [ $# -ge 1 ]; do
"--build-only") "--build-only")
skip_complement_run=1 skip_complement_run=1
;; ;;
"-e"|"--editable")
use_editable_synapse=1
;;
*) *)
# unknown arg: presumably an argument to gotest. break the loop. # unknown arg: presumably an argument to gotest. break the loop.
break break
@ -96,25 +105,76 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
echo "Checkout available at 'complement-${COMPLEMENT_REF}'" echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi fi
if [ -n "$use_editable_synapse" ]; then
if [[ -e synapse/synapse_rust.abi3.so ]]; then
# In an editable install, back up the host's compiled Rust module to prevent
# inconvenience; the container will overwrite the module with its own copy.
mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
# And restore it on exit:
synapse_pkg=`realpath synapse`
trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
fi
editable_mount="$(realpath .):/editable-src:z"
if docker inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
# First set up the module in the right place for an editable installation.
docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
unset skip_docker_build
fi
fi
fi
if [ -z "$skip_docker_build" ]; then if [ -z "$skip_docker_build" ]; then
# Build the base Synapse image from the local checkout if [ -n "$use_editable_synapse" ]; then
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
docker build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built). # Build a special image designed for use in development with editable
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" # installs.
docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . docker build -t synapse-editable \
echo_if_github "::endgroup::" -f "docker/editable.Dockerfile" .
# Build the unified Complement image (from the worker Synapse image we just built). docker build -t synapse-workers-editable \
echo_if_github "::group::Build Docker image: complement/Dockerfile" --build-arg FROM=synapse-editable \
docker build -t complement-synapse \ -f "docker/Dockerfile-workers" .
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::" docker build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
# Prepare the Rust module
docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
else
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
docker build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
docker build -t complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"
fi
fi fi
if [ -n "$skip_complement_run" ]; then if [ -n "$skip_complement_run" ]; then
@ -123,6 +183,10 @@ if [ -n "$skip_complement_run" ]; then
fi fi
export COMPLEMENT_BASE_IMAGE=complement-synapse export COMPLEMENT_BASE_IMAGE=complement-synapse
if [ -n "$use_editable_synapse" ]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
export COMPLEMENT_HOST_MOUNTS="$editable_mount"
fi
extra_test_args=() extra_test_args=()

View file

@ -27,7 +27,7 @@ import time
import urllib.request import urllib.request
from os import path from os import path
from tempfile import TemporaryDirectory from tempfile import TemporaryDirectory
from typing import Any, List, Optional, cast from typing import Any, List, Optional
import attr import attr
import click import click
@ -174,9 +174,7 @@ def _prepare() -> None:
click.get_current_context().abort() click.get_current_context().abort()
# Switch to the release branch. # Switch to the release branch.
# Cast safety: parse() won't return a version.LegacyVersion from our parsed_new_version = version.parse(new_version)
# version string format.
parsed_new_version = cast(version.Version, version.parse(new_version))
# We assume for debian changelogs that we only do RCs or full releases. # We assume for debian changelogs that we only do RCs or full releases.
assert not parsed_new_version.is_devrelease assert not parsed_new_version.is_devrelease
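For context on the dropped cast above: the packaging bump to 22.0 elsewhere in this change set removed LegacyVersion, so parse() now always returns a Version and the release flags can be checked directly. A small sketch, using an illustrative version string:

    from packaging import version

    v = version.parse("1.74.0rc1")
    assert not v.is_devrelease    # dev builds are rejected by the script
    assert v.is_prerelease        # "rc1" marks a release candidate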

25 stubs/icu.pyi Normal file
View file

@ -0,0 +1,25 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stub for PyICU.
class Locale:
@staticmethod
def getDefault() -> Locale: ...
class BreakIterator:
@staticmethod
def createWordInstance(locale: Locale) -> BreakIterator: ...
def setText(self, text: str) -> None: ...
def nextBoundary(self) -> int: ...
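To show how this minimal stub maps onto real PyICU usage, here is a rough sketch of splitting a search term into word segments (the sample string is illustrative; a negative boundary is ICU's end-of-text signal):

    from typing import List

    import icu

    def split_words(search_term: str) -> List[str]:
        breaker = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
        breaker.setText(search_term)
        segments = []
        start = 0
        while True:
            end = breaker.nextBoundary()
            if end < 0:   # end of text
                break
            segments.append(search_term[start:end])
            start = end
        # Whitespace runs come back as their own segments, which a caller
        # would typically filter out.
        return segments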

View file

@ -45,7 +45,7 @@ class PushRuleEvaluator:
notification_power_levels: Mapping[str, int], notification_power_levels: Mapping[str, int],
related_events_flattened: Mapping[str, Mapping[str, str]], related_events_flattened: Mapping[str, Mapping[str, str]],
related_event_match_enabled: bool, related_event_match_enabled: bool,
room_version_feature_flags: list[str], room_version_feature_flags: Tuple[str, ...],
msc3931_enabled: bool, msc3931_enabled: bool,
): ... ): ...
def run( def run(

View file

@ -222,6 +222,7 @@ def main() -> None:
args = parser.parse_args() args = parser.parse_args()
config: Optional[Dict[str, Any]] = None
if "config" in args and args.config: if "config" in args and args.config:
config = yaml.safe_load(args.config) config = yaml.safe_load(args.config)
@ -229,7 +230,7 @@ def main() -> None:
secret = args.shared_secret secret = args.shared_secret
else: else:
# argparse should check that we have either config or shared secret # argparse should check that we have either config or shared secret
assert config assert config is not None
secret = config.get("registration_shared_secret") secret = config.get("registration_shared_secret")
secret_file = config.get("registration_shared_secret_path") secret_file = config.get("registration_shared_secret_path")
@ -244,7 +245,7 @@ def main() -> None:
if args.server_url: if args.server_url:
server_url = args.server_url server_url = args.server_url
elif config: elif config is not None:
server_url = _find_client_listener(config) server_url = _find_client_listener(config)
if not server_url: if not server_url:
server_url = _DEFAULT_SERVER_URL server_url = _DEFAULT_SERVER_URL
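A small illustration of why the script now compares against None instead of relying on truthiness (the inline YAML string is illustrative):

    import yaml

    config = yaml.safe_load("{}")   # an empty, but valid, config file
    assert config == {}             # parsed successfully...
    assert not config               # ...yet falsy, so `assert config` would fail
    assert config is not None       # the check the script performs instead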

View file

@ -152,6 +152,7 @@ class EduTypes:
class RejectedReason: class RejectedReason:
AUTH_ERROR: Final = "auth_error" AUTH_ERROR: Final = "auth_error"
OVERSIZED_EVENT: Final = "oversized_event"
class RoomCreationPreset: class RoomCreationPreset:
@ -230,6 +231,9 @@ class EventContentFields:
# The authorising user for joining a restricted room. # The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server" AUTHORISING_USER: Final = "join_authorised_via_users_server"
# an unspecced field added to to-device messages to identify them uniquely-ish
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
class RoomTypes: class RoomTypes:
"""Understood values of the room_type field of m.room.create events.""" """Understood values of the room_type field of m.room.create events."""

View file

@ -300,10 +300,8 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError): class UnrecognizedRequestError(SynapseError):
"""An error indicating we don't understand the request you're trying to make""" """An error indicating we don't understand the request you're trying to make"""
def __init__( def __init__(self, msg: str = "Unrecognized request", code: int = 400):
self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED super().__init__(code, msg, Codes.UNRECOGNIZED)
):
super().__init__(400, msg, errcode)
class NotFoundError(SynapseError): class NotFoundError(SynapseError):
@ -426,8 +424,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError): class EventSizeError(SynapseError):
"""An error raised when an event is too big.""" """An error raised when an event is too big."""
def __init__(self, msg: str): def __init__(self, msg: str, unpersistable: bool):
"""
unpersistable:
if True, the PDU must not be persisted, not even as a rejected PDU
when received over federation.
This is notably true when the entire PDU exceeds the size limit for a PDU,
(as opposed to an individual key's size limit being exceeded).
"""
super().__init__(413, msg, Codes.TOO_LARGE) super().__init__(413, msg, Codes.TOO_LARGE)
self.unpersistable = unpersistable
class LoginError(SynapseError): class LoginError(SynapseError):
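Later federation changes in this diff consume the new flag when validating received PDUs. As a hedged sketch (call sites simplified, the validator name is hypothetical), the intended branching looks roughly like:

```python
try:
    check_size_limits(event)  # hypothetical validator raising EventSizeError
except EventSizeError as e:
    if e.unpersistable:
        # No version of Synapse would ever have accepted this PDU: drop it.
        raise
    # An older Synapse would have accepted it: keep it, but mark it rejected.
    context.rejected = RejectedReason.OVERSIZED_EVENT
```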

View file

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from typing import Callable, Dict, List, Optional from typing import Callable, Dict, Optional, Tuple
import attr import attr
@ -103,7 +103,7 @@ class RoomVersion:
# is not enough to mark it "supported": the push rule evaluator also needs to # is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, making conditions # support the flag. Unknown flags are ignored by the evaluator, making conditions
# fail if used. # fail if used.
msc3931_push_features: List[str] # values from PushRuleRoomFlag msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
class RoomVersions: class RoomVersions:
@ -124,7 +124,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V2 = RoomVersion( V2 = RoomVersion(
"2", "2",
@ -143,7 +143,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V3 = RoomVersion( V3 = RoomVersion(
"3", "3",
@ -162,7 +162,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V4 = RoomVersion( V4 = RoomVersion(
"4", "4",
@ -181,7 +181,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V5 = RoomVersion( V5 = RoomVersion(
"5", "5",
@ -200,7 +200,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V6 = RoomVersion( V6 = RoomVersion(
"6", "6",
@ -219,7 +219,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
MSC2176 = RoomVersion( MSC2176 = RoomVersion(
"org.matrix.msc2176", "org.matrix.msc2176",
@ -238,7 +238,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V7 = RoomVersion( V7 = RoomVersion(
"7", "7",
@ -257,7 +257,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V8 = RoomVersion( V8 = RoomVersion(
"8", "8",
@ -276,7 +276,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V9 = RoomVersion( V9 = RoomVersion(
"9", "9",
@ -295,7 +295,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
MSC3787 = RoomVersion( MSC3787 = RoomVersion(
"org.matrix.msc3787", "org.matrix.msc3787",
@ -314,7 +314,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True, msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
V10 = RoomVersion( V10 = RoomVersion(
"10", "10",
@ -333,7 +333,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True, msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True, msc3667_int_only_power_levels=True,
msc3931_push_features=[], msc3931_push_features=(),
) )
MSC2716v4 = RoomVersion( MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4", "org.matrix.msc2716v4",
@ -352,7 +352,7 @@ class RoomVersions:
msc2716_redactions=True, msc2716_redactions=True,
msc3787_knock_restricted_join_rule=False, msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False, msc3667_int_only_power_levels=False,
msc3931_push_features=[], msc3931_push_features=(),
) )
MSC1767v10 = RoomVersion( MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10" # MSC1767 (Extensible Events) based on room version "10"
@ -372,7 +372,7 @@ class RoomVersions:
msc2716_redactions=False, msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True, msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True, msc3667_int_only_power_levels=True,
msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS], msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
) )
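Each `msc3931_push_features` value is now an immutable tuple rather than a list; immutability of the flag set is the assumed motivation. The membership check used by push-rule code is unchanged, roughly:

```python
# Illustrative fragment using names from the diff above.
room_version = RoomVersions.MSC1767v10
if PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features:
    ...  # the evaluator may apply extensible-events (MSC3931) conditions
```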

View file

@ -44,40 +44,8 @@ from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.rest import ClientRestResource
from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
events,
initial_sync,
login,
presence,
profile,
push_rule,
read_marker,
receipts,
relations,
room,
room_batch,
room_keys,
sendtodevice,
sync,
tags,
user_directory,
versions,
voip,
)
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
KeyUploadServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.register import (
RegisterRestServlet,
RegistrationTokenValidityRestServlet,
)
from synapse.rest.health import HealthResource from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyResource from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree from synapse.rest.synapse.client import build_synapse_client_resource_tree
@ -200,45 +168,7 @@ class GenericWorkerServer(HomeServer):
if name == "metrics": if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client": elif name == "client":
resource = JsonResource(self, canonical_json=False) resource: Resource = ClientRestResource(self)
RegisterRestServlet(self).register(resource)
RegistrationTokenValidityRestServlet(self).register(resource)
login.register_servlets(self, resource)
ThreepidRestServlet(self).register(resource)
WhoamiRestServlet(self).register(resource)
DevicesRestServlet(self).register(resource)
# Read-only
KeyUploadServlet(self).register(resource)
KeyQueryServlet(self).register(resource)
KeyChangesServlet(self).register(resource)
OneTimeKeyServlet(self).register(resource)
voip.register_servlets(self, resource)
push_rule.register_servlets(self, resource)
versions.register_servlets(self, resource)
profile.register_servlets(self, resource)
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
room.register_servlets(self, resource, is_worker=True)
relations.register_servlets(self, resource)
room.register_deprecated_servlets(self, resource)
initial_sync.register_servlets(self, resource)
room_batch.register_servlets(self, resource)
room_keys.register_servlets(self, resource)
tags.register_servlets(self, resource)
account_data.register_servlets(self, resource)
receipts.register_servlets(self, resource)
read_marker.register_servlets(self, resource)
sendtodevice.register_servlets(self, resource)
user_directory.register_servlets(self, resource)
presence.register_servlets(self, resource)
resources[CLIENT_API_PREFIX] = resource resources[CLIENT_API_PREFIX] = resource

View file

@ -245,7 +245,9 @@ class ApplicationService:
return True return True
# likewise with the room's aliases (if it has any) # likewise with the room's aliases (if it has any)
alias_list = await store.get_aliases_for_room(room_id) alias_list = await store.get_aliases_for_room(
room_id, on_invalidate=cache_context.invalidate
)
for alias in alias_list: for alias in alias_list:
if self.is_room_alias_in_namespace(alias): if self.is_room_alias_in_namespace(alias):
return True return True
@ -311,7 +313,9 @@ class ApplicationService:
# Find all the rooms the sender is in # Find all the rooms the sender is in
if self.is_interested_in_user(user_id.to_string()): if self.is_interested_in_user(user_id.to_string()):
return True return True
room_ids = await store.get_rooms_for_user(user_id.to_string()) room_ids = await store.get_rooms_for_user(
user_id.to_string(), on_invalidate=cache_context.invalidate
)
# Then find out if the appservice is interested in any of those rooms # Then find out if the appservice is interested in any of those rooms
for room_id in room_ids: for room_id in room_ids:
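Both hunks above thread `on_invalidate=cache_context.invalidate` into nested cached lookups so that invalidating the inner cache entry also evicts the stale outer result. A hedged sketch of that pattern, assuming Synapse's `@cached` descriptor and with invented class and method bodies:

```python
from typing import List

from synapse.util.caches.descriptors import cached


class ExampleInterestChecker:
    @cached()
    async def get_aliases_for_room(self, room_id: str) -> List[str]:
        return await self._load_aliases(room_id)  # hypothetical DB read

    @cached(cache_context=True)
    async def is_interested_in_room(self, room_id: str, cache_context) -> bool:
        # Chain the invalidation: if the alias cache entry is invalidated,
        # this cached result is invalidated along with it.
        aliases = await self.get_aliases_for_room(
            room_id, on_invalidate=cache_context.invalidate
        )
        return any(alias.startswith("#bridge_") for alias in aliases)
```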

View file

@ -33,6 +33,9 @@ def validate_config(
config: the configuration value to be validated config: the configuration value to be validated
config_path: the path within the config file. This will be used as a basis config_path: the path within the config file. This will be used as a basis
for the error message. for the error message.
Raises:
ConfigError, if validation fails.
""" """
try: try:
jsonschema.validate(config, json_schema) jsonschema.validate(config, json_schema)

View file

@ -13,12 +13,13 @@
# limitations under the License. # limitations under the License.
import logging import logging
from typing import Any, Iterable from typing import Any, Iterable, Optional, Tuple
from synapse.api.constants import EventTypes from synapse.api.constants import EventTypes
from synapse.config._base import Config, ConfigError from synapse.config._base import Config, ConfigError
from synapse.config._util import validate_config from synapse.config._util import validate_config
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.types.state import StateFilter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -26,16 +27,20 @@ logger = logging.getLogger(__name__)
class ApiConfig(Config): class ApiConfig(Config):
section = "api" section = "api"
room_prejoin_state: StateFilter
track_puppetted_users_ips: bool
def read_config(self, config: JsonDict, **kwargs: Any) -> None: def read_config(self, config: JsonDict, **kwargs: Any) -> None:
validate_config(_MAIN_SCHEMA, config, ()) validate_config(_MAIN_SCHEMA, config, ())
self.room_prejoin_state = list(self._get_prejoin_state_types(config)) self.room_prejoin_state = StateFilter.from_types(
self._get_prejoin_state_entries(config)
)
self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False) self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False)
def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: def _get_prejoin_state_entries(
"""Get the event types to include in the prejoin state self, config: JsonDict
) -> Iterable[Tuple[str, Optional[str]]]:
Parses the config and returns an iterable of the event types to be included. """Get the event types and state keys to include in the prejoin state."""
"""
room_prejoin_state_config = config.get("room_prejoin_state") or {} room_prejoin_state_config = config.get("room_prejoin_state") or {}
# backwards-compatibility support for room_invite_state_types # backwards-compatibility support for room_invite_state_types
@ -50,33 +55,39 @@ class ApiConfig(Config):
logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING) logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)
yield from config["room_invite_state_types"] for event_type in config["room_invite_state_types"]:
yield event_type, None
return return
if not room_prejoin_state_config.get("disable_default_event_types"): if not room_prejoin_state_config.get("disable_default_event_types"):
yield from _DEFAULT_PREJOIN_STATE_TYPES yield from _DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS
yield from room_prejoin_state_config.get("additional_event_types", []) for entry in room_prejoin_state_config.get("additional_event_types", []):
if isinstance(entry, str):
yield entry, None
else:
yield entry
_ROOM_INVITE_STATE_TYPES_WARNING = """\ _ROOM_INVITE_STATE_TYPES_WARNING = """\
WARNING: The 'room_invite_state_types' configuration setting is now deprecated, WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
and replaced with 'room_prejoin_state'. New features may not work correctly and replaced with 'room_prejoin_state'. New features may not work correctly
unless 'room_invite_state_types' is removed. See the sample configuration file for unless 'room_invite_state_types' is removed. See the config documentation at
details of 'room_prejoin_state'. https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_prejoin_state
for details of 'room_prejoin_state'.
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
""" """
_DEFAULT_PREJOIN_STATE_TYPES = [ _DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS = [
EventTypes.JoinRules, (EventTypes.JoinRules, ""),
EventTypes.CanonicalAlias, (EventTypes.CanonicalAlias, ""),
EventTypes.RoomAvatar, (EventTypes.RoomAvatar, ""),
EventTypes.RoomEncryption, (EventTypes.RoomEncryption, ""),
EventTypes.Name, (EventTypes.Name, ""),
# Per MSC1772. # Per MSC1772.
EventTypes.Create, (EventTypes.Create, ""),
# Per MSC3173. # Per MSC3173.
EventTypes.Topic, (EventTypes.Topic, ""),
] ]
@ -90,7 +101,17 @@ _ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {
"disable_default_event_types": {"type": "boolean"}, "disable_default_event_types": {"type": "boolean"},
"additional_event_types": { "additional_event_types": {
"type": "array", "type": "array",
"items": {"type": "string"}, "items": {
"oneOf": [
{"type": "string"},
{
"type": "array",
"items": {"type": "string"},
"minItems": 2,
"maxItems": 2,
},
],
},
}, },
}, },
}, },
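With the schema change, `additional_event_types` entries may be either a bare event type (matching any state key) or a two-item `[event type, state key]` pair. A self-contained illustration (sample values, not Synapse code) of how those shapes map onto the `(event_type, state_key)` tuples consumed by `StateFilter.from_types`:

```python
additional_event_types = [
    "org.example.custom.event",               # bare string: any state_key
    ["m.room.member", "@alice:example.org"],  # pair: exactly this state_key
]

entries = [
    (entry, None) if isinstance(entry, str) else (entry[0], entry[1])
    for entry in additional_event_types
]
assert entries == [
    ("org.example.custom.event", None),
    ("m.room.member", "@alice:example.org"),
]
```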

View file

@ -26,6 +26,7 @@ class PushConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None: def read_config(self, config: JsonDict, **kwargs: Any) -> None:
push_config = config.get("push") or {} push_config = config.get("push") or {}
self.push_include_content = push_config.get("include_content", True) self.push_include_content = push_config.get("include_content", True)
self.enable_push = push_config.get("enabled", True)
self.push_group_unread_count_by_room = push_config.get( self.push_group_unread_count_by_room = push_config.get(
"group_unread_count_by_room", True "group_unread_count_by_room", True
) )

View file

@ -14,7 +14,6 @@
import abc import abc
import logging import logging
import urllib
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
import attr import attr
@ -813,31 +812,27 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
results = {} results = {}
async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None: async def get_keys(key_to_fetch_item: _FetchKeyRequest) -> None:
server_name = key_to_fetch_item.server_name server_name = key_to_fetch_item.server_name
key_ids = key_to_fetch_item.key_ids
try: try:
keys = await self.get_server_verify_key_v2_direct(server_name, key_ids) keys = await self.get_server_verify_keys_v2_direct(server_name)
results[server_name] = keys results[server_name] = keys
except KeyLookupError as e: except KeyLookupError as e:
logger.warning( logger.warning("Error looking up keys from %s: %s", server_name, e)
"Error looking up keys %s from %s: %s", key_ids, server_name, e
)
except Exception: except Exception:
logger.exception("Error getting keys %s from %s", key_ids, server_name) logger.exception("Error getting keys from %s", server_name)
await yieldable_gather_results(get_key, keys_to_fetch) await yieldable_gather_results(get_keys, keys_to_fetch)
return results return results
async def get_server_verify_key_v2_direct( async def get_server_verify_keys_v2_direct(
self, server_name: str, key_ids: Iterable[str] self, server_name: str
) -> Dict[str, FetchKeyResult]: ) -> Dict[str, FetchKeyResult]:
""" """
Args: Args:
server_name: server_name: Server to request keys from
key_ids:
Returns: Returns:
Map from key ID to lookup result Map from key ID to lookup result
@ -845,57 +840,41 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
Raises: Raises:
KeyLookupError if there was a problem making the lookup KeyLookupError if there was a problem making the lookup
""" """
keys: Dict[str, FetchKeyResult] = {} time_now_ms = self.clock.time_msec()
try:
for requested_key_id in key_ids: response = await self.client.get_json(
# we may have found this key as a side-effect of asking for another. destination=server_name,
if requested_key_id in keys: path="/_matrix/key/v2/server",
continue ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an
time_now_ms = self.clock.time_msec() # easy request to handle, so if it doesn't reply within 10s, it's
try: # probably not going to.
response = await self.client.get_json( #
destination=server_name, # Furthermore, when we are acting as a notary server, we cannot
path="/_matrix/key/v2/server/" # wait all day for all of the origin servers, as the requesting
+ urllib.parse.quote(requested_key_id, safe=""), # server will otherwise time out before we can respond.
ignore_backoff=True, #
# we only give the remote server 10s to respond. It should be an # (Note that get_json may make 4 attempts, so this can still take
# easy request to handle, so if it doesn't reply within 10s, it's # almost 45 seconds to fetch the headers, plus up to another 60s to
# probably not going to. # read the response).
# timeout=10000,
# Furthermore, when we are acting as a notary server, we cannot
# wait all day for all of the origin servers, as the requesting
# server will otherwise time out before we can respond.
#
# (Note that get_json may make 4 attempts, so this can still take
# almost 45 seconds to fetch the headers, plus up to another 60s to
# read the response).
timeout=10000,
)
except (NotRetryingDestination, RequestSendFailed) as e:
# these both have str() representations which we can't really improve
# upon
raise KeyLookupError(str(e))
except HttpResponseException as e:
raise KeyLookupError("Remote server returned an error: %s" % (e,))
assert isinstance(response, dict)
if response["server_name"] != server_name:
raise KeyLookupError(
"Expected a response for server %r not %r"
% (server_name, response["server_name"])
)
response_keys = await self.process_v2_response(
from_server=server_name,
response_json=response,
time_added_ms=time_now_ms,
) )
await self.store.store_server_verify_keys( except (NotRetryingDestination, RequestSendFailed) as e:
server_name, # these both have str() representations which we can't really improve
time_now_ms, # upon
((server_name, key_id, key) for key_id, key in response_keys.items()), raise KeyLookupError(str(e))
) except HttpResponseException as e:
keys.update(response_keys) raise KeyLookupError("Remote server returned an error: %s" % (e,))
return keys assert isinstance(response, dict)
if response["server_name"] != server_name:
raise KeyLookupError(
"Expected a response for server %r not %r"
% (server_name, response["server_name"])
)
return await self.process_v2_response(
from_server=server_name,
response_json=response,
time_added_ms=time_now_ms,
)
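The rewritten fetcher asks for the whole key document in one request to `/_matrix/key/v2/server` rather than one request per key ID. For orientation, the response has roughly the following shape per the federation key-exchange format (all values below are invented):

```python
response = {
    "server_name": "remote.example.org",
    "valid_until_ts": 1_670_000_000_000,
    "verify_keys": {
        "ed25519:abc123": {"key": "<unpadded base64 public key>"},
    },
    "old_verify_keys": {
        "ed25519:old456": {"key": "<base64>", "expired_ts": 1_600_000_000_000},
    },
    "signatures": {
        "remote.example.org": {"ed25519:abc123": "<base64 signature>"},
    },
}
# Mirroring the sanity check above: the document must describe the server we asked.
assert response["server_name"] == "remote.example.org"
```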

View file

@ -52,6 +52,7 @@ from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS, KNOWN_ROOM_VERSIONS,
EventFormatVersions, EventFormatVersions,
RoomVersion, RoomVersion,
RoomVersions,
) )
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id
@ -341,19 +342,80 @@ def check_state_dependent_auth_rules(
logger.debug("Allowing! %s", event) logger.debug("Allowing! %s", event)
# Set of room versions where Synapse did not apply event key size limits
# in bytes, but rather in codepoints.
# In these room versions, we are more lenient with event size validation.
LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
RoomVersions.V1,
RoomVersions.V2,
RoomVersions.V3,
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.MSC2716v4,
RoomVersions.MSC1767v10,
}
def _check_size_limits(event: "EventBase") -> None: def _check_size_limits(event: "EventBase") -> None:
if len(event.user_id) > 255: """
raise EventSizeError("'user_id' too large") Checks the size limits in a PDU.
if len(event.room_id) > 255:
raise EventSizeError("'room_id' too large") The entire size limit of the PDU is checked first.
if event.is_state() and len(event.state_key) > 255: Then the size of fields is checked, first in codepoints and then in bytes.
raise EventSizeError("'state_key' too large")
if len(event.type) > 255: The codepoint size limits are only for Synapse compatibility.
raise EventSizeError("'type' too large")
if len(event.event_id) > 255: Raises:
raise EventSizeError("'event_id' too large") EventSizeError:
when a size limit has been violated.
unpersistable=True if Synapse never would have accepted the event and
the PDU must NOT be persisted.
unpersistable=False if a prior version of Synapse would have accepted the
event and so the PDU must be persisted as rejected to avoid
breaking the room.
"""
# Whole PDU check
if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE: if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
raise EventSizeError("event too large") raise EventSizeError("event too large", unpersistable=True)
# Codepoint size check: Synapse always enforced these limits, so apply
# them strictly.
if len(event.user_id) > 255:
raise EventSizeError("'user_id' too large", unpersistable=True)
if len(event.room_id) > 255:
raise EventSizeError("'room_id' too large", unpersistable=True)
if event.is_state() and len(event.state_key) > 255:
raise EventSizeError("'state_key' too large", unpersistable=True)
if len(event.type) > 255:
raise EventSizeError("'type' too large", unpersistable=True)
if len(event.event_id) > 255:
raise EventSizeError("'event_id' too large", unpersistable=True)
strict_byte_limits = (
event.room_version not in LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS
)
# Byte size check: if these fail, then be lenient to avoid breaking rooms.
if len(event.user_id.encode("utf-8")) > 255:
raise EventSizeError("'user_id' too large", unpersistable=strict_byte_limits)
if len(event.room_id.encode("utf-8")) > 255:
raise EventSizeError("'room_id' too large", unpersistable=strict_byte_limits)
if event.is_state() and len(event.state_key.encode("utf-8")) > 255:
raise EventSizeError("'state_key' too large", unpersistable=strict_byte_limits)
if len(event.type.encode("utf-8")) > 255:
raise EventSizeError("'type' too large", unpersistable=strict_byte_limits)
if len(event.event_id.encode("utf-8")) > 255:
raise EventSizeError("'event_id' too large", unpersistable=strict_byte_limits)
def _check_create(event: "EventBase") -> None: def _check_create(event: "EventBase") -> None:

View file

@ -28,8 +28,8 @@ from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.state import StateHandler from synapse.state import StateHandler
from synapse.storage.databases.main import DataStore from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter
from synapse.types import EventID, JsonDict from synapse.types import EventID, JsonDict
from synapse.types.state import StateFilter
from synapse.util import Clock from synapse.util import Clock
from synapse.util.stringutils import random_string from synapse.util.stringutils import random_string

View file

@ -23,7 +23,7 @@ from synapse.types import JsonDict, StateMap
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.storage.controllers import StorageControllers from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter from synapse.types.state import StateFilter
@attr.s(slots=True, auto_attribs=True) @attr.s(slots=True, auto_attribs=True)

View file

@ -28,8 +28,14 @@ from typing import (
) )
import attr import attr
from canonicaljson import encode_canonical_json
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes from synapse.api.constants import (
MAX_PDU_SIZE,
EventContentFields,
EventTypes,
RelationTypes,
)
from synapse.api.errors import Codes, SynapseError from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict from synapse.types import JsonDict
@ -674,3 +680,27 @@ def validate_canonicaljson(value: Any) -> None:
elif not isinstance(value, (bool, str)) and value is not None: elif not isinstance(value, (bool, str)) and value is not None:
# Other potential JSON values (bool, None, str) are safe. # Other potential JSON values (bool, None, str) are safe.
raise SynapseError(400, "Unknown JSON value", Codes.BAD_JSON) raise SynapseError(400, "Unknown JSON value", Codes.BAD_JSON)
def maybe_upsert_event_field(
event: EventBase, container: JsonDict, key: str, value: object
) -> bool:
"""Upsert an event field, but only if this doesn't make the event too large.
Returns true iff the upsert took place.
"""
if key in container:
old_value: object = container[key]
container[key] = value
# NB: here and below, we assume that passing a non-None `time_now` argument to
# get_pdu_json doesn't increase the size of the encoded result.
upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
if not upsert_okay:
container[key] = old_value
else:
container[key] = value
upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
if not upsert_okay:
del container[key]
return upsert_okay
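A hedged usage sketch (the event, state, and logger names are placeholders): attach stripped room state to an invite's `unsigned` section only if the event still fits within `MAX_PDU_SIZE`, and make the silently-omitted case observable via the boolean return value.

```python
added = maybe_upsert_event_field(
    invite_event, invite_event.unsigned, "invite_room_state", stripped_state
)
if not added:
    logger.debug("invite_room_state omitted: event would exceed MAX_PDU_SIZE")
```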

View file

@ -771,17 +771,28 @@ class FederationClient(FederationBase):
""" """
if synapse_error is None: if synapse_error is None:
synapse_error = e.to_synapse_error() synapse_error = e.to_synapse_error()
# There is no good way to detect an "unknown" endpoint. # MSC3743 specifies that servers should return a 404 or 405 with an errcode
# of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
# to an unknown method, respectively.
# #
# Dendrite returns a 404 (with a body of "404 page not found"); # Older versions of servers don't properly handle this. This needs to be
# Conduit returns a 404 (with no body); and Synapse returns a 400 # rather specific as some endpoints truly do return 404 errors.
# with M_UNRECOGNIZED.
#
# This needs to be rather specific as some endpoints truly do return 404
# errors.
return ( return (
e.code == 404 and (not e.response or e.response == b"404 page not found") # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
) or (e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED) (e.code == 404 or e.code == 405)
and (
# Older Dendrites returned a text or empty body.
# Older Conduit returned an empty body.
not e.response
or e.response == b"404 page not found"
# The proper response JSON with M_UNRECOGNIZED errcode.
or synapse_error.errcode == Codes.UNRECOGNIZED
)
) or (
# Older Synapses returned a 400 error.
e.code == 400
and synapse_error.errcode == Codes.UNRECOGNIZED
)
async def _try_destination_list( async def _try_destination_list(
self, self,
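The same detection rule, restated as a standalone predicate (an illustrative helper, not Synapse's API): 404/405 responses with an empty body, Dendrite's plain-text body, or an `M_UNRECOGNIZED` errcode count as an unknown endpoint, as does the legacy Synapse behaviour of 400 plus `M_UNRECOGNIZED`.

```python
def looks_like_unknown_endpoint(code: int, body: bytes, errcode: str) -> bool:
    if code in (404, 405):
        return (
            not body
            or body == b"404 page not found"  # older Dendrite
            or errcode == "M_UNRECOGNIZED"
        )
    return code == 400 and errcode == "M_UNRECOGNIZED"  # older Synapse


assert looks_like_unknown_endpoint(404, b"", "")
assert looks_like_unknown_endpoint(405, b'{"errcode":"M_UNRECOGNIZED"}', "M_UNRECOGNIZED")
assert not looks_like_unknown_endpoint(404, b'{"errcode":"M_NOT_FOUND"}', "M_NOT_FOUND")
```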

View file

@ -647,7 +647,7 @@ class FederationSender(AbstractFederationSender):
room_id = receipt.room_id room_id = receipt.room_id
# Work out which remote servers should be poked and poke them. # Work out which remote servers should be poked and poke them.
domains_set = await self._storage_controllers.state.get_current_hosts_in_room( domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id room_id
) )
domains = [ domains = [

View file

@ -641,7 +641,7 @@ class PerDestinationQueue:
if not message_id: if not message_id:
continue continue
set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
edus = [ edus = [
Edu( Edu(

View file

@ -578,9 +578,6 @@ class ApplicationServicesHandler:
device_id, device_id,
), messages in recipient_device_to_messages.items(): ), messages in recipient_device_to_messages.items():
for message_json in messages: for message_json in messages:
# Remove 'message_id' from the to-device message, as it's an internal ID
message_json.pop("message_id", None)
message_payload.append( message_payload.append(
{ {
"to_user_id": user_id, "to_user_id": user_id,
@ -615,8 +612,8 @@ class ApplicationServicesHandler:
) )
# Fetch the users who have modified their device list since then. # Fetch the users who have modified their device list since then.
users_with_changed_device_lists = ( users_with_changed_device_lists = await self.store.get_all_devices_changed(
await self.store.get_users_whose_devices_changed(from_key, to_key=new_key) from_key, to_key=new_key
) )
# Filter out any users the application service is not interested in # Filter out any users the application service is not interested in

View file

@ -996,7 +996,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
# Check if we are partially joining any rooms. If so we need to store # Check if we are partially joining any rooms. If so we need to store
# all device list updates so that we can handle them correctly once we # all device list updates so that we can handle them correctly once we
# know who is in the room. # know who is in the room.
# TODO(faster joins): this fetches and processes a bunch of data that we don't # TODO(faster_joins): this fetches and processes a bunch of data that we don't
# use. Could be replaced by a tighter query e.g. # use. Could be replaced by a tighter query e.g.
# SELECT EXISTS(SELECT 1 FROM partial_state_rooms) # SELECT EXISTS(SELECT 1 FROM partial_state_rooms)
partial_rooms = await self.store.get_partial_state_room_resync_info() partial_rooms = await self.store.get_partial_state_room_resync_info()

View file

@ -15,7 +15,7 @@
import logging import logging
from typing import TYPE_CHECKING, Any, Dict from typing import TYPE_CHECKING, Any, Dict
from synapse.api.constants import EduTypes, ToDeviceEventTypes from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background from synapse.logging.context import run_in_background
@ -216,14 +216,24 @@ class DeviceMessageHandler:
""" """
sender_user_id = requester.user.to_string() sender_user_id = requester.user.to_string()
message_id = random_string(16) set_tag(SynapseTags.TO_DEVICE_TYPE, message_type)
set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) set_tag(SynapseTags.TO_DEVICE_SENDER, sender_user_id)
log_kv({"number_of_to_device_messages": len(messages)})
set_tag("sender", sender_user_id)
local_messages = {} local_messages = {}
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items(): for user_id, by_device in messages.items():
# add an opentracing log entry for each message
for device_id, message_content in by_device.items():
log_kv(
{
"event": "send_to_device_message",
"user_id": user_id,
"device_id": device_id,
EventContentFields.TO_DEVICE_MSGID: message_content.get(
EventContentFields.TO_DEVICE_MSGID
),
}
)
# Ratelimit local cross-user key requests by the sending device. # Ratelimit local cross-user key requests by the sending device.
if ( if (
message_type == ToDeviceEventTypes.RoomKeyRequest message_type == ToDeviceEventTypes.RoomKeyRequest
@ -233,6 +243,7 @@ class DeviceMessageHandler:
requester, (sender_user_id, requester.device_id) requester, (sender_user_id, requester.device_id)
) )
if not allowed: if not allowed:
log_kv({"message": f"dropping key requests to {user_id}"})
logger.info( logger.info(
"Dropping room_key_request from %s to %s due to rate limit", "Dropping room_key_request from %s to %s due to rate limit",
sender_user_id, sender_user_id,
@ -247,18 +258,11 @@ class DeviceMessageHandler:
"content": message_content, "content": message_content,
"type": message_type, "type": message_type,
"sender": sender_user_id, "sender": sender_user_id,
"message_id": message_id,
} }
for device_id, message_content in by_device.items() for device_id, message_content in by_device.items()
} }
if messages_by_device: if messages_by_device:
local_messages[user_id] = messages_by_device local_messages[user_id] = messages_by_device
log_kv(
{
"user_id": user_id,
"device_id": list(messages_by_device),
}
)
else: else:
destination = get_domain_from_id(user_id) destination = get_domain_from_id(user_id)
remote_messages.setdefault(destination, {})[user_id] = by_device remote_messages.setdefault(destination, {})[user_id] = by_device
@ -267,7 +271,11 @@ class DeviceMessageHandler:
remote_edu_contents = {} remote_edu_contents = {}
for destination, messages in remote_messages.items(): for destination, messages in remote_messages.items():
log_kv({"destination": destination}) # The EDU contains a "message_id" property which is used for
# idempotence. Make up a random one.
message_id = random_string(16)
log_kv({"destination": destination, "message_id": message_id})
remote_edu_contents[destination] = { remote_edu_contents[destination] = {
"messages": messages, "messages": messages,
"sender": sender_user_id, "sender": sender_user_id,

View file

@ -70,8 +70,8 @@ from synapse.replication.http.federation import (
) )
from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, get_domain_from_id from synapse.types import JsonDict, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server from synapse.visibility import filter_events_for_server
@ -152,6 +152,7 @@ class FederationHandler:
self._federation_event_handler = hs.get_federation_event_handler() self._federation_event_handler = hs.get_federation_event_handler()
self._device_handler = hs.get_device_handler() self._device_handler = hs.get_device_handler()
self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator() self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
self._notifier = hs.get_notifier()
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client( self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs hs
@ -1692,6 +1693,9 @@ class FederationHandler:
self._storage_controllers.state.notify_room_un_partial_stated( self._storage_controllers.state.notify_room_un_partial_stated(
room_id room_id
) )
# Poke the notifier so that other workers see the write to
# the un-partial-stated rooms stream.
self._notifier.notify_replication()
# TODO(faster_joins) update room stats and user directory? # TODO(faster_joins) update room stats and user directory?
# https://github.com/matrix-org/synapse/issues/12814 # https://github.com/matrix-org/synapse/issues/12814

View file

@ -43,6 +43,7 @@ from synapse.api.constants import (
from synapse.api.errors import ( from synapse.api.errors import (
AuthError, AuthError,
Codes, Codes,
EventSizeError,
FederationError, FederationError,
FederationPullAttemptBackoffError, FederationPullAttemptBackoffError,
HttpResponseException, HttpResponseException,
@ -75,7 +76,6 @@ from synapse.replication.http.federation import (
from synapse.state import StateResolutionStore from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
PersistedEventPosition, PersistedEventPosition,
RoomStreamToken, RoomStreamToken,
@ -83,6 +83,7 @@ from synapse.types import (
UserID, UserID,
get_domain_from_id, get_domain_from_id,
) )
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination from synapse.util.retryutils import NotRetryingDestination
@ -1736,6 +1737,15 @@ class FederationEventHandler:
except AuthError as e: except AuthError as e:
logger.warning("Rejecting %r because %s", event, e) logger.warning("Rejecting %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR context.rejected = RejectedReason.AUTH_ERROR
except EventSizeError as e:
if e.unpersistable:
# This event is completely unpersistable.
raise e
# Otherwise, we are somewhat lenient and just persist the event
# as rejected, for moderate compatibility with older Synapse
# versions.
logger.warning("While validating received event %r: %s", event, e)
context.rejected = RejectedReason.OVERSIZED_EVENT
events_and_contexts_to_persist.append((event, context)) events_and_contexts_to_persist.append((event, context))
@ -1781,6 +1791,16 @@ class FederationEventHandler:
# TODO: use a different rejected reason here? # TODO: use a different rejected reason here?
context.rejected = RejectedReason.AUTH_ERROR context.rejected = RejectedReason.AUTH_ERROR
return return
except EventSizeError as e:
if e.unpersistable:
# This event is completely unpersistable.
raise e
# Otherwise, we are somewhat lenient and just persist the event
# as rejected, for moderate compatibility with older Synapse
# versions.
logger.warning("While validating received event %r: %s", event, e)
context.rejected = RejectedReason.OVERSIZED_EVENT
return
# next, check that we have all of the event's auth events. # next, check that we have all of the event's auth events.
# #

View file

@ -50,6 +50,7 @@ from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase, relation_from_event from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
from synapse.events.utils import maybe_upsert_event_field
from synapse.events.validator import EventValidator from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler from synapse.handlers.directory import DirectoryHandler
from synapse.logging import opentracing from synapse.logging import opentracing
@ -59,7 +60,6 @@ from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
MutableStateMap, MutableStateMap,
PersistedEventPosition, PersistedEventPosition,
@ -70,6 +70,7 @@ from synapse.types import (
UserID, UserID,
create_requester, create_requester,
) )
from synapse.types.state import StateFilter
from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError
from synapse.util.async_helpers import Linearizer, gather_results from synapse.util.async_helpers import Linearizer, gather_results
from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.expiringcache import ExpiringCache
@ -1752,12 +1753,15 @@ class EventCreationHandler:
if event.type == EventTypes.Member: if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE: if event.content["membership"] == Membership.INVITE:
event.unsigned[ maybe_upsert_event_field(
"invite_room_state" event,
] = await self.store.get_stripped_room_state_from_event_context( event.unsigned,
context, "invite_room_state",
self.room_prejoin_state_types, await self.store.get_stripped_room_state_from_event_context(
membership_user_id=event.sender, context,
self.room_prejoin_state_types,
membership_user_id=event.sender,
),
) )
invitee = UserID.from_string(event.state_key) invitee = UserID.from_string(event.state_key)
@ -1775,11 +1779,14 @@ class EventCreationHandler:
event.signatures.update(returned_invite.signatures) event.signatures.update(returned_invite.signatures)
if event.content["membership"] == Membership.KNOCK: if event.content["membership"] == Membership.KNOCK:
event.unsigned[ maybe_upsert_event_field(
"knock_room_state" event,
] = await self.store.get_stripped_room_state_from_event_context( event.unsigned,
context, "knock_room_state",
self.room_prejoin_state_types, await self.store.get_stripped_room_state_from_event_context(
context,
self.room_prejoin_state_types,
),
) )
if event.type == EventTypes.Redaction: if event.type == EventTypes.Redaction:

View file

@ -27,9 +27,9 @@ from synapse.handlers.room import ShutdownRoomResponse
from synapse.logging.opentracing import trace from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamKeyType from synapse.types import JsonDict, Requester, StreamKeyType
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock from synapse.util.async_helpers import ReadWriteLock
from synapse.util.stringutils import random_string from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client

View file

@ -1692,10 +1692,12 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
if from_key is not None: if from_key is not None:
# First get all users that have had a presence update # First get all users that have had a presence update
updated_users = stream_change_cache.get_all_entities_changed(from_key) result = stream_change_cache.get_all_entities_changed(from_key)
# Cross-reference users we're interested in with those that have had updates. # Cross-reference users we're interested in with those that have had updates.
if updated_users is not None: if result.hit:
updated_users = result.entities
# If we have the full list of changes for presence we can # If we have the full list of changes for presence we can
# simply check which ones share a room with the user. # simply check which ones share a room with the user.
get_updates_counter.labels("stream").inc() get_updates_counter.labels("stream").inc()
@ -1764,14 +1766,14 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
Returns: Returns:
A list of presence states for the given user to receive. A list of presence states for the given user to receive.
""" """
updated_users = None
if from_key: if from_key:
# Only return updates since the last sync # Only return updates since the last sync
updated_users = self.store.presence_stream_cache.get_all_entities_changed( result = self.store.presence_stream_cache.get_all_entities_changed(from_key)
from_key if result.hit:
) updated_users = result.entities
if not updated_users:
updated_users = []
if updated_users is not None:
# Get the actual presence update for each change # Get the actual presence update for each change
users_to_state = await self.get_presence_handler().current_state_for_users( users_to_state = await self.get_presence_handler().current_state_for_users(
updated_users updated_users
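`get_all_entities_changed` now returns a small result object rather than an `Optional` collection, so callers branch on `.hit` instead of comparing with `None`. A self-contained toy (the result class name here is assumed; only the `.hit`/`.entities` attributes are taken from the diff) showing the calling convention and the fall-back when the cache cannot cover `from_key`:

```python
import attr


@attr.s(auto_attribs=True, frozen=True, slots=True)
class AllEntitiesChangedResult:
    hit: bool
    entities: list


def changed_since(result: AllEntitiesChangedResult, everyone: list) -> list:
    if result.hit:
        return result.entities  # complete list of entities changed since from_key
    return everyone             # cache too short: treat everything as changed


assert changed_since(AllEntitiesChangedResult(True, ["@a:x"]), ["@a:x", "@b:x"]) == ["@a:x"]
assert changed_since(AllEntitiesChangedResult(False, []), ["@a:x", "@b:x"]) == ["@a:x", "@b:x"]
```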

View file

@ -46,8 +46,8 @@ from synapse.replication.http.register import (
ReplicationRegisterServlet, ReplicationRegisterServlet,
) )
from synapse.spam_checker_api import RegistrationBehaviour from synapse.spam_checker_api import RegistrationBehaviour
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID, create_requester from synapse.types import RoomAlias, UserID, create_requester
from synapse.types.state import StateFilter
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.server import HomeServer from synapse.server import HomeServer

View file

@ -62,7 +62,6 @@ from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.handlers.relations import BundledAggregations from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.state import StateFilter
from synapse.streams import EventSource from synapse.streams import EventSource
from synapse.types import ( from synapse.types import (
JsonDict, JsonDict,
@ -77,6 +76,7 @@ from synapse.types import (
UserID, UserID,
create_requester, create_requester,
) )
from synapse.types.state import StateFilter
from synapse.util import stringutils from synapse.util import stringutils
from synapse.util.caches.response_cache import ResponseCache from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name from synapse.util.stringutils import parse_and_validate_server_name

View file

@ -34,7 +34,6 @@ from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.logging import opentracing from synapse.logging import opentracing
from synapse.module_api import NOT_SPAM from synapse.module_api import NOT_SPAM
from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
JsonDict, JsonDict,
Requester, Requester,
@ -45,6 +44,7 @@ from synapse.types import (
create_requester, create_requester,
get_domain_from_id, get_domain_from_id,
) )
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room from synapse.util.distributor import user_left_room

View file

@ -23,8 +23,8 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, SynapseError from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.filtering import Filter from synapse.api.filtering import Filter
from synapse.events import EventBase from synapse.events import EventBase
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StreamKeyType, UserID from synapse.types import JsonDict, StreamKeyType, UserID
from synapse.types.state import StateFilter
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client
if TYPE_CHECKING: if TYPE_CHECKING:

View file

@ -31,19 +31,24 @@ from typing import (
import attr import attr
from prometheus_client import Counter from prometheus_client import Counter
from synapse.api.constants import EventTypes, Membership from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.filtering import FilterCollection from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState from synapse.api.presence import UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase from synapse.events import EventBase
from synapse.handlers.relations import BundledAggregations from synapse.handlers.relations import BundledAggregations
from synapse.logging.context import current_context from synapse.logging.context import current_context
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.logging.opentracing import (
SynapseTags,
log_kv,
set_tag,
start_active_span,
trace,
)
from synapse.push.clientformat import format_push_rules_for_user from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.databases.main.event_push_actions import RoomNotifCounts from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary from synapse.storage.roommember import MemberSummary
from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
DeviceListUpdates, DeviceListUpdates,
JsonDict, JsonDict,
@ -55,6 +60,7 @@ from synapse.types import (
StreamToken, StreamToken,
UserID, UserID,
) )
from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.lrucache import LruCache from synapse.util.caches.lrucache import LruCache
@ -1527,10 +1533,12 @@ class SyncHandler:
# #
# If we don't have that info cached then we get all the users that # If we don't have that info cached then we get all the users that
# share a room with our user and check if those users have changed. # share a room with our user and check if those users have changed.
changed_users = self.store.get_cached_device_list_changes( cache_result = self.store.get_cached_device_list_changes(
since_token.device_list_key since_token.device_list_key
) )
if changed_users is not None: if cache_result.hit:
changed_users = cache_result.entities
result = await self.store.get_rooms_for_users(changed_users) result = await self.store.get_rooms_for_users(changed_users)
for changed_user_id, entries in result.items(): for changed_user_id, entries in result.items():
@ -1583,6 +1591,7 @@ class SyncHandler:
else: else:
return DeviceListUpdates() return DeviceListUpdates()
@trace
async def _generate_sync_entry_for_to_device( async def _generate_sync_entry_for_to_device(
self, sync_result_builder: "SyncResultBuilder" self, sync_result_builder: "SyncResultBuilder"
) -> None: ) -> None:
@ -1602,11 +1611,16 @@ class SyncHandler:
) )
for message in messages: for message in messages:
# We pop here as we shouldn't be sending the message ID down log_kv(
# `/sync` {
message_id = message.pop("message_id", None) "event": "to_device_message",
if message_id: "sender": message["sender"],
set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) "type": message["type"],
EventContentFields.TO_DEVICE_MSGID: message["content"].get(
EventContentFields.TO_DEVICE_MSGID
),
}
)
logger.debug( logger.debug(
"Returning %d to-device messages between %d and %d (current token: %d)", "Returning %d to-device messages between %d and %d (current token: %d)",

View file

@ -420,11 +420,11 @@ class TypingWriterHandler(FollowerTypingHandler):
if last_id == current_id: if last_id == current_id:
return [], current_id, False return [], current_id, False
changed_rooms: Optional[ result = self._typing_stream_change_cache.get_all_entities_changed(last_id)
Iterable[str]
] = self._typing_stream_change_cache.get_all_entities_changed(last_id)
if changed_rooms is None: if result.hit:
changed_rooms: Iterable[str] = result.entities
else:
changed_rooms = self._room_serials changed_rooms = self._room_serials
rows = [] rows = []

View file

@ -577,7 +577,24 @@ def _unrecognised_request_handler(request: Request) -> NoReturn:
Args: Args:
request: Unused, but passed in to match the signature of ServletCallback. request: Unused, but passed in to match the signature of ServletCallback.
""" """
raise UnrecognizedRequestError() raise UnrecognizedRequestError(code=404)
class UnrecognizedRequestResource(resource.Resource):
"""
Similar to twisted.web.resource.NoResource, but returns a JSON 404 with an
errcode of M_UNRECOGNIZED.
"""
def render(self, request: SynapseRequest) -> int:
f = failure.Failure(UnrecognizedRequestError(code=404))
return_json_error(f, request, None)
# A response has already been sent but Twisted requires either NOT_DONE_YET
# or the response bytes as a return value.
return NOT_DONE_YET
def getChild(self, name: str, request: Request) -> resource.Resource:
return self
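A simplified standalone sketch of the same idea (the real class above routes through Synapse's return_json_error and SynapseRequest): a Twisted resource that answers any unknown path with a JSON 404 carrying a Matrix-style M_UNRECOGNIZED errcode instead of Twisted's default HTML NoResource page. The class name is made up.

import json

from twisted.web.resource import Resource


class JsonNotFoundResource(Resource):
    def getChild(self, name, request) -> Resource:
        # Keep returning ourselves so any depth of unknown path lands here.
        return self

    def render(self, request) -> bytes:
        request.setResponseCode(404)
        request.setHeader(b"Content-Type", b"application/json")
        return json.dumps(
            {"errcode": "M_UNRECOGNIZED", "error": "Unrecognized request"}
        ).encode("utf-8")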
class RootRedirect(resource.Resource): class RootRedirect(resource.Resource):

View file

@ -292,8 +292,15 @@ logger = logging.getLogger(__name__)
class SynapseTags: class SynapseTags:
# The message ID of any to_device message processed # The message ID of any to_device EDU processed
TO_DEVICE_MESSAGE_ID = "to_device.message_id" TO_DEVICE_EDU_ID = "to_device.edu_id"
# Details about to-device messages
TO_DEVICE_TYPE = "to_device.type"
TO_DEVICE_SENDER = "to_device.sender"
TO_DEVICE_RECIPIENT = "to_device.recipient"
TO_DEVICE_RECIPIENT_DEVICE = "to_device.recipient_device"
TO_DEVICE_MSGID = "to_device.msgid" # client-generated ID
# Whether the sync response has new data to be returned to the client. # Whether the sync response has new data to be returned to the client.
SYNC_RESULT = "sync.new_data" SYNC_RESULT = "sync.new_data"

View file

@ -111,7 +111,6 @@ from synapse.storage.background_updates import (
) )
from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
DomainSpecificString, DomainSpecificString,
JsonDict, JsonDict,
@ -124,6 +123,7 @@ from synapse.types import (
UserProfile, UserProfile,
create_requester, create_requester,
) )
from synapse.types.state import StateFilter
from synapse.util import Clock from synapse.util import Clock
from synapse.util.async_helpers import maybe_awaitable from synapse.util.async_helpers import maybe_awaitable
from synapse.util.caches.descriptors import CachedFunction, cached from synapse.util.caches.descriptors import CachedFunction, cached

View file

@ -35,8 +35,8 @@ from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.storage.state import StateFilter
from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
from synapse.types.state import StateFilter
from synapse.util.caches import register_cache from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state from synapse.visibility import filter_event_for_clients_with_state
@ -106,6 +106,7 @@ class BulkPushRuleEvaluator:
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.clock = hs.get_clock() self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler() self._event_auth_handler = hs.get_event_auth_handler()
self.should_calculate_push_rules = self.hs.config.push.enable_push
self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
@ -269,6 +270,8 @@ class BulkPushRuleEvaluator:
for each event, check if the message should increment the unread count, and for each event, check if the message should increment the unread count, and
insert the results into the event_push_actions_staging table. insert the results into the event_push_actions_staging table.
""" """
if not self.should_calculate_push_rules:
return
# For batched events the power level events may not have been persisted yet, # For batched events the power level events may not have been persisted yet,
# so we pass in the batched events. Thus if the event cannot be found in the # so we pass in the batched events. Thus if the event cannot be found in the
# database we can check in the batch. # database we can check in the batch.
@ -340,10 +343,6 @@ class BulkPushRuleEvaluator:
for user_id, level in notification_levels.items(): for user_id, level in notification_levels.items():
notification_levels[user_id] = int(level) notification_levels[user_id] = int(level)
room_version_features = event.room_version.msc3931_push_features
if not room_version_features:
room_version_features = []
evaluator = PushRuleEvaluator( evaluator = PushRuleEvaluator(
_flatten_dict(event, room_version=event.room_version), _flatten_dict(event, room_version=event.room_version),
room_member_count, room_member_count,
@ -351,7 +350,7 @@ class BulkPushRuleEvaluator:
notification_levels, notification_levels,
related_events, related_events,
self._related_event_match_enabled, self._related_event_match_enabled,
room_version_features, event.room_version.msc3931_push_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
) )

View file

@ -37,8 +37,8 @@ from synapse.push.push_types import (
TemplateVars, TemplateVars,
) )
from synapse.storage.databases.main.event_push_actions import EmailPushAction from synapse.storage.databases.main.event_push_actions import EmailPushAction
from synapse.storage.state import StateFilter
from synapse.types import StateMap, UserID from synapse.types import StateMap, UserID
from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute from synapse.util.async_helpers import concurrently_execute
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client

View file

@ -17,7 +17,6 @@ from synapse.events import EventBase
from synapse.push.presentable_names import calculate_room_name, name_from_member_event from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage.controllers import StorageControllers from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore from synapse.storage.databases.main import DataStore
from synapse.util.async_helpers import concurrently_execute
async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int: async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
@ -26,23 +25,12 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
badge = len(invites) badge = len(invites)
room_notifs = [] room_to_count = await store.get_unread_counts_by_room_for_user(user_id)
for room_id, notify_count in room_to_count.items():
async def get_room_unread_count(room_id: str) -> None: # room_to_count may include rooms which the user has left,
room_notifs.append( # ignore those.
await store.get_unread_event_push_actions_by_room_for_user( if room_id not in joins:
room_id, continue
user_id,
)
)
await concurrently_execute(get_room_unread_count, joins, 10)
for notifs in room_notifs:
# Combine the counts from all the threads.
notify_count = notifs.main_timeline.notify_count + sum(
n.notify_count for n in notifs.threads.values()
)
if notify_count == 0: if notify_count == 0:
continue continue
@ -51,8 +39,10 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
# return one badge count per conversation # return one badge count per conversation
badge += 1 badge += 1
else: else:
# increment the badge count by the number of unread messages in the room # Increase badge by number of notifications in room
# NOTE: this includes threaded and unthreaded notifications.
badge += notify_count badge += notify_count
return badge return badge
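The badge arithmetic above, restated with plain data so the group_by_room behaviour is easy to see. Here room_to_count stands in for store.get_unread_counts_by_room_for_user(...) and joins for the user's currently joined rooms; all names are illustrative only.

from typing import Dict, Set


def compute_badge(
    invites: int, joins: Set[str], room_to_count: Dict[str, int], group_by_room: bool
) -> int:
    badge = invites
    for room_id, notify_count in room_to_count.items():
        # Skip rooms the user has since left, and rooms with nothing unread.
        if room_id not in joins or notify_count == 0:
            continue
        if group_by_room:
            badge += 1              # one badge unit per noisy room
        else:
            badge += notify_count   # every notification, threaded or not
    return badge


assert compute_badge(1, {"!a:x", "!b:x"}, {"!a:x": 3, "!c:x": 5}, group_by_room=True) == 2
assert compute_badge(0, {"!a:x"}, {"!a:x": 3}, group_by_room=False) == 3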

View file

@ -36,12 +36,14 @@ from synapse.replication.tcp.streams import (
TagAccountDataStream, TagAccountDataStream,
ToDeviceStream, ToDeviceStream,
TypingStream, TypingStream,
UnPartialStatedRoomStream,
) )
from synapse.replication.tcp.streams.events import ( from synapse.replication.tcp.streams.events import (
EventsStream, EventsStream,
EventsStreamEventRow, EventsStreamEventRow,
EventsStreamRow, EventsStreamRow,
) )
from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStreamRow
from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID
from synapse.util.async_helpers import Linearizer, timeout_deferred from synapse.util.async_helpers import Linearizer, timeout_deferred
from synapse.util.metrics import Measure from synapse.util.metrics import Measure
@ -117,6 +119,7 @@ class ReplicationDataHandler:
self._streams = hs.get_replication_streams() self._streams = hs.get_replication_streams()
self._instance_name = hs.get_instance_name() self._instance_name = hs.get_instance_name()
self._typing_handler = hs.get_typing_handler() self._typing_handler = hs.get_typing_handler()
self._state_storage_controller = hs.get_storage_controllers().state
self._notify_pushers = hs.config.worker.start_pushers self._notify_pushers = hs.config.worker.start_pushers
self._pusher_pool = hs.get_pusherpool() self._pusher_pool = hs.get_pusherpool()
@ -236,6 +239,14 @@ class ReplicationDataHandler:
self.notifier.notify_user_joined_room( self.notifier.notify_user_joined_room(
row.data.event_id, row.data.room_id row.data.event_id, row.data.room_id
) )
elif stream_name == UnPartialStatedRoomStream.NAME:
for row in rows:
assert isinstance(row, UnPartialStatedRoomStreamRow)
# Wake up any tasks waiting for the room to be un-partial-stated.
self._state_storage_controller.notify_room_un_partial_stated(
row.room_id
)
await self._presence_handler.process_replication_rows( await self._presence_handler.process_replication_rows(
stream_name, instance_name, token, rows stream_name, instance_name, token, rows

View file

@ -42,6 +42,7 @@ from synapse.replication.tcp.streams._base import (
) )
from synapse.replication.tcp.streams.events import EventsStream from synapse.replication.tcp.streams.events import EventsStream
from synapse.replication.tcp.streams.federation import FederationStream from synapse.replication.tcp.streams.federation import FederationStream
from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
STREAMS_MAP = { STREAMS_MAP = {
stream.NAME: stream stream.NAME: stream
@ -61,6 +62,7 @@ STREAMS_MAP = {
TagAccountDataStream, TagAccountDataStream,
AccountDataStream, AccountDataStream,
UserSignatureStream, UserSignatureStream,
UnPartialStatedRoomStream,
) )
} }
@ -80,4 +82,5 @@ __all__ = [
"TagAccountDataStream", "TagAccountDataStream",
"AccountDataStream", "AccountDataStream",
"UserSignatureStream", "UserSignatureStream",
"UnPartialStatedRoomStream",
] ]

View file

@ -0,0 +1,48 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import attr
from synapse.replication.tcp.streams import Stream
from synapse.replication.tcp.streams._base import current_token_without_instance
if TYPE_CHECKING:
from synapse.server import HomeServer
@attr.s(slots=True, frozen=True, auto_attribs=True)
class UnPartialStatedRoomStreamRow:
# ID of the room that has been un-partial-stated.
room_id: str
class UnPartialStatedRoomStream(Stream):
"""
Stream to notify about rooms becoming un-partial-stated;
that is, when the background sync finishes such that we now have full state for
the room.
"""
NAME = "un_partial_stated_room"
ROW_TYPE = UnPartialStatedRoomStreamRow
def __init__(self, hs: "HomeServer"):
store = hs.get_datastores().main
super().__init__(
hs.get_instance_name(),
# TODO(faster_joins, multiple writers): we need to account for instance names
current_token_without_instance(store.get_un_partial_stated_rooms_token),
store.get_un_partial_stated_rooms_from_stream,
)
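A standalone illustration of the stream-row pattern used above: a slotted, frozen attrs class is immutable and hashable, which makes it safe to hand around to replication consumers. ExampleStreamRow is a made-up name; this only assumes the attrs package is installed.

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ExampleStreamRow:
    room_id: str


row = ExampleStreamRow(room_id="!abc:example.org")
assert row.room_id == "!abc:example.org"
assert row in {row}  # hashable because the class is frozen
# Mutation raises attr.exceptions.FrozenInstanceError:
#     row.room_id = "!other:example.org"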

View file

@ -13,13 +13,13 @@
<body> <body>
<header class="mx_Header"> <header class="mx_Header">
{% if app_name == "Riot" %} {% if app_name == "Riot" %}
<img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/> <img src="https://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %} {% elif app_name == "Vector" %}
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/> <img src="https://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% elif app_name == "Element" %} {% elif app_name == "Element" %}
<img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/> <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
{% else %} {% else %}
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/> <img src="https://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{% endif %} {% endif %}
</header> </header>

View file

@ -21,13 +21,13 @@
</td> </td>
<td class="logo"> <td class="logo">
{% if app_name == "Riot" %} {% if app_name == "Riot" %}
<img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/> <img src="https://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %} {% elif app_name == "Vector" %}
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/> <img src="https://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% elif app_name == "Element" %} {% elif app_name == "Element" %}
<img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/> <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
{% else %} {% else %}
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/> <img src="https://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{% endif %} {% endif %}
</td> </td>
</tr> </tr>

View file

@ -22,13 +22,13 @@
</td> </td>
<td class="logo"> <td class="logo">
{%- if app_name == "Riot" %} {%- if app_name == "Riot" %}
<img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/> <img src="https://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{%- elif app_name == "Vector" %} {%- elif app_name == "Vector" %}
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/> <img src="https://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{%- elif app_name == "Element" %} {%- elif app_name == "Element" %}
<img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/> <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
{%- else %} {%- else %}
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/> <img src="https://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{%- endif %} {%- endif %}
</td> </td>
</tr> </tr>

View file

@ -29,7 +29,7 @@ from synapse.rest.client import (
initial_sync, initial_sync,
keys, keys,
knock, knock,
login as v1_login, login,
login_token_request, login_token_request,
logout, logout,
mutual_rooms, mutual_rooms,
@ -82,6 +82,10 @@ class ClientRestResource(JsonResource):
@staticmethod @staticmethod
def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None:
# Some servlets are only registered on the main process (and not worker
# processes).
is_main_process = hs.config.worker.worker_app is None
versions.register_servlets(hs, client_resource) versions.register_servlets(hs, client_resource)
# Deprecated in r0 # Deprecated in r0
@ -92,45 +96,58 @@ class ClientRestResource(JsonResource):
events.register_servlets(hs, client_resource) events.register_servlets(hs, client_resource)
room.register_servlets(hs, client_resource) room.register_servlets(hs, client_resource)
v1_login.register_servlets(hs, client_resource) login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource) profile.register_servlets(hs, client_resource)
presence.register_servlets(hs, client_resource) presence.register_servlets(hs, client_resource)
directory.register_servlets(hs, client_resource) if is_main_process:
directory.register_servlets(hs, client_resource)
voip.register_servlets(hs, client_resource) voip.register_servlets(hs, client_resource)
pusher.register_servlets(hs, client_resource) if is_main_process:
pusher.register_servlets(hs, client_resource)
push_rule.register_servlets(hs, client_resource) push_rule.register_servlets(hs, client_resource)
logout.register_servlets(hs, client_resource) if is_main_process:
logout.register_servlets(hs, client_resource)
sync.register_servlets(hs, client_resource) sync.register_servlets(hs, client_resource)
filter.register_servlets(hs, client_resource) if is_main_process:
filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource) account.register_servlets(hs, client_resource)
register.register_servlets(hs, client_resource) register.register_servlets(hs, client_resource)
auth.register_servlets(hs, client_resource) if is_main_process:
auth.register_servlets(hs, client_resource)
receipts.register_servlets(hs, client_resource) receipts.register_servlets(hs, client_resource)
read_marker.register_servlets(hs, client_resource) read_marker.register_servlets(hs, client_resource)
room_keys.register_servlets(hs, client_resource) room_keys.register_servlets(hs, client_resource)
keys.register_servlets(hs, client_resource) keys.register_servlets(hs, client_resource)
tokenrefresh.register_servlets(hs, client_resource) if is_main_process:
tokenrefresh.register_servlets(hs, client_resource)
tags.register_servlets(hs, client_resource) tags.register_servlets(hs, client_resource)
account_data.register_servlets(hs, client_resource) account_data.register_servlets(hs, client_resource)
report_event.register_servlets(hs, client_resource) if is_main_process:
openid.register_servlets(hs, client_resource) report_event.register_servlets(hs, client_resource)
notifications.register_servlets(hs, client_resource) openid.register_servlets(hs, client_resource)
notifications.register_servlets(hs, client_resource)
devices.register_servlets(hs, client_resource) devices.register_servlets(hs, client_resource)
thirdparty.register_servlets(hs, client_resource) if is_main_process:
thirdparty.register_servlets(hs, client_resource)
sendtodevice.register_servlets(hs, client_resource) sendtodevice.register_servlets(hs, client_resource)
user_directory.register_servlets(hs, client_resource) user_directory.register_servlets(hs, client_resource)
room_upgrade_rest_servlet.register_servlets(hs, client_resource) if is_main_process:
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
room_batch.register_servlets(hs, client_resource) room_batch.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource) if is_main_process:
account_validity.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource)
account_validity.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource) relations.register_servlets(hs, client_resource)
password_policy.register_servlets(hs, client_resource) if is_main_process:
knock.register_servlets(hs, client_resource) password_policy.register_servlets(hs, client_resource)
knock.register_servlets(hs, client_resource)
# moving to /_synapse/admin # moving to /_synapse/admin
admin.register_servlets_for_client_rest_resource(hs, client_resource) if is_main_process:
admin.register_servlets_for_client_rest_resource(hs, client_resource)
# unstable # unstable
mutual_rooms.register_servlets(hs, client_resource) if is_main_process:
login_token_request.register_servlets(hs, client_resource) mutual_rooms.register_servlets(hs, client_resource)
rendezvous.register_servlets(hs, client_resource) login_token_request.register_servlets(hs, client_resource)
rendezvous.register_servlets(hs, client_resource)
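A toy sketch of the gating used above: on the main process worker_app is unset (None), so main-process-only servlets are simply skipped when the same registration code runs on a worker. WorkerConfig and the servlet names are made-up stand-ins.

from typing import List, Optional


class WorkerConfig:
    def __init__(self, worker_app: Optional[str] = None) -> None:
        self.worker_app = worker_app


def registered_servlets(config: WorkerConfig) -> List[str]:
    servlets = ["sync", "rooms", "profile"]   # registered on every process
    if config.worker_app is None:             # main process only
        servlets += ["logout", "filter", "devices"]
    return servlets


assert "logout" in registered_servlets(WorkerConfig())
assert "logout" not in registered_servlets(WorkerConfig("synapse.app.generic_worker"))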

View file

@ -34,9 +34,9 @@ from synapse.rest.admin._base import (
assert_user_is_admin, assert_user_is_admin,
) )
from synapse.storage.databases.main.room import RoomSortOrder from synapse.storage.databases.main.room import RoomSortOrder
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, RoomID, UserID, create_requester from synapse.types import JsonDict, RoomID, UserID, create_requester
from synapse.types.state import StateFilter
from synapse.util import json_decoder from synapse.util import json_decoder
if TYPE_CHECKING: if TYPE_CHECKING:

View file

@ -875,19 +875,21 @@ class AccountStatusRestServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailPasswordRequestTokenRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None:
PasswordRestServlet(hs).register(http_server) EmailPasswordRequestTokenRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server)
EmailThreepidRequestTokenRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server)
MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) EmailThreepidRequestTokenRestServlet(hs).register(http_server)
AddThreepidEmailSubmitTokenServlet(hs).register(http_server) MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server) AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server)
ThreepidAddRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None:
ThreepidBindRestServlet(hs).register(http_server) ThreepidAddRestServlet(hs).register(http_server)
ThreepidUnbindRestServlet(hs).register(http_server) ThreepidBindRestServlet(hs).register(http_server)
ThreepidDeleteRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server)
ThreepidDeleteRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server)
if hs.config.experimental.msc3720_enabled: if hs.config.worker.worker_app is None and hs.config.experimental.msc3720_enabled:
AccountStatusRestServlet(hs).register(http_server) AccountStatusRestServlet(hs).register(http_server)

View file

@ -342,8 +342,10 @@ class ClaimDehydratedDeviceServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
DeleteDevicesRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None:
DeleteDevicesRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server)
DeviceRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None:
DehydratedDeviceServlet(hs).register(http_server) DeviceRestServlet(hs).register(http_server)
ClaimDehydratedDeviceServlet(hs).register(http_server) DehydratedDeviceServlet(hs).register(http_server)
ClaimDehydratedDeviceServlet(hs).register(http_server)

View file

@ -376,5 +376,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
KeyQueryServlet(hs).register(http_server) KeyQueryServlet(hs).register(http_server)
KeyChangesServlet(hs).register(http_server) KeyChangesServlet(hs).register(http_server)
OneTimeKeyServlet(hs).register(http_server) OneTimeKeyServlet(hs).register(http_server)
SigningKeyUploadServlet(hs).register(http_server) if hs.config.worker.worker_app is None:
SignaturesUploadServlet(hs).register(http_server) SigningKeyUploadServlet(hs).register(http_server)
SignaturesUploadServlet(hs).register(http_server)

View file

@ -20,7 +20,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.types import JsonDict from synapse.types import EventID, JsonDict, RoomID
from ._base import client_patterns from ._base import client_patterns
@ -56,6 +56,9 @@ class ReceiptRestServlet(RestServlet):
) -> Tuple[int, JsonDict]: ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request) requester = await self.auth.get_user_by_req(request)
if not RoomID.is_valid(room_id) or not event_id.startswith(EventID.SIGIL):
raise SynapseError(400, "A valid room ID and event ID must be specified")
if receipt_type not in self._known_receipt_types: if receipt_type not in self._known_receipt_types:
raise SynapseError( raise SynapseError(
400, 400,
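A rough standalone approximation of the guard added above: Matrix room IDs start with "!" and carry a ":server" part, and event IDs start with the "$" sigil. This simplification merely stands in for RoomID.is_valid / EventID.SIGIL; the IDs are made up.

def plausible_ids(room_id: str, event_id: str) -> bool:
    return room_id.startswith("!") and ":" in room_id and event_id.startswith("$")


assert plausible_ids("!roomid:example.org", "$someeventid")
assert not plausible_ids("not-a-room-id", "$someeventid")
assert not plausible_ids("!roomid:example.org", "someeventid")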

View file

@ -949,9 +949,10 @@ def _calculate_registration_flows(
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailRegisterRequestTokenRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None:
MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) EmailRegisterRequestTokenRestServlet(hs).register(http_server)
UsernameAvailabilityRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
RegistrationSubmitTokenServlet(hs).register(http_server) UsernameAvailabilityRestServlet(hs).register(http_server)
RegistrationSubmitTokenServlet(hs).register(http_server)
RegistrationTokenValidityRestServlet(hs).register(http_server) RegistrationTokenValidityRestServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server) RegisterRestServlet(hs).register(http_server)

View file

@ -55,9 +55,9 @@ from synapse.logging.opentracing import set_tag
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.client._base import client_patterns from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache from synapse.rest.client.transactions import HttpTransactionCache
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
from synapse.types.state import StateFilter
from synapse.util import json_decoder from synapse.util import json_decoder
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable
from synapse.util.stringutils import parse_and_validate_server_name, random_string from synapse.util.stringutils import parse_and_validate_server_name, random_string
@ -400,12 +400,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
) -> Tuple[int, JsonDict]: ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True) requester = await self.auth.get_user_by_req(request, allow_guest=True)
try: content = parse_json_object_from_request(request, allow_empty_body=True)
content = parse_json_object_from_request(request)
except Exception:
# Turns out we used to ignore the body entirely, and some clients
# cheekily send invalid bodies.
content = {}
# twisted.web.server.Request.args is incorrectly defined as Optional[Any] # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore args: Dict[bytes, List[bytes]] = request.args # type: ignore
@ -956,12 +951,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
}: }:
raise AuthError(403, "Guest access not allowed") raise AuthError(403, "Guest access not allowed")
try: content = parse_json_object_from_request(request, allow_empty_body=True)
content = parse_json_object_from_request(request)
except Exception:
# Turns out we used to ignore the body entirely, and some clients
# cheekily send invalid bodies.
content = {}
if membership_action == "invite" and all( if membership_action == "invite" and all(
key in content for key in ("medium", "address") key in content for key in ("medium", "address")
@ -1399,9 +1389,7 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
) )
def register_servlets( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
hs: "HomeServer", http_server: HttpServer, is_worker: bool = False
) -> None:
RoomStateEventRestServlet(hs).register(http_server) RoomStateEventRestServlet(hs).register(http_server)
RoomMemberListRestServlet(hs).register(http_server) RoomMemberListRestServlet(hs).register(http_server)
JoinedRoomMemberListRestServlet(hs).register(http_server) JoinedRoomMemberListRestServlet(hs).register(http_server)
@ -1425,7 +1413,7 @@ def register_servlets(
TimestampLookupRestServlet(hs).register(http_server) TimestampLookupRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process. # Some servlets only get registered for the main process.
if not is_worker: if hs.config.worker.worker_app is None:
RoomForgetRestServlet(hs).register(http_server) RoomForgetRestServlet(hs).register(http_server)

View file

@ -46,7 +46,6 @@ class SendToDeviceRestServlet(servlet.RestServlet):
def on_PUT( def on_PUT(
self, request: SynapseRequest, message_type: str, txn_id: str self, request: SynapseRequest, message_type: str, txn_id: str
) -> Awaitable[Tuple[int, JsonDict]]: ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("message_type", message_type)
set_tag("txn_id", txn_id) set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request( return self.txns.fetch_or_execute_request(
request, self._put, request, message_type, txn_id request, self._put, request, message_type, txn_id

View file

@ -63,8 +63,8 @@ class UserDirectorySearchRestServlet(RestServlet):
body = parse_json_object_from_request(request) body = parse_json_object_from_request(request)
limit = body.get("limit", 10) limit = int(body.get("limit", 10))
limit = min(limit, 50) limit = max(min(limit, 50), 0)
try: try:
search_term = body["search_term"] search_term = body["search_term"]
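The clamping above in isolation: accept a client-supplied limit, default to 10 when absent, and keep the final value within [0, 50]. A small self-contained sketch of that arithmetic.

def clamp_limit(body: dict) -> int:
    limit = int(body.get("limit", 10))
    return max(min(limit, 50), 0)


assert clamp_limit({}) == 10
assert clamp_limit({"limit": 500}) == 50
assert clamp_limit({"limit": -3}) == 0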

View file

@ -77,6 +77,7 @@ class VersionsRestServlet(RestServlet):
"v1.2", "v1.2",
"v1.3", "v1.3",
"v1.4", "v1.4",
"v1.5",
], ],
# as per MSC1497: # as per MSC1497:
"unstable_features": { "unstable_features": {

View file

@ -24,7 +24,6 @@ from matrix_common.types.mxc_uri import MXCUri
import twisted.internet.error import twisted.internet.error
import twisted.web.http import twisted.web.http
from twisted.internet.defer import Deferred from twisted.internet.defer import Deferred
from twisted.web.resource import Resource
from synapse.api.errors import ( from synapse.api.errors import (
FederationDeniedError, FederationDeniedError,
@ -35,6 +34,7 @@ from synapse.api.errors import (
) )
from synapse.config._base import ConfigError from synapse.config._base import ConfigError
from synapse.config.repository import ThumbnailRequirement from synapse.config.repository import ThumbnailRequirement
from synapse.http.server import UnrecognizedRequestResource
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
@ -1046,7 +1046,7 @@ class MediaRepository:
return removed_media, len(removed_media) return removed_media, len(removed_media)
class MediaRepositoryResource(Resource): class MediaRepositoryResource(UnrecognizedRequestResource):
"""File uploading and downloading. """File uploading and downloading.
Uploads are POSTed to a resource which returns a token which is used to GET Uploads are POSTed to a resource which returns a token which is used to GET

View file

@ -44,8 +44,8 @@ from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2 from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import StateMap from synapse.types import StateMap
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import Measure, measure_func from synapse.util.metrics import Measure, measure_func

View file

@ -544,6 +544,48 @@ class BackgroundUpdater:
The named index will be dropped upon completion of the new index. The named index will be dropped upon completion of the new index.
""" """
async def updater(progress: JsonDict, batch_size: int) -> int:
await self.create_index_in_background(
index_name=index_name,
table=table,
columns=columns,
where_clause=where_clause,
unique=unique,
psql_only=psql_only,
replaces_index=replaces_index,
)
await self._end_background_update(update_name)
return 1
self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
updater, oneshot=True
)
async def create_index_in_background(
self,
index_name: str,
table: str,
columns: Iterable[str],
where_clause: Optional[str] = None,
unique: bool = False,
psql_only: bool = False,
replaces_index: Optional[str] = None,
) -> None:
"""Add an index in the background.
Args:
update_name: update_name to register for
index_name: name of index to add
table: table to add index to
columns: columns/expressions to include in index
where_clause: A WHERE clause to specify a partial unique index.
unique: true to make a UNIQUE index
psql_only: true to only create this index on psql databases (useful
for virtual sqlite tables)
replaces_index: The name of an index that this index replaces.
The named index will be dropped upon completion of the new index.
"""
def create_index_psql(conn: Connection) -> None: def create_index_psql(conn: Connection) -> None:
conn.rollback() conn.rollback()
# postgres insists on autocommit for the index # postgres insists on autocommit for the index
@ -618,16 +660,11 @@ class BackgroundUpdater:
else: else:
runner = create_index_sqlite runner = create_index_sqlite
async def updater(progress: JsonDict, batch_size: int) -> int: if runner is None:
if runner is not None: return
logger.info("Adding index %s to %s", index_name, table)
await self.db_pool.runWithConnection(runner)
await self._end_background_update(update_name)
return 1
self._background_update_handlers[update_name] = _BackgroundUpdateHandler( logger.info("Adding index %s to %s", index_name, table)
updater, oneshot=True await self.db_pool.runWithConnection(runner)
)
async def _end_background_update(self, update_name: str) -> None: async def _end_background_update(self, update_name: str) -> None:
"""Removes a completed background update task from the queue. """Removes a completed background update task from the queue.

View file

@ -58,13 +58,13 @@ from synapse.storage.controllers.state import StateStorageController
from synapse.storage.databases import Databases from synapse.storage.databases import Databases
from synapse.storage.databases.main.events import DeltaState from synapse.storage.databases.main.events import DeltaState
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
PersistedEventPosition, PersistedEventPosition,
RoomStreamToken, RoomStreamToken,
StateMap, StateMap,
get_domain_from_id, get_domain_from_id,
) )
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ObservableDeferred, yieldable_gather_results from synapse.util.async_helpers import ObservableDeferred, yieldable_gather_results
from synapse.util.metrics import Measure from synapse.util.metrics import Measure

View file

@ -31,12 +31,12 @@ from synapse.api.constants import EventTypes
from synapse.events import EventBase from synapse.events import EventBase
from synapse.logging.opentracing import tag_args, trace from synapse.logging.opentracing import tag_args, trace
from synapse.storage.roommember import ProfileInfo from synapse.storage.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.storage.util.partial_state_events_tracker import ( from synapse.storage.util.partial_state_events_tracker import (
PartialCurrentStateTracker, PartialCurrentStateTracker,
PartialStateEventsTracker, PartialStateEventsTracker,
) )
from synapse.types import MutableStateMap, StateMap from synapse.types import MutableStateMap, StateMap
from synapse.types.state import StateFilter
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable
if TYPE_CHECKING: if TYPE_CHECKING:

View file

@ -667,7 +667,8 @@ class DatabasePool:
) )
# also check variables referenced in func's closure # also check variables referenced in func's closure
if inspect.isfunction(func): if inspect.isfunction(func):
f = cast(types.FunctionType, func) # Keep the cast for now---it helps PyCharm to understand what `func` is.
f = cast(types.FunctionType, func) # type: ignore[redundant-cast]
if f.__closure__: if f.__closure__:
for i, cell in enumerate(f.__closure__): for i, cell in enumerate(f.__closure__):
if inspect.isgenerator(cell.cell_contents): if inspect.isgenerator(cell.cell_contents):

View file

@ -26,8 +26,15 @@ from typing import (
cast, cast,
) )
from synapse.api.constants import EventContentFields
from synapse.logging import issue9533_logger from synapse.logging import issue9533_logger
from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.logging.opentracing import (
SynapseTags,
log_kv,
set_tag,
start_active_span,
trace,
)
from synapse.replication.tcp.streams import ToDeviceStream from synapse.replication.tcp.streams import ToDeviceStream
from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import ( from synapse.storage.database import (
@ -397,6 +404,17 @@ class DeviceInboxWorkerStore(SQLBaseStore):
(recipient_user_id, recipient_device_id), [] (recipient_user_id, recipient_device_id), []
).append(message_dict) ).append(message_dict)
# start a new span for each message, so that we can tag each separately
with start_active_span("get_to_device_message"):
set_tag(SynapseTags.TO_DEVICE_TYPE, message_dict["type"])
set_tag(SynapseTags.TO_DEVICE_SENDER, message_dict["sender"])
set_tag(SynapseTags.TO_DEVICE_RECIPIENT, recipient_user_id)
set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, recipient_device_id)
set_tag(
SynapseTags.TO_DEVICE_MSGID,
message_dict["content"].get(EventContentFields.TO_DEVICE_MSGID),
)
if limit is not None and rowcount == limit: if limit is not None and rowcount == limit:
# We ended up bumping up against the message limit. There may be more messages # We ended up bumping up against the message limit. There may be more messages
# to retrieve. Return what we have, as well as the last stream position that # to retrieve. Return what we have, as well as the last stream position that
@ -678,12 +696,35 @@ class DeviceInboxWorkerStore(SQLBaseStore):
], ],
) )
if remote_messages_by_destination: for destination, edu in remote_messages_by_destination.items():
issue9533_logger.debug( if issue9533_logger.isEnabledFor(logging.DEBUG):
"Queued outgoing to-device messages with stream_id %i for %s", issue9533_logger.debug(
stream_id, "Queued outgoing to-device messages with "
list(remote_messages_by_destination.keys()), "stream_id %i, EDU message_id %s, type %s for %s: %s",
) stream_id,
edu["message_id"],
edu["type"],
destination,
[
f"{user_id}/{device_id} (msgid "
f"{msg.get(EventContentFields.TO_DEVICE_MSGID)})"
for (user_id, messages_by_device) in edu["messages"].items()
for (device_id, msg) in messages_by_device.items()
],
)
for (user_id, messages_by_device) in edu["messages"].items():
for (device_id, msg) in messages_by_device.items():
with start_active_span("store_outgoing_to_device_message"):
set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["sender"])
set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["message_id"])
set_tag(SynapseTags.TO_DEVICE_TYPE, edu["type"])
set_tag(SynapseTags.TO_DEVICE_RECIPIENT, user_id)
set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, device_id)
set_tag(
SynapseTags.TO_DEVICE_MSGID,
msg.get(EventContentFields.TO_DEVICE_MSGID),
)
async with self._device_inbox_id_gen.get_next() as stream_id: async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec() now_ms = self._clock.time_msec()
@ -801,7 +842,19 @@ class DeviceInboxWorkerStore(SQLBaseStore):
# Only insert into the local inbox if the device exists on # Only insert into the local inbox if the device exists on
# this server # this server
device_id = row["device_id"] device_id = row["device_id"]
message_json = json_encoder.encode(messages_by_device[device_id])
with start_active_span("serialise_to_device_message"):
msg = messages_by_device[device_id]
set_tag(SynapseTags.TO_DEVICE_TYPE, msg["type"])
set_tag(SynapseTags.TO_DEVICE_SENDER, msg["sender"])
set_tag(SynapseTags.TO_DEVICE_RECIPIENT, user_id)
set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, device_id)
set_tag(
SynapseTags.TO_DEVICE_MSGID,
msg["content"].get(EventContentFields.TO_DEVICE_MSGID),
)
message_json = json_encoder.encode(msg)
messages_json_for_user[device_id] = message_json messages_json_for_user[device_id] = message_json
if messages_json_for_user: if messages_json_for_user:
@ -821,15 +874,20 @@ class DeviceInboxWorkerStore(SQLBaseStore):
], ],
) )
issue9533_logger.debug( if issue9533_logger.isEnabledFor(logging.DEBUG):
"Stored to-device messages with stream_id %i for %s", issue9533_logger.debug(
stream_id, "Stored to-device messages with stream_id %i: %s",
[ stream_id,
(user_id, device_id) [
for (user_id, messages_by_device) in local_by_user_then_device.items() f"{user_id}/{device_id} (msgid "
for device_id in messages_by_device.keys() f"{msg['content'].get(EventContentFields.TO_DEVICE_MSGID)})"
], for (
) user_id,
messages_by_device,
) in messages_by_user_then_device.items()
for (device_id, msg) in messages_by_device.items()
],
)
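The logging guard used above, in miniature: only build the per-message summary (which walks every queued message) when DEBUG logging is actually enabled. The logger name and data here are illustrative.

import logging

logger = logging.getLogger("example.to_device")

messages_by_device = {"DEVICEID": {"content": {"com.example.msgid": "abc"}}}
if logger.isEnabledFor(logging.DEBUG):
    logger.debug(
        "Stored to-device messages: %s",
        [f"@user:example.org/{device_id}" for device_id in messages_by_device],
    )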
class DeviceInboxBackgroundUpdateStore(SQLBaseStore): class DeviceInboxBackgroundUpdateStore(SQLBaseStore):

View file

@ -58,7 +58,10 @@ from synapse.types import JsonDict, get_verify_key_from_cross_signing_key
from synapse.util import json_decoder, json_encoder from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.caches.stream_change_cache import (
AllEntitiesChangedResult,
StreamChangeCache,
)
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr from synapse.util.stringutils import shortstr
@ -799,18 +802,66 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
def get_cached_device_list_changes( def get_cached_device_list_changes(
self, self,
from_key: int, from_key: int,
) -> Optional[List[str]]: ) -> AllEntitiesChangedResult:
"""Get set of users whose devices have changed since `from_key`, or None """Get set of users whose devices have changed since `from_key`, or None
if that information is not in our cache. if that information is not in our cache.
""" """
return self._device_list_stream_cache.get_all_entities_changed(from_key) return self._device_list_stream_cache.get_all_entities_changed(from_key)
@cancellable
async def get_all_devices_changed(
self,
from_key: int,
to_key: int,
) -> Set[str]:
"""Get all users whose devices have changed in the given range.
Args:
from_key: The minimum device lists stream token to query device list
changes for, exclusive.
to_key: The maximum device lists stream token to query device list
changes for, inclusive.
Returns:
The set of user_ids whose devices have changed since `from_key`
(exclusive) until `to_key` (inclusive).
"""
result = self._device_list_stream_cache.get_all_entities_changed(from_key)
if result.hit:
# We know which users might have changed devices.
if not result.entities:
# If no users then we can return early.
return set()
# Otherwise we need to filter down the list
return await self.get_users_whose_devices_changed(
from_key, result.entities, to_key
)
# If the cache didn't tell us anything, we just need to query the full
# range.
sql = """
SELECT DISTINCT user_id FROM device_lists_stream
WHERE ? < stream_id AND stream_id <= ?
"""
rows = await self.db_pool.execute(
"get_all_devices_changed",
None,
sql,
from_key,
to_key,
)
return {u for u, in rows}
@cancellable @cancellable
async def get_users_whose_devices_changed( async def get_users_whose_devices_changed(
self, self,
from_key: int, from_key: int,
user_ids: Optional[Collection[str]] = None, user_ids: Collection[str],
to_key: Optional[int] = None, to_key: Optional[int] = None,
) -> Set[str]: ) -> Set[str]:
"""Get set of users whose devices have changed since `from_key` that """Get set of users whose devices have changed since `from_key` that
@ -830,46 +881,31 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
""" """
# Get set of users who *may* have changed. Users not in the returned # Get set of users who *may* have changed. Users not in the returned
# list have definitely not changed. # list have definitely not changed.
user_ids_to_check: Optional[Collection[str]] user_ids_to_check = self._device_list_stream_cache.get_entities_changed(
if user_ids is None: user_ids, from_key
# Get set of all users that have had device list changes since 'from_key' )
user_ids_to_check = self._device_list_stream_cache.get_all_entities_changed(
from_key
)
else:
# The same as above, but filter results to only those users in 'user_ids'
user_ids_to_check = self._device_list_stream_cache.get_entities_changed(
user_ids, from_key
)
# If an empty set was returned, there's nothing to do.
if not user_ids_to_check: if not user_ids_to_check:
return set() return set()
if to_key is None:
to_key = self._device_list_id_gen.get_current_token()
def _get_users_whose_devices_changed_txn(txn: LoggingTransaction) -> Set[str]: def _get_users_whose_devices_changed_txn(txn: LoggingTransaction) -> Set[str]:
changes: Set[str] = set() sql = """
stream_id_where_clause = "stream_id > ?"
sql_args = [from_key]
if to_key:
stream_id_where_clause += " AND stream_id <= ?"
sql_args.append(to_key)
sql = f"""
SELECT DISTINCT user_id FROM device_lists_stream SELECT DISTINCT user_id FROM device_lists_stream
WHERE {stream_id_where_clause} WHERE ? < stream_id AND stream_id <= ? AND %s
AND
""" """
changes: Set[str] = set()
# Query device changes with a batch of users at a time # Query device changes with a batch of users at a time
# Assertion for mypy's benefit; see also
# https://mypy.readthedocs.io/en/stable/common_issues.html#narrowing-and-inner-functions
assert user_ids_to_check is not None
for chunk in batch_iter(user_ids_to_check, 100): for chunk in batch_iter(user_ids_to_check, 100):
clause, args = make_in_list_sql_clause( clause, args = make_in_list_sql_clause(
txn.database_engine, "user_id", chunk txn.database_engine, "user_id", chunk
) )
txn.execute(sql + clause, sql_args + args) txn.execute(sql % (clause,), [from_key, to_key] + args)
changes.update(user_id for user_id, in txn) changes.update(user_id for user_id, in txn)
return changes return changes

View file

@ -140,7 +140,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
@cancellable @cancellable
async def get_e2e_device_keys_for_cs_api( async def get_e2e_device_keys_for_cs_api(
self, self,
query_list: List[Tuple[str, Optional[str]]], query_list: Collection[Tuple[str, Optional[str]]],
include_displaynames: bool = True, include_displaynames: bool = True,
) -> Dict[str, Dict[str, JsonDict]]: ) -> Dict[str, Dict[str, JsonDict]]:
"""Fetch a list of device keys, formatted suitably for the C/S API. """Fetch a list of device keys, formatted suitably for the C/S API.

View file

@ -74,6 +74,7 @@ receipt.
""" """
import logging import logging
from collections import defaultdict
from typing import ( from typing import (
TYPE_CHECKING, TYPE_CHECKING,
Collection, Collection,
@ -95,6 +96,7 @@ from synapse.storage.database import (
DatabasePool, DatabasePool,
LoggingDatabaseConnection, LoggingDatabaseConnection,
LoggingTransaction, LoggingTransaction,
PostgresEngine,
) )
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.stream import StreamWorkerStore
@ -463,6 +465,153 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
return result return result
async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]:
"""Get the notification count by room for a user. Only considers notifications,
not highlight or unread counts, and threads are currently aggregated under their room.
This function is intentionally not cached because it is called to calculate the
unread badge for push notifications and thus the result is expected to change.
Note that this function assumes the user is a member of the room. Because
summary rows are not removed when a user leaves a room, the caller must
filter out those results from the result.
Returns:
A map of room ID to notification counts for the given user.
"""
return await self.db_pool.runInteraction(
"get_unread_counts_by_room_for_user",
self._get_unread_counts_by_room_for_user_txn,
user_id,
)
def _get_unread_counts_by_room_for_user_txn(
self, txn: LoggingTransaction, user_id: str
) -> Dict[str, int]:
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
)
args.extend([user_id, user_id])
receipts_cte = f"""
WITH all_receipts AS (
SELECT room_id, thread_id, MAX(event_stream_ordering) AS max_receipt_stream_ordering
FROM receipts_linearized
LEFT JOIN events USING (room_id, event_id)
WHERE
{receipt_types_clause}
AND user_id = ?
GROUP BY room_id, thread_id
)
"""
receipts_joins = """
LEFT JOIN (
SELECT room_id, thread_id,
max_receipt_stream_ordering AS threaded_receipt_stream_ordering
FROM all_receipts
WHERE thread_id IS NOT NULL
) AS threaded_receipts USING (room_id, thread_id)
LEFT JOIN (
SELECT room_id, thread_id,
max_receipt_stream_ordering AS unthreaded_receipt_stream_ordering
FROM all_receipts
WHERE thread_id IS NULL
) AS unthreaded_receipts USING (room_id)
"""
# First get summary counts by room / thread for the user. We use the max receipt
# stream ordering of both threaded & unthreaded receipts to compare against the
# summary table.
#
# PostgreSQL and SQLite differ in comparing scalar numerics.
if isinstance(self.database_engine, PostgresEngine):
# GREATEST ignores NULLs.
max_clause = """GREATEST(
threaded_receipt_stream_ordering,
unthreaded_receipt_stream_ordering
)"""
else:
# MAX returns NULL if any are NULL, so COALESCE to 0 first.
max_clause = """MAX(
COALESCE(threaded_receipt_stream_ordering, 0),
COALESCE(unthreaded_receipt_stream_ordering, 0)
)"""
sql = f"""
{receipts_cte}
SELECT eps.room_id, eps.thread_id, notif_count
FROM event_push_summary AS eps
{receipts_joins}
WHERE user_id = ?
AND notif_count != 0
AND (
(last_receipt_stream_ordering IS NULL AND stream_ordering > {max_clause})
OR last_receipt_stream_ordering = {max_clause}
)
"""
txn.execute(sql, args)
seen_thread_ids = set()
room_to_count: Dict[str, int] = defaultdict(int)
for room_id, thread_id, notif_count in txn:
room_to_count[room_id] += notif_count
seen_thread_ids.add(thread_id)
# Now get any event push actions that haven't been rotated using the same OR
# join and filter by receipt and event push summary rotated up to stream ordering.
sql = f"""
{receipts_cte}
SELECT epa.room_id, epa.thread_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
FROM event_push_actions AS epa
{receipts_joins}
WHERE user_id = ?
AND epa.notif = 1
AND stream_ordering > (SELECT stream_ordering FROM event_push_summary_stream_ordering)
AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
GROUP BY epa.room_id, epa.thread_id
"""
txn.execute(sql, args)
for room_id, thread_id, notif_count in txn:
# Note: only count push actions we have valid summaries for with up to date receipt.
if thread_id not in seen_thread_ids:
continue
room_to_count[room_id] += notif_count
thread_id_clause, thread_ids_args = make_in_list_sql_clause(
self.database_engine, "epa.thread_id", seen_thread_ids
)
# Finally re-check event_push_actions for any rooms not in the summary, ignoring
# the rotated up-to position. This handles the case where a read receipt has arrived
# but not been rotated meaning the summary table is out of date, so we go back to
# the push actions table.
sql = f"""
{receipts_cte}
SELECT epa.room_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
FROM event_push_actions AS epa
{receipts_joins}
WHERE user_id = ?
AND NOT {thread_id_clause}
AND epa.notif = 1
AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
GROUP BY epa.room_id
"""
args.extend(thread_ids_args)
txn.execute(sql, args)
for room_id, notif_count in txn:
room_to_count[room_id] += notif_count
return room_to_count
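A quick standalone check of the database difference the max_clause comment above works around: SQLite's scalar MAX() returns NULL if any argument is NULL, so the query COALESCEs to 0 first, whereas PostgreSQL's GREATEST simply ignores NULLs.

import sqlite3

conn = sqlite3.connect(":memory:")
assert conn.execute("SELECT MAX(5, NULL)").fetchone() == (None,)
assert conn.execute("SELECT MAX(COALESCE(5, 0), COALESCE(NULL, 0))").fetchone() == (5,)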
@cached(tree=True, max_entries=5000, iterable=True) @cached(tree=True, max_entries=5000, iterable=True)
async def get_unread_event_push_actions_by_room_for_user( async def get_unread_event_push_actions_by_room_for_user(
self, self,

View file

@ -16,11 +16,11 @@ import logging
import threading import threading
import weakref import weakref
from enum import Enum, auto from enum import Enum, auto
from itertools import chain
from typing import ( from typing import (
TYPE_CHECKING, TYPE_CHECKING,
Any, Any,
Collection, Collection,
Container,
Dict, Dict,
Iterable, Iterable,
List, List,
@ -76,6 +76,7 @@ from synapse.storage.util.id_generators import (
) )
from synapse.storage.util.sequence import build_sequence_generator from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id from synapse.types import JsonDict, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util import unwrapFirstError from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.descriptors import cached, cachedList
@ -879,7 +880,7 @@ class EventsWorkerStore(SQLBaseStore):
async def get_stripped_room_state_from_event_context( async def get_stripped_room_state_from_event_context(
self, self,
context: EventContext, context: EventContext,
state_types_to_include: Container[str], state_keys_to_include: StateFilter,
membership_user_id: Optional[str] = None, membership_user_id: Optional[str] = None,
) -> List[JsonDict]: ) -> List[JsonDict]:
""" """
@ -892,7 +893,7 @@ class EventsWorkerStore(SQLBaseStore):
Args: Args:
context: The event context to retrieve state of the room from. context: The event context to retrieve state of the room from.
state_types_to_include: The type of state events to include. state_keys_to_include: The state events to include, for each event type.
membership_user_id: An optional user ID to include the stripped membership state membership_user_id: An optional user ID to include the stripped membership state
events of. This is useful when generating the stripped state of a room for events of. This is useful when generating the stripped state of a room for
invites. We want to send membership events of the inviter, so that the invites. We want to send membership events of the inviter, so that the
@ -901,21 +902,25 @@ class EventsWorkerStore(SQLBaseStore):
Returns: Returns:
A list of dictionaries, each representing a stripped state event from the room. A list of dictionaries, each representing a stripped state event from the room.
""" """
current_state_ids = await context.get_current_state_ids() if membership_user_id:
types = chain(
state_keys_to_include.to_types(),
[(EventTypes.Member, membership_user_id)],
)
filter = StateFilter.from_types(types)
else:
filter = state_keys_to_include
selected_state_ids = await context.get_current_state_ids(filter)
# We know this event is not an outlier, so this must be # We know this event is not an outlier, so this must be
# non-None. # non-None.
assert current_state_ids is not None assert selected_state_ids is not None
# The state to include # Confusingly, get_current_state_events may return events that are discarded by
state_to_include_ids = [ # the filter, if they're in context._state_delta_due_to_event. Strip these away.
e_id selected_state_ids = filter.filter_state(selected_state_ids)
for k, e_id in current_state_ids.items()
if k[0] in state_types_to_include
or (membership_user_id and k == (EventTypes.Member, membership_user_id))
]
state_to_include = await self.get_events(state_to_include_ids) state_to_include = await self.get_events(selected_state_ids.values())
return [ return [
{ {


@ -924,39 +924,6 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
return batch_size return batch_size
async def _create_receipts_index(self, index_name: str, table: str) -> None:
"""Adds a unique index on `(room_id, receipt_type, user_id)` to the given
receipts table, for non-thread receipts."""
def _create_index(conn: LoggingDatabaseConnection) -> None:
conn.rollback()
# we have to set autocommit, because postgres refuses to
# CREATE INDEX CONCURRENTLY without it.
if isinstance(self.database_engine, PostgresEngine):
conn.set_session(autocommit=True)
try:
c = conn.cursor()
# Now that the duplicates are gone, we can create the index.
concurrently = (
"CONCURRENTLY"
if isinstance(self.database_engine, PostgresEngine)
else ""
)
sql = f"""
CREATE UNIQUE INDEX {concurrently} {index_name}
ON {table}(room_id, receipt_type, user_id)
WHERE thread_id IS NULL
"""
c.execute(sql)
finally:
if isinstance(self.database_engine, PostgresEngine):
conn.set_session(autocommit=False)
await self.db_pool.runWithConnection(_create_index)
async def _background_receipts_linearized_unique_index( async def _background_receipts_linearized_unique_index(
self, progress: dict, batch_size: int self, progress: dict, batch_size: int
) -> int: ) -> int:
@ -999,9 +966,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
_remote_duplicate_receipts_txn, _remote_duplicate_receipts_txn,
) )
await self._create_receipts_index( await self.db_pool.updates.create_index_in_background(
"receipts_linearized_unique_index", index_name="receipts_linearized_unique_index",
"receipts_linearized", table="receipts_linearized",
columns=["room_id", "receipt_type", "user_id"],
where_clause="thread_id IS NULL",
unique=True,
) )
await self.db_pool.updates._end_background_update( await self.db_pool.updates._end_background_update(
@ -1050,9 +1020,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
_remote_duplicate_receipts_txn, _remote_duplicate_receipts_txn,
) )
await self._create_receipts_index( await self.db_pool.updates.create_index_in_background(
"receipts_graph_unique_index", index_name="receipts_graph_unique_index",
"receipts_graph", table="receipts_graph",
columns=["room_id", "receipt_type", "user_id"],
where_clause="thread_id IS NULL",
unique=True,
) )
await self.db_pool.updates._end_background_update( await self.db_pool.updates._end_background_update(


@ -1,5 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C. # Copyright 2019, 2022 The Matrix.org Foundation C.I.C.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -50,8 +50,14 @@ from synapse.storage.database import (
LoggingTransaction, LoggingTransaction,
) )
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import IdGenerator from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
IdGenerator,
MultiWriterIdGenerator,
StreamIdGenerator,
)
from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID
from synapse.util import json_encoder from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached from synapse.util.caches.descriptors import cached
@ -114,6 +120,26 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
self.config: HomeServerConfig = hs.config self.config: HomeServerConfig = hs.config
self._un_partial_stated_rooms_stream_id_gen: AbstractStreamIdGenerator
if isinstance(database.engine, PostgresEngine):
self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
stream_name="un_partial_stated_room_stream",
instance_name=self._instance_name,
tables=[
("un_partial_stated_room_stream", "instance_name", "stream_id")
],
sequence_name="un_partial_stated_room_stream_sequence",
# TODO(faster_joins, multiple writers) Support multiple writers.
writers=["master"],
)
else:
self._un_partial_stated_rooms_stream_id_gen = StreamIdGenerator(
db_conn, "un_partial_stated_room_stream", "stream_id"
)
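# Note: MultiWriterIdGenerator is backed by a Postgres sequence, which SQLite
# does not support, hence the single-writer StreamIdGenerator fallback on the
# SQLite path.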
async def store_room( async def store_room(
self, self,
room_id: str, room_id: str,
@ -1216,70 +1242,6 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return room_servers return room_servers
async def clear_partial_state_room(self, room_id: str) -> bool:
"""Clears the partial state flag for a room.
Args:
room_id: The room whose partial state flag is to be cleared.
Returns:
`True` if the partial state flag has been cleared successfully.
`False` if the partial state flag could not be cleared because the room
still contains events with partial state.
"""
try:
await self.db_pool.runInteraction(
"clear_partial_state_room", self._clear_partial_state_room_txn, room_id
)
return True
except self.db_pool.engine.module.IntegrityError as e:
# Assume that any `IntegrityError`s are due to partial state events.
logger.info(
"Exception while clearing lazy partial-state-room %s, retrying: %s",
room_id,
e,
)
return False
def _clear_partial_state_room_txn(
self, txn: LoggingTransaction, room_id: str
) -> None:
DatabasePool.simple_delete_txn(
txn,
table="partial_state_rooms_servers",
keyvalues={"room_id": room_id},
)
DatabasePool.simple_delete_one_txn(
txn,
table="partial_state_rooms",
keyvalues={"room_id": room_id},
)
self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
self._invalidate_cache_and_stream(
txn, self.get_partial_state_servers_at_join, (room_id,)
)
# We now delete anything from `device_lists_remote_pending` with a
# stream ID less than the minimum
# `partial_state_rooms.device_lists_stream_id`, as we no longer need them.
device_lists_stream_id = DatabasePool.simple_select_one_onecol_txn(
txn,
table="partial_state_rooms",
keyvalues={},
retcol="MIN(device_lists_stream_id)",
allow_none=True,
)
if device_lists_stream_id is None:
# There are no rooms being currently partially joined, so we delete everything.
txn.execute("DELETE FROM device_lists_remote_pending")
else:
sql = """
DELETE FROM device_lists_remote_pending
WHERE stream_id <= ?
"""
txn.execute(sql, (device_lists_stream_id,))
@cached() @cached()
async def is_partial_state_room(self, room_id: str) -> bool: async def is_partial_state_room(self, room_id: str) -> bool:
"""Checks if this room has partial state. """Checks if this room has partial state.
@ -1315,6 +1277,66 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
) )
return result["join_event_id"], result["device_lists_stream_id"] return result["join_event_id"], result["device_lists_stream_id"]
def get_un_partial_stated_rooms_token(self) -> int:
# TODO(faster_joins, multiple writers): This is inappropriate if there
# are multiple writers because workers that don't write often will
# hold all readers up.
# (See `MultiWriterIdGenerator.get_persisted_upto_position` for an
# explanation.)
return self._un_partial_stated_rooms_stream_id_gen.get_current_token()
async def get_un_partial_stated_rooms_from_stream(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
"""Get updates for caches replication stream.
Args:
instance_name: The writer we want to fetch updates from. Unused
here since there is only ever one writer.
last_id: The token to fetch updates from. Exclusive.
current_id: The token to fetch updates up to. Inclusive.
limit: The requested limit for the number of rows to return. The
function may return more or fewer rows.
Returns:
A tuple consisting of: the updates, a token to use to fetch
subsequent updates, and whether we returned fewer rows than exists
between the requested tokens due to the limit.
The token returned can be used in a subsequent call to this
function to get further updates.
The updates are a list of 2-tuples of stream ID and the row data
"""
if last_id == current_id:
return [], current_id, False
def get_un_partial_stated_rooms_from_stream_txn(
txn: LoggingTransaction,
) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
sql = """
SELECT stream_id, room_id
FROM un_partial_stated_room_stream
WHERE ? < stream_id AND stream_id <= ? AND instance_name = ?
ORDER BY stream_id ASC
LIMIT ?
"""
txn.execute(sql, (last_id, current_id, instance_name, limit))
updates = [(row[0], (row[1],)) for row in txn]
limited = False
upto_token = current_id
if len(updates) >= limit:
upto_token = updates[-1][0]
limited = True
return updates, upto_token, limited
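# For example, with limit=2 and pending rows at stream IDs 10, 11 and 12, the
# first call returns the rows for 10 and 11 with upto_token=11 and limited=True;
# passing 11 as last_id on the next call picks up the row for 12.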
return await self.db_pool.runInteraction(
"get_un_partial_stated_rooms_from_stream",
get_un_partial_stated_rooms_from_stream_txn,
)
class _BackgroundUpdates: class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
@ -1806,6 +1828,8 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id") self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
self._instance_name = hs.get_instance_name()
async def upsert_room_on_join( async def upsert_room_on_join(
self, room_id: str, room_version: RoomVersion, state_events: List[EventBase] self, room_id: str, room_version: RoomVersion, state_events: List[EventBase]
) -> None: ) -> None:
@ -2270,3 +2294,84 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
self.is_room_blocked, self.is_room_blocked,
(room_id,), (room_id,),
) )
async def clear_partial_state_room(self, room_id: str) -> bool:
"""Clears the partial state flag for a room.
Args:
room_id: The room whose partial state flag is to be cleared.
Returns:
`True` if the partial state flag has been cleared successfully.
`False` if the partial state flag could not be cleared because the room
still contains events with partial state.
"""
try:
async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id:
await self.db_pool.runInteraction(
"clear_partial_state_room",
self._clear_partial_state_room_txn,
room_id,
un_partial_state_room_stream_id,
)
return True
except self.db_pool.engine.module.IntegrityError as e:
# Assume that any `IntegrityError`s are due to partial state events.
logger.info(
"Exception while clearing lazy partial-state-room %s, retrying: %s",
room_id,
e,
)
return False
def _clear_partial_state_room_txn(
self,
txn: LoggingTransaction,
room_id: str,
un_partial_state_room_stream_id: int,
) -> None:
DatabasePool.simple_delete_txn(
txn,
table="partial_state_rooms_servers",
keyvalues={"room_id": room_id},
)
DatabasePool.simple_delete_one_txn(
txn,
table="partial_state_rooms",
keyvalues={"room_id": room_id},
)
self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
self._invalidate_cache_and_stream(
txn, self.get_partial_state_servers_at_join, (room_id,)
)
DatabasePool.simple_insert_txn(
txn,
"un_partial_stated_room_stream",
{
"stream_id": un_partial_state_room_stream_id,
"instance_name": self._instance_name,
"room_id": room_id,
},
)
# We now delete anything from `device_lists_remote_pending` with a
# stream ID less than or equal to the minimum
# `partial_state_rooms.device_lists_stream_id`, as we no longer need them.
device_lists_stream_id = DatabasePool.simple_select_one_onecol_txn(
txn,
table="partial_state_rooms",
keyvalues={},
retcol="MIN(device_lists_stream_id)",
allow_none=True,
)
if device_lists_stream_id is None:
# There are currently no partially-joined rooms, so we delete everything.
txn.execute("DELETE FROM device_lists_remote_pending")
else:
sql = """
DELETE FROM device_lists_remote_pending
WHERE stream_id <= ?
"""
txn.execute(sql, (device_lists_stream_id,))
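# For example, if the remaining partial-state rooms have device_lists_stream_id
# values of 7 and 12, the minimum is 7, so every pending device-list entry with
# stream_id <= 7 can safely be removed.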


@ -33,8 +33,8 @@ from synapse.storage.database import (
) )
from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, JsonMapping, StateMap from synapse.types import JsonDict, JsonMapping, StateMap
from synapse.types.state import StateFilter
from synapse.util.caches import intern_string from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable


@ -26,6 +26,14 @@ from typing import (
cast, cast,
) )
try:
# Figure out if ICU support is available for searching users.
import icu
USE_ICU = True
except ModuleNotFoundError:
USE_ICU = False
from typing_extensions import TypedDict from typing_extensions import TypedDict
from synapse.api.errors import StoreError from synapse.api.errors import StoreError
@ -886,7 +894,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
limited = len(results) > limit limited = len(results) > limit
return {"limited": limited, "results": results} return {"limited": limited, "results": results[0:limit]}
def _parse_query_sqlite(search_term: str) -> str: def _parse_query_sqlite(search_term: str) -> str:
@ -900,7 +908,7 @@ def _parse_query_sqlite(search_term: str) -> str:
""" """
# Pull out the individual words, discarding any non-word characters. # Pull out the individual words, discarding any non-word characters.
results = re.findall(r"([\w\-]+)", search_term, re.UNICODE) results = _parse_words(search_term)
return " & ".join("(%s* OR %s)" % (result, result) for result in results) return " & ".join("(%s* OR %s)" % (result, result) for result in results)
@ -910,12 +918,63 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]:
We use this so that we can add prefix matching, which isn't something We use this so that we can add prefix matching, which isn't something
that is supported by default. that is supported by default.
""" """
results = _parse_words(search_term)
# Pull out the individual words, discarding any non-word characters.
results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
both = " & ".join("(%s:* | %s)" % (result, result) for result in results) both = " & ".join("(%s:* | %s)" % (result, result) for result in results)
exact = " & ".join("%s" % (result,) for result in results) exact = " & ".join("%s" % (result,) for result in results)
prefix = " & ".join("%s:*" % (result,) for result in results) prefix = " & ".join("%s:*" % (result,) for result in results)
return both, exact, prefix return both, exact, prefix
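# For example, the search term "foo bar" produces:
#   both   = "(foo:* | foo) & (bar:* | bar)"
#   exact  = "foo & bar"
#   prefix = "foo:* & bar:*"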
def _parse_words(search_term: str) -> List[str]:
"""Split the provided search string into a list of its words.
If support for ICU (International Components for Unicode) is available, use it.
Otherwise, fall back to using a regex to detect word boundaries. This latter
solution works well enough for most latin-based languages, but doesn't work as well
with other languages.
Args:
search_term: The search string.
Returns:
A list of the words in the search string.
"""
if USE_ICU:
return _parse_words_with_icu(search_term)
return re.findall(r"([\w\-]+)", search_term, re.UNICODE)
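# For example, with the regex fallback _parse_words("L.A. rock-climbing!")
# returns ["L", "A", "rock-climbing"]: punctuation is dropped but hyphenated
# words are kept intact.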
def _parse_words_with_icu(search_term: str) -> List[str]:
"""Break down the provided search string into its individual words using ICU
(International Components for Unicode).
Args:
search_term: The search string.
Returns:
A list of the words in the search string.
"""
results = []
breaker = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
breaker.setText(search_term)
i = 0
while True:
j = breaker.nextBoundary()
if j < 0:
break
result = search_term[i:j]
# libicu considers spaces and punctuation between words as words, but we don't
# want to include those in results as they would result in syntax errors in SQL
# queries (e.g. "foo bar" would result in the search query including "foo & &
# bar").
if len(re.findall(r"([\w\-]+)", result, re.UNICODE)):
results.append(result)
i = j
return results
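# For instance, breaking "foo bar" yields the segments "foo", " " and "bar";
# the regex check above drops the whitespace-only segment, leaving
# ["foo", "bar"].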


@ -22,8 +22,8 @@ from synapse.storage.database import (
LoggingTransaction, LoggingTransaction,
) )
from synapse.storage.engines import PostgresEngine from synapse.storage.engines import PostgresEngine
from synapse.storage.state import StateFilter
from synapse.types import MutableStateMap, StateMap from synapse.types import MutableStateMap, StateMap
from synapse.types.state import StateFilter
from synapse.util.caches import intern_string from synapse.util.caches import intern_string
if TYPE_CHECKING: if TYPE_CHECKING:
