Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-04-12 00:39:01 -04:00)

Commit d7a3d540ed: Merge remote-tracking branch 'upstream/release-v1.76'
.ci/scripts/check_lockfile.py (new executable file, 23 lines)
@@ -0,0 +1,23 @@
+#! /usr/bin/env python
+import sys
+
+if sys.version_info < (3, 11):
+    raise RuntimeError("Requires at least Python 3.11, to import tomllib")
+
+import tomllib
+
+with open("poetry.lock", "rb") as f:
+    lockfile = tomllib.load(f)
+
+try:
+    lock_version = lockfile["metadata"]["lock-version"]
+    assert lock_version == "2.0"
+except Exception:
+    print(
+        """\
+Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
+and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
+https://matrix-org.github.io/synapse/develop/development/dependencies.html
+"""
+    )
+    raise
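The whole check boils down to reading one TOML key. As a hedged, self-contained sketch of the same lookup, here it is run against an inline snippet standing in for `poetry.lock` (the `python-versions` key is an illustrative assumption, not copied from the real lockfile):

```python
import tomllib  # stdlib since Python 3.11, hence the version guard above

# Illustrative stand-in for poetry.lock; only the [metadata] table matters here.
SAMPLE = """
[metadata]
lock-version = "2.0"
python-versions = "^3.7.1"  # assumed key, shown for illustration only
"""

metadata = tomllib.loads(SAMPLE)["metadata"]
assert metadata["lock-version"] == "2.0"  # the invariant the CI check enforces
```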
@@ -53,7 +53,7 @@ with open('pyproject.toml', 'w') as f:
 "
 python3 -c "$REMOVE_DEV_DEPENDENCIES"

-pip install poetry==1.2.0
+pip install poetry==1.3.2
 poetry lock

 echo "::group::Patched pyproject.toml"
.github/workflows/docs.yaml (vendored, 2 lines changed)
@@ -58,7 +58,7 @@ jobs:

       # Deploy to the target directory.
       - name: Deploy to gh pages
-        uses: peaceiris/actions-gh-pages@64b46b4226a4a12da2239ba3ea5aa73e3163c75b # v3.9.1
+        uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./book
.github/workflows/latest_deps.yml (vendored, 2 lines changed)
@@ -37,7 +37,7 @@ jobs:
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: "3.x"
-          poetry-version: "1.2.0"
+          poetry-version: "1.3.2"
           extras: "all"
       # Dump installed versions for debugging.
       - run: poetry run pip list > before.txt
.github/workflows/release-artifacts.yml (vendored, 2 lines changed)
@@ -127,7 +127,7 @@ jobs:
           python-version: "3.x"

       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
+        run: python -m pip install cibuildwheel==2.9.0

       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
.github/workflows/tests.yml (vendored, 21 lines changed)
@@ -33,11 +33,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: "3.x"
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: "3.x"
+          poetry-version: "1.3.2"
           extras: "all"
       - run: poetry run scripts-dev/generate_sample_config.sh --check
       - run: poetry run scripts-dev/config-lint.sh

@@ -52,6 +51,15 @@
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
       - run: scripts-dev/check_schema_delta.py --force-colors

+  check-lockfile:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
+      - run: .ci/scripts/check_lockfile.py
+
   lint:
     uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2"
     with:

@@ -88,6 +96,7 @@
           ref: ${{ github.event.pull_request.head.sha }}
       - uses: matrix-org/setup-python-poetry@v1
         with:
+          poetry-version: "1.3.2"
           extras: "all"
       - run: poetry run scripts-dev/check_pydantic_models.py

@@ -163,6 +172,7 @@
       - lint-pydantic
       - check-sampleconfig
       - check-schema-delta
+      - check-lockfile
       - lint-clippy
       - lint-rustfmt
     runs-on: ubuntu-latest

@@ -219,6 +229,7 @@
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.job.python-version }}
+          poetry-version: "1.3.2"
           extras: ${{ matrix.job.extras }}
       - name: Await PostgreSQL
         if: ${{ matrix.job.postgres-version }}

@@ -294,6 +305,7 @@
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: '3.7'
+          poetry-version: "1.3.2"
           extras: "all test"

       - run: poetry run trial -j6 tests

@@ -328,6 +340,7 @@
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.python-version }}
+          poetry-version: "1.3.2"
           extras: ${{ matrix.extras }}
       - run: poetry run trial --jobs=2 tests
       - name: Dump logs

@@ -419,6 +432,7 @@
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
+          poetry-version: "1.3.2"
           extras: "postgres"
       - run: .ci/scripts/test_export_data_command.sh
         env:

@@ -470,6 +484,7 @@
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.python-version }}
+          poetry-version: "1.3.2"
           extras: "postgres"
       - run: .ci/scripts/test_synapse_port_db.sh
         id: run_tester_script
.github/workflows/twisted_trunk.yml (vendored, 2 lines changed)
@@ -148,7 +148,7 @@ jobs:
       run: |
         set -x
         DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
-        pipx install poetry==1.2.0
+        pipx install poetry==1.3.2

         poetry remove -n twisted
         poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
.gitignore (vendored, 3 lines changed)
@@ -69,3 +69,6 @@ book/

 # Poetry will create a setup.py, which we don't want to include.
 /setup.py
+
+# Don't include users' poetry configs
+/poetry.toml
CHANGES.md (87 lines changed)
@@ -1,3 +1,90 @@
+Synapse 1.76.0rc1 (2023-01-25)
+==============================
+
+Features
+--------
+
+- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111))
+- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629))
+- Add a dedicated listener configuration for the `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747))
+- Implement support for MSC3890: Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775))
+- Implement experimental support for MSC3930: Push rules for (MSC3381) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787))
+- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811))
+- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839))
+- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870))
+- Faster joins: request partial joins by default. Admins can opt out of this for the time being; see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905))
+
+
+Bugfixes
+--------
+
+- Add an index to improve the performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799))
+- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconnected early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842))
+- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820))
+- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864))
+- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873))
+- Faster joins: fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874))
+- Faster joins: fix incompatibility with joins into restricted rooms where no local users have the ability to invite. ([\#14882](https://github.com/matrix-org/synapse/issues/14882))
+- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910))
+
+
+Updates to the Docker image
+---------------------------
+
+- Bump the default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875))
+
+
+Improved Documentation
+----------------------
+
+- Include the `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667))
+- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773))
+- Add missing documentation for `tag` to the `listeners` section. ([\#14803](https://github.com/matrix-org/synapse/issues/14803))
+- Update documentation in the configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818))
+- Add `worker_manhole` to the configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824))
+- Fix the example config missing the `id` field in the [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). ([\#14845](https://github.com/matrix-org/synapse/issues/14845))
+- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868))
+- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883))
+
+
+Deprecations and Removals
+-------------------------
+
+- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860))
+
+
+Internal Changes
+----------------
+
+- Faster remote room joins (worker mode): do not populate the external hosts-in-room cache when sending events, as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749))
+- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752))
+- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804))
+- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807))
+- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889))
+- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819))
+- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821))
+- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822))
+- Drop the unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825))
+- Merge the two account data and the two device list replication streams. ([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833))
+- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841))
+- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843))
+- Add a check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844))
+- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848))
+- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855))
+- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872))
+- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861))
+- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862))
+- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863))
+- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877))
+- Reduce the maximum time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881))
+- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896))
+- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897))
+- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899))
+- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900))
+- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901))
+- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912))
+
+
 Synapse 1.75.0 (2023-01-17)
 ===========================
Cargo.lock (generated, 4 lines changed)
@@ -294,9 +294,9 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.7.0"
+version = "1.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
+checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -15,19 +15,19 @@ worker_name: generic_worker$i
 worker_replication_host: 127.0.0.1
 worker_replication_http_port: 9093

-worker_main_http_uri: http://localhost:8008/
-
 worker_listeners:
   - type: http
     port: 808$i
+    x_forwarded: true
     resources:
       - names: [client, federation]

 worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
 #worker_pid_file: DATADIR/generic_worker$i.pid
 EOF
 done
 ```

 This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.

-Customise the script to your needs.
+Customise the script to your needs. Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
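For readers who script their deployments in Python rather than shell, a rough equivalent of the loop above might look like this sketch; it uses only the keys visible in the template, and the output file names are assumptions for illustration:

```python
from pathlib import Path

# Mirrors the shell heredoc above: five generic workers on ports 8081-8085,
# each with a unique worker_name. Output paths are assumptions.
TEMPLATE = """\
worker_name: generic_worker{i}
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 808{i}
    x_forwarded: true
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
"""

for i in range(1, 6):
    Path(f"generic_worker{i}.yaml").write_text(TEMPLATE.format(i=i))
```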
@@ -8,7 +8,9 @@ It also prints out the example lines for Synapse main configuration file.

 Remember to route necessary endpoints directly to a worker associated with it.

 If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
+
+Hint: Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.

 ```sh
 #!/bin/bash

@@ -46,9 +48,11 @@ worker_listeners:

   - type: http
     port: $(expr $HTTP_START_PORT + $i)
+    x_forwarded: true
     resources:
       - names: [client]

+  #worker_pid_file: DATADIR/${STREAM_WRITERS[$i]}.pid
 worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
 EOF
 HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:

@@ -91,7 +95,9 @@ Simply run the script to create YAML files in the current folder and print out t

 ```console
 $ ./create_stream_writers.sh
+```

+You should receive an output similar to the following:
+```console
 # Add these lines to your homeserver.yaml.
 # Don't forget to configure your reverse proxy and
 # necessary endpoints to their respective worker.
debian/build_virtualenv (vendored, 3 lines changed)
@@ -31,12 +31,11 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
 esac

 # Manually install Poetry and export a pip-compatible `requirements.txt`
-# We need a Poetry pre-release as the export command is buggy in < 1.2
 TEMP_VENV="$(mktemp -d)"
 python3 -m venv "$TEMP_VENV"
 source "$TEMP_VENV/bin/activate"
 pip install -U pip
-pip install poetry==1.2.0
+pip install poetry==1.3.2
 poetry export \
     --extras all \
     --extras test \
debian/changelog (vendored, 7 lines changed)
@@ -1,3 +1,10 @@
+matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium
+
+  * Use Poetry 1.3.2 to manage the bundled virtualenv included with this package.
+  * New Synapse release 1.76.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 25 Jan 2023 16:21:16 +0000
+
 matrix-synapse-py3 (1.75.0) stable; urgency=medium

   * New Synapse release 1.75.0.
@@ -17,16 +17,10 @@
 # Irritatingly, there is no blessed guide on how to distribute an application with its
 # poetry-managed environment in a docker image. We have opted for
-# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
-# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
-# In case we get bitten by those bugs in the future, the recommendations here might
-# be useful:
-#   https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
-#   https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
+# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs
+# in `poetry export` in the past.


-ARG PYTHON_VERSION=3.9
+ARG PYTHON_VERSION=3.11

 ###
 ### Stage 0: generate requirements.txt

@@ -40,16 +34,16 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
 # Here we use it to set up a cache for apt (and below for pip), to improve
 # rebuild speeds on slow connections.
 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -qq && apt-get install -yqq \
      build-essential git libffi-dev libssl-dev \
      && rm -rf /var/lib/apt/lists/*

 # We install poetry in its own build stage to avoid its dependencies conflicting with
 # synapse's dependencies.
 RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install --user "poetry==1.2.0"
+  pip install --user "poetry==1.3.2"

 WORKDIR /synapse

@@ -70,9 +64,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
 # Otherwise, just create an empty requirements file so that the Dockerfile can
 # proceed.
 RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
      /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
    else \
      touch /synapse/requirements.txt; \
    fi

 ###

@@ -82,24 +76,24 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder

 # install the OS build deps
 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -qq && apt-get install -yqq \
      build-essential \
      libffi-dev \
      libjpeg-dev \
      libpq-dev \
      libssl-dev \
      libwebp-dev \
      libxml++2.6-dev \
      libxslt1-dev \
      openssl \
      zlib1g-dev \
      git \
      curl \
      libicu-dev \
      pkg-config \
      && rm -rf /var/lib/apt/lists/*


 # Install rust and ensure its in the PATH

@@ -140,9 +134,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
 RUN --mount=type=cache,target=/synapse/target,sharing=locked \
   --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
   if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
     pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
   else \
     pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
   fi

 ###

@@ -157,20 +151,20 @@ LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
 LABEL org.opencontainers.image.licenses='Apache-2.0'

 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update -qq && apt-get install -yqq \
     curl \
     gosu \
     libjpeg62-turbo \
     libpq5 \
     libwebp6 \
     xmlsec1 \
     libjemalloc2 \
     libicu67 \
     libssl-dev \
     openssl \
     && rm -rf /var/lib/apt/lists/*

 COPY --from=builder /install /usr/local
 COPY ./docker/start.py /start.py

@@ -181,4 +175,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
 ENTRYPOINT ["/start.py"]

 HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
   CMD curl -fSs http://localhost:8008/health || exit 1
@@ -94,16 +94,16 @@ allow_device_name_lookup_over_federation: true
 experimental_features:
   # Enable history backfilling support
   msc2716_enabled: true
   # server-side support for partial state in /send_join responses
   msc3706_enabled: true
   {% if not workers_in_use %}
   # client-side support for partial state in /send_join responses
   faster_joins: true
   {% endif %}
   # Enable support for polls
   msc3381_polls_enabled: true
   # Enable deleting device-specific notification settings stored in account data
   msc3890_enabled: true
   # Enable removing account data support
   msc3391_enabled: true
   # Filtering /messages by relation type.
   msc3874_enabled: true

 server_notices:
   system_mxid_localpart: _server
@@ -15,6 +15,7 @@ app_service_config_files:
 The format of the AS configuration file is as follows:

 ```yaml
+id: <your-AS-id>
 url: <base url of AS>
 as_token: <token AS will add to requests to HS>
 hs_token: <token HS will add to requests to AS>
@@ -13,23 +13,14 @@ The necessary tools are:
 - [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and
 - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.

-Install them with:
-
-```sh
-pip install -e ".[lint,mypy]"
-```
-
-The easiest way to run the lints is to invoke the linter script as follows.
-
-```sh
-scripts-dev/lint.sh
-```
+See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions
+on how to install the above tools and run the linters.

 It's worth noting that modern IDEs and text editors can run these tools
 automatically on save. It may be worth looking into whether this
 functionality is supported in your editor for a more convenient
 development workflow. It is not, however, recommended to run `mypy`
-on save as they take a while and can be very resource intensive.
+on save as it takes a while and can be very resource intensive.

 ## General rules
@@ -67,7 +67,7 @@ pipx install poetry
 but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
 for other installation methods.

-Synapse requires Poetry version 1.2.0 or later.
+Developing Synapse requires Poetry version 1.3.2 or later.

 Next, open a terminal and install dependencies as follows:
@@ -2,6 +2,13 @@

 This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).

+# Installing
+
+See the [contributing guide](contributing_guide.md#4-install-the-dependencies).
+
+Developers should use Poetry 1.3.2 or higher. If you encounter problems related
+to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version).
+
 # Background

 Synapse uses a variety of third-party Python packages to function as a homeserver.

@@ -123,7 +130,7 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
 ## ...reset my venv to the locked environment?

 ```shell
-poetry install --extras all --remove-untracked
+poetry install --all-extras --sync
 ```

 ## ...delete everything and start over from scratch?

@@ -183,7 +190,6 @@ Either:
 - manually update `pyproject.toml`; then `poetry lock --no-update`; or else
 - `poetry add packagename`. See `poetry add --help`; note the `--dev`,
   `--extras` and `--optional` flags in particular.
-  - **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).

 Include the updated `pyproject.toml` and `poetry.lock` files in your commit.

@@ -196,7 +202,7 @@ poetry remove packagename
 ```

 ought to do the trick. Alternatively, manually update `pyproject.toml` and
-`poetry lock --no-update`. Include the updated `pyproject.toml` and poetry.lock`
+`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
 files in your commit.

 ## ...update the version range for an existing dependency?

@@ -240,9 +246,6 @@ poetry export --extras all

 Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.

-Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
-be required.
-
 ## ...build a test wheel?

 I usually use

@@ -260,7 +263,7 @@ doesn't require poetry. (It's what we use in CI too). However, you could try

 ## Check the version of poetry with `poetry --version`.

-The minimum version of poetry supported by Synapse is 1.2.
+The minimum version of poetry supported by Synapse is 1.3.2.

 It can also be useful to check the version of `poetry-core` in use. If you've
 installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`.
@@ -17,6 +17,7 @@ worker_listeners:
 #
 #- type: http
 #  port: 8035
+#  x_forwarded: true
 #  resources:
 #    - names: [client]
@@ -5,11 +5,10 @@ worker_name: generic_worker1
 worker_replication_host: 127.0.0.1
 worker_replication_http_port: 9093

-worker_main_http_uri: http://localhost:8008/
-
 worker_listeners:
   - type: http
     port: 8083
+    x_forwarded: true
     resources:
       - names: [client, federation]
@@ -8,6 +8,7 @@ worker_replication_http_port: 9093
 worker_listeners:
   - type: http
     port: 8085
+    x_forwarded: true
     resources:
       - names: [media]
@@ -88,6 +88,39 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.76.0
+
+## Faster joins are enabled by default
+
+When joining a room for the first time, Synapse 1.76.0rc1 will request a partial join from the other server by default. Previously, server admins had to opt in to this using an experimental config flag.
+
+Server admins can opt out of this feature for the time being by setting
+
+```yaml
+experimental:
+    faster_joins: false
+```
+
+in their server config.
+
+## Changes to the account data replication streams
+
+Synapse has changed the format of the account data and devices replication
+streams (between workers). This is a forwards- and backwards-incompatible
+change: v1.75 workers cannot process account data replicated by v1.76 workers,
+and vice versa.
+
+Once all workers are upgraded to v1.76 (or downgraded to v1.75), account data
+and device replication will resume as normal.
+
+## Minimum version of Poetry is now 1.3.2
+
+The minimum supported version of Poetry is now 1.3.2 (previously 1.2.0, [since
+Synapse 1.67](#upgrading-to-v1670)). If you have used `poetry install` to
+install Synapse from a source checkout, you should upgrade poetry: see its
+[installation instructions](https://python-poetry.org/docs/#installation).
+For all other installation methods, no action is required.
+
 # Upgrading to v1.74.0

 ## Unicode support in user search
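Because the Poetry requirement above is easy to miss, here is a hedged helper, not part of the release, that fails if the installed Poetry is older than 1.3.2 (parsing the first dotted version number from `poetry --version` output is an assumption about its format):

```python
import re
import subprocess

# Read the installed poetry version, e.g. "Poetry (version 1.3.2)".
out = subprocess.run(
    ["poetry", "--version"], capture_output=True, text=True, check=True
).stdout
match = re.search(r"(\d+)\.(\d+)\.(\d+)", out)
assert match is not None, f"could not parse a version from {out!r}"

if tuple(map(int, match.groups())) < (1, 3, 2):
    raise SystemExit("Poetry is too old; see its installation instructions to upgrade.")
```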
@@ -32,6 +32,14 @@ What users are registered on my server?
 SELECT NAME from users;
 ```

+How can I export user data?
+---
+Synapse includes a Python command to export data for a specific user. It takes the homeserver
+configuration file and the full Matrix ID of the user to export:
+```console
+python -m synapse.app.admin_cmd -c <config_file> export-data <user_id>
+```
+
 Manually resetting passwords
 ---
 Users can reset their password through their client. Alternatively, a server admin
@@ -10,10 +10,10 @@ See the following for how to decode the dense data available from the default logging
 ```

 | Part  | Explanation |
 | ----- | ------------ |
 | AAAA  | Timestamp request was logged (not received) |
-| BBBB  | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the `listeners` config section, normally the port) |
+| BBBB  | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the [`listeners`](../configuration/config_documentation.md#listeners) config section, normally the port) |
 | CCCC  | Line number in code |
 | DDDD  | Log Level |
 | EEEE  | Request Identifier (This identifier is shared by related log lines)|
@@ -295,7 +295,9 @@ Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions)
 For example, for room version 1, `default_room_version` should be set
 to "1".

-Currently defaults to "9".
+Currently defaults to ["10"](https://spec.matrix.org/v1.5/rooms/v10/).
+
+_Changed in Synapse 1.76:_ the default room version was increased from [9](https://spec.matrix.org/v1.5/rooms/v9/) to [10](https://spec.matrix.org/v1.5/rooms/v10/).

 Example configuration:
 ```yaml

@@ -422,6 +424,10 @@ Sub-options for each listener include:

 * `port`: the TCP port to bind to.

+* `tag`: An alias for the port in the logger name. If set, the tag is logged instead
+  of the port. Defaults to `None`; it is optional and only valid for listeners with `type: http`.
+  See the [request log format](../administration/request_log.md) docs.
+
 * `bind_addresses`: a list of local addresses to listen on. The default is
   'all local interfaces'.

@@ -476,6 +482,12 @@ Valid resource names are:

 * `static`: static resources under synapse/static (/_matrix/static). (Mostly useful for 'fallback authentication'.)

+* `health`: the [health check endpoint](../../reverse_proxy.md#health-check-endpoint). This endpoint
+  is by default active for all other resources and does not have to be activated separately.
+  This is only useful if you want to use the health endpoint explicitly on a dedicated port, or
+  for [workers](../../workers.md) and containers without a listener, e.g.
+  [application services](../../workers.md#notifying-application-services).
+
 Example configuration #1:
 ```yaml
 listeners:

@@ -3462,8 +3474,8 @@ This setting defines options related to the user directory.
 This option has the following sub-options:
 * `enabled`: Defines whether users can search the user directory. If false then
   empty responses are returned to all queries. Defaults to true.
-* `search_all_users`: Defines whether to search all users visible to your HS when searching
-  the user directory. If false, search results will only contain users
+* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
+  If false, search results will only contain users
   visible in public rooms and users sharing a room with the requester.
   Defaults to false.

@@ -4019,6 +4031,27 @@ worker_listeners:
   resources:
     - names: [client, federation]
 ```
 ---
+### `worker_manhole`
+
+A worker may have a listener for [`manhole`](../../manhole.md).
+It allows server administrators to access a Python shell on the worker.
+
+Example configuration:
+```yaml
+worker_manhole: 9000
+```
+
+This is a short form for:
+```yaml
+worker_listeners:
+  - port: 9000
+    bind_addresses: ['127.0.0.1']
+    type: manhole
+```
+
+It also needs an additional [`manhole_settings`](#manhole_settings) configuration.
+
+---
 ### `worker_daemonize`
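As a quick way to exercise the `health` resource described above, a minimal probe could look like this sketch (the host and port are assumptions for a listener that exposes the `health` resource):

```python
from urllib.request import urlopen

# Probe the health endpoint; a healthy listener should answer HTTP 200.
with urlopen("http://127.0.0.1:8080/health", timeout=5) as resp:
    assert resp.status == 200, f"unexpected status {resp.status}"
```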
@@ -1,9 +1,11 @@
 # Logging Sample Configuration File

 Below is a sample logging configuration file. This file can be tweaked to control how your
-homeserver will output logs. A restart of the server is generally required to apply any
-changes made to this file. The value of the `log_config` option in your homeserver
-config should be the path to this file.
+homeserver will output logs. The value of the `log_config` option in your homeserver config
+should be the path to this file.
+
+To apply changes made to this file, send Synapse a SIGHUP signal (or, if using `systemd`, run
+`systemctl reload` on the Synapse service).

 Note that a default logging configuration (shown below) is created automatically alongside
 the homeserver config when following the [installation instructions](../../setup/installation.md).
mypy.ini (7 lines changed)
@@ -33,7 +33,6 @@ exclude = (?x)
  |synapse/storage/schema/

  |tests/api/test_auth.py
  |tests/api/test_ratelimiting.py
  |tests/app/test_openid_listener.py
  |tests/appservice/test_scheduler.py
  |tests/events/test_presence_router.py

@@ -48,13 +47,9 @@ exclude = (?x)
  |tests/logging/__init__.py
  |tests/logging/test_terse_json.py
  |tests/module_api/test_api.py
  |tests/push/test_email.py
  |tests/push/test_presentable_names.py
  |tests/push/test_push_rule_evaluator.py
  |tests/rest/client/test_transactions.py
  |tests/rest/media/v1/test_media_storage.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/test_state.py
  |tests/test_terms_auth.py
 )$

@@ -101,7 +96,7 @@ disallow_untyped_defs = True
 [mypy-tests.metrics.*]
 disallow_untyped_defs = True

-[mypy-tests.push.test_bulk_push_rule_evaluator]
+[mypy-tests.push.*]
 disallow_untyped_defs = True

 [mypy-tests.rest.*]
poetry.lock (generated, 3388 lines changed; diff not shown because it is too large)
@@ -48,11 +48,6 @@ line-length = 88
 # E731: do not assign a lambda expression, use a def
 # E501: Line too long (black enforces this for us)
 #
-# See https://github.com/charliermarsh/ruff/#pyflakes
-# F401: unused import
-# F811: Redefinition of unused
-# F821: Undefined name
-#
 # flake8-bugbear compatible checks. Its error codes are described at
 # https://github.com/charliermarsh/ruff/#flake8-bugbear
 # B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks

@@ -64,9 +59,6 @@ ignore = [
     "B024",
     "E501",
     "E731",
-    "F401",
-    "F811",
-    "F821",
 ]
 select = [
     # pycodestyle checks.

@@ -97,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.75.0"
+version = "1.76.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

@@ -317,7 +309,7 @@ all = [
 # We pin black so that our tests don't start failing on new releases.
 isort = ">=5.10.1"
 black = ">=22.3.0"
-ruff = "0.0.215"
+ruff = "0.0.230"

 # Typechecking
 mypy = "*"
@@ -458,16 +458,17 @@ msgpack==1.0.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0"
 netaddr==0.8.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
     --hash=sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac \
     --hash=sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243
-packaging==22.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
-    --hash=sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3 \
-    --hash=sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3
+packaging==23.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
+    --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
+    --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
 parameterized==0.8.1 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
     --hash=sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c \
     --hash=sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9
-phonenumbers==8.13.2 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
-    --hash=sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a \
-    --hash=sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf
+phonenumbers==8.13.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
+    --hash=sha256:6d63455012fc9431105ffc7739befca61c3efc551b287dca58d2be2e745475a9 \
+    --hash=sha256:a577a46c069ad889c7b7cf4dd978751d059edeab28b97acead4775d2ea1fc70a
 pillow==9.4.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
     --hash=sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33 \
     --hash=sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b \
     --hash=sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e \
     --hash=sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35 \

@@ -501,10 +502,16 @@ pillow==9.4.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0"
     --hash=sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a \
     --hash=sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e \
     --hash=sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f \
     --hash=sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848 \
     --hash=sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57 \
     --hash=sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f \
     --hash=sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c \
     --hash=sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9 \
     --hash=sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5 \
     --hash=sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9 \
     --hash=sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d \
     --hash=sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0 \
     --hash=sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1 \
     --hash=sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e \
     --hash=sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815 \
     --hash=sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0 \
@@ -150,8 +150,13 @@ fn bench_eval_message(b: &mut Bencher) {
     )
     .unwrap();

-    let rules =
-        FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false, false);
+    let rules = FilteredPushRules::py_new(
+        PushRules::new(Vec::new()),
+        Default::default(),
+        false,
+        false,
+        false,
+    );

     b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
 }
@@ -1,4 +1,4 @@
-// Copyright 2022 The Matrix.org Foundation C.I.C.
+// Copyright 2022, 2023 The Matrix.org Foundation C.I.C.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -208,6 +208,20 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+    PushRule {
+        rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
+        priority_class: 5,
+        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+            EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")),
+                pattern_type: None,
+            },
+        ))]),
+        actions: Cow::Borrowed(&[]),
+        default: true,
+        default_enabled: true,
+    },
 ];

 pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {

@@ -596,6 +610,68 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_start_one_to_one"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
+                pattern_type: None,
+            })),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_start"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+            EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
+                pattern_type: None,
+            },
+        ))]),
+        actions: Cow::Borrowed(&[Action::Notify]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_end_one_to_one"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[
+            Condition::Known(KnownCondition::RoomMemberCount {
+                is: Some(Cow::Borrowed("2")),
+            }),
+            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
+                pattern_type: None,
+            })),
+        ]),
+        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
+        default: true,
+        default_enabled: true,
+    },
+    PushRule {
+        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_end"),
+        priority_class: 1,
+        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+            EventMatchCondition {
+                key: Cow::Borrowed("type"),
+                pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
+                pattern_type: None,
+            },
+        ))]),
+        actions: Cow::Borrowed(&[Action::Notify]),
+        default: true,
+        default_enabled: true,
+    },
 ];

 lazy_static! {
@@ -483,7 +483,7 @@ fn test_requires_room_version_supports_condition() {
     };
     let rules = PushRules::new(vec![custom_rule]);
     result = evaluator.run(
-        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
+        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true),
         None,
         None,
     );
@@ -411,8 +411,9 @@ impl PushRules {
 pub struct FilteredPushRules {
     push_rules: PushRules,
     enabled_map: BTreeMap<String, bool>,
-    msc3664_enabled: bool,
     msc1767_enabled: bool,
+    msc3381_polls_enabled: bool,
+    msc3664_enabled: bool,
 }

 #[pymethods]

@@ -421,14 +422,16 @@ impl FilteredPushRules {
     pub fn py_new(
         push_rules: PushRules,
         enabled_map: BTreeMap<String, bool>,
-        msc3664_enabled: bool,
         msc1767_enabled: bool,
+        msc3381_polls_enabled: bool,
+        msc3664_enabled: bool,
     ) -> Self {
         Self {
             push_rules,
             enabled_map,
-            msc3664_enabled,
             msc1767_enabled,
+            msc3381_polls_enabled,
+            msc3664_enabled,
         }
     }

@@ -447,13 +450,18 @@ impl FilteredPushRules {
             .iter()
             .filter(|rule| {
                 // Ignore disabled experimental push rules
+
+                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
+                    return false;
+                }

                 if !self.msc3664_enabled
                     && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
                 {
                     return false;
                 }

-                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
+                if !self.msc3381_polls_enabled && rule.rule_id.contains("org.matrix.msc3930") {
                     return false;
                 }
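The effect of the new `msc3381_polls_enabled` flag can be summarised in a few lines of Python. This is a hedged re-statement of the Rust filter above with illustrative names, not code from the repository:

```python
from dataclasses import dataclass
from typing import Iterable, List

@dataclass
class Rule:
    rule_id: str

def filter_experimental(rules: Iterable[Rule], *, msc1767_enabled: bool,
                        msc3664_enabled: bool, msc3381_polls_enabled: bool) -> List[Rule]:
    kept = []
    for rule in rules:
        # Mirrors the three guards in the FilteredPushRules filter above.
        if not msc1767_enabled and "org.matrix.msc1767" in rule.rule_id:
            continue
        if not msc3664_enabled and rule.rule_id == "global/override/.im.nheko.msc3664.reply":
            continue
        if not msc3381_polls_enabled and "org.matrix.msc3930" in rule.rule_id:
            continue
        kept.append(rule)
    return kept
```

With `msc3381_polls_enabled=False`, all of the `.org.matrix.msc3930.rule.poll_*` rules defined in the base rules above would be dropped.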
@@ -190,7 +190,7 @@ fi

 extra_test_args=()

-test_tags="synapse_blacklist,msc3787,msc3874,msc3391"
+test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"

 # All environment variables starting with PASS_ will be shared.
 # (The prefix is stripped off before reaching the container.)

@@ -223,12 +223,9 @@ else
     export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
   fi

-  # We only test faster room joins on monoliths, because they are purposefully
-  # being developed without worker support to start with.
-  #
-  # The tests for importing historical messages (MSC2716) also only pass with monoliths,
-  # currently.
-  test_tags="$test_tags,faster_joins,msc2716"
+  # The tests for importing historical messages (MSC2716)
+  # only pass with monoliths, currently.
+  test_tags="$test_tags,msc2716"
 fi
@@ -11,6 +11,5 @@
 sqlite3 "$1" <<'EOF' >table-save.sql
 .dump users
 .dump access_tokens
-.dump presence
 .dump profiles
 EOF
@@ -101,10 +101,43 @@ echo
 # Print out the commands being run
 set -x

 # Ensure the sort order of imports.
 isort "${files[@]}"

 # Ensure Python code conforms to an opinionated style.
 python3 -m black "${files[@]}"

 # Ensure the sample configuration file conforms to style checks.
 ./scripts-dev/config-lint.sh

 # Catch any common programming mistakes in Python code.
 # --quiet suppresses the update check.
 ruff --quiet "${files[@]}"

 # Catch any common programming mistakes in Rust code.
 #
 # --bins, --examples, --lib, --tests combined explicitly disable checking
 # the benchmarks, which can fail due to `#![feature]` macros not being
 # allowed on the stable rust toolchain (rustc error E0554).
 #
 # --allow-staged and --allow-dirty suppress clippy raising errors
 # for uncommitted files. Only needed when using --fix.
 #
 # -D warnings disables the "warnings" lint.
 #
 # Using --fix has a tendency to cause subsequent runs of clippy to recompile
 # rust code, which can slow down this script. Thus we run clippy without --fix
 # first which is quick, and then re-run it with --fix if an error was found.
 if ! cargo-clippy --bins --examples --lib --tests -- -D warnings > /dev/null 2>&1; then
   cargo-clippy \
     --bins --examples --lib --tests --allow-staged --allow-dirty --fix -- -D warnings
 fi

 # Ensure the formatting of Rust code.
 cargo-fmt

 # Ensure all Pydantic models use strict types.
 ./scripts-dev/check_pydantic_models.py lint

 # Ensure type hints are correct.
 mypy
@@ -7,7 +7,6 @@ from __future__ import annotations
 from typing import (
     Any,
     Callable,
     Generic,
     Iterable,
     Iterator,
     List,
@@ -5,10 +5,8 @@
 from __future__ import annotations

 from typing import (
     AbstractSet,
     Any,
     Callable,
     Generic,
     Hashable,
     Iterable,
     Iterator,
@@ -1,3 +1,17 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union

 from synapse.types import JsonDict

@@ -29,8 +43,9 @@ class FilteredPushRules:
         self,
         push_rules: PushRules,
         enabled_map: Dict[str, bool],
-        msc3664_enabled: bool,
         msc1767_enabled: bool,
+        msc3381_polls_enabled: bool,
+        msc3664_enabled: bool,
     ): ...
     def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

@@ -54,3 +69,6 @@ class PushRuleEvaluator:
         user_id: Optional[str],
         display_name: Optional[str],
     ) -> Collection[Union[Mapping, str]]: ...
+    def matches(
+        self, condition: JsonDict, user_id: Optional[str], display_name: Optional[str]
+    ) -> bool: ...
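Given the updated stub, constructing `FilteredPushRules` from Python now passes the experimental flags in the new order. A hedged example, with positional arguments and an assumed pre-existing `PushRules` instance:

```python
filtered = FilteredPushRules(
    push_rules,  # an existing PushRules instance (assumption)
    {},          # enabled_map
    False,       # msc1767_enabled
    True,        # msc3381_polls_enabled
    False,       # msc3664_enabled
)
```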
@@ -51,6 +51,7 @@ from synapse.logging.context import (
     make_deferred_yieldable,
     run_in_background,
 )
+from synapse.notifier import ReplicationNotifier
 from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
 from synapse.storage.databases.main import PushRuleStore
 from synapse.storage.databases.main.account_data import AccountDataWorkerStore

@@ -260,6 +261,9 @@ class MockHomeserver:
     def should_send_federation(self) -> bool:
         return False

+    def get_replication_notifier(self) -> ReplicationNotifier:
+        return ReplicationNotifier()
+

 class Porter:
     def __init__(
@@ -249,6 +249,7 @@ class RoomEncryptionAlgorithms:
 class AccountDataTypes:
     DIRECT: Final = "m.direct"
     IGNORED_USER_LIST: Final = "m.ignored_user_list"
+    TAG: Final = "m.tag"


 class HistoryVisibility:
@ -199,6 +199,9 @@ class GenericWorkerServer(HomeServer):
|
||||
"A 'media' listener is configured but the media"
|
||||
" repository is disabled. Ignoring."
|
||||
)
|
||||
elif name == "health":
|
||||
# Skip loading, health resource is always included
|
||||
continue
|
||||
|
||||
if name == "openid" and "federation" not in res.names:
|
||||
# Only load the openid resource separately if federation resource
|
||||
@ -279,13 +282,6 @@ def start(config_options: List[str]) -> None:
|
||||
"synapse.app.user_dir",
|
||||
)
|
||||
|
||||
if config.experimental.faster_joins_enabled:
|
||||
raise ConfigError(
|
||||
"You have enabled the experimental `faster_joins` config option, but it is "
|
||||
"not compatible with worker deployments yet. Please disable `faster_joins` "
|
||||
"or run Synapse as a single process deployment instead."
|
||||
)
|
||||
|
||||
synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
|
||||
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
|
||||
|
||||
|
@ -96,6 +96,9 @@ class SynapseHomeServer(HomeServer):
|
||||
# Skip loading openid resource if federation is defined
|
||||
# since federation resource will include openid
|
||||
continue
|
||||
if name == "health":
|
||||
# Skip loading, health resource is always included
|
||||
continue
|
||||
resources.update(self._configure_named_resource(name, res.compress))
|
||||
|
||||
additional_resources = listener_config.http_options.additional_resources
|
||||
|
@ -1,5 +1,3 @@
from __future__ import annotations

import argparse
from typing import (
    Any,
@ -20,7 +18,7 @@ from typing import (

import jinja2

from synapse.config import (
from synapse.config import (  # noqa: F401
    account_validity,
    api,
    appservice,
@ -171,7 +169,7 @@ class RootConfig:
        self, section_name: Literal["caches"]
    ) -> cache.CacheConfig: ...
    @overload
    def reload_config_section(self, section_name: str) -> Config: ...
    def reload_config_section(self, section_name: str) -> "Config": ...

class Config:
    root: RootConfig
@ -204,9 +202,9 @@ def find_config_files(search_paths: List[str]) -> List[str]: ...
class ShardedWorkerHandlingConfig:
    instances: List[str]
    def __init__(self, instances: List[str]) -> None: ...
    def should_handle(self, instance_name: str, key: str) -> bool: ...
    def should_handle(self, instance_name: str, key: str) -> bool: ...  # noqa: F811

class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
    def get_instance(self, key: str) -> str: ...
    def get_instance(self, key: str) -> str: ...  # noqa: F811

def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...

@ -17,6 +17,7 @@ from typing import Any, Optional
import attr

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config import ConfigError
from synapse.config._base import Config
from synapse.types import JsonDict

@ -74,12 +75,16 @@ class ExperimentalConfig(Config):
        )

        # MSC3706 (server-side support for partial state in /send_join responses)
        # Synapse will always serve partial state responses to requests using the stable
        # query parameter `omit_members`. If this flag is set, Synapse will also serve
        # partial state responses to requests using the unstable query parameter
        # `org.matrix.msc3706.partial_state`.
        self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)

        # experimental support for faster joins over federation
        # (MSC2775, MSC3706, MSC3895)
        # requires a target server with msc3706_enabled enabled.
        self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
        # requires a target server that can provide a partial join response (MSC3706)
        self.faster_joins_enabled: bool = experimental.get("faster_joins", True)

        # MSC3720 (Account status endpoint)
        self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
@ -93,6 +98,9 @@ class ExperimentalConfig(Config):
        # MSC2815 (allow room moderators to view redacted event content)
        self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)

        # MSC3391: Removing account data.
        self.msc3391_enabled = experimental.get("msc3391_enabled", False)

        # MSC3773: Thread notifications
        self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)

@ -127,6 +135,24 @@ class ExperimentalConfig(Config):
            "msc3886_endpoint", None
        )

        # MSC3890: Remotely silence local notifications
        # Note: This option requires "experimental_features.msc3391_enabled" to be
        # set to "true", in order to communicate account data deletions to clients.
        self.msc3890_enabled: bool = experimental.get("msc3890_enabled", False)
        if self.msc3890_enabled and not self.msc3391_enabled:
            raise ConfigError(
                "Option 'experimental_features.msc3391' must be set to 'true' to "
                "enable 'experimental_features.msc3890'. MSC3391 functionality is "
                "required to communicate account data deletions to clients."
            )

        # MSC3381: Polls.
        # In practice, supporting polls in Synapse only requires an implementation of
        # MSC3930: Push rules for MSC3381 polls; which is what this option enables.
        self.msc3381_polls_enabled: bool = experimental.get(
            "msc3381_polls_enabled", False
        )

        # MSC3912: Relation-based redactions.
        self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)

@ -139,3 +165,6 @@ class ExperimentalConfig(Config):

        # MSC3391: Removing account data.
        self.msc3391_enabled = experimental.get("msc3391_enabled", False)

        # MSC3925: do not replace events with their edits
        self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False)

@ -151,7 +151,7 @@ DEFAULT_IP_RANGE_BLACKLIST = [
    "fec0::/10",
]

DEFAULT_ROOM_VERSION = "9"
DEFAULT_ROOM_VERSION = "10"

ROOM_COMPLEXITY_TOO_GREAT = (
    "Your homeserver is unable to join rooms this large or complex. "

@ -154,17 +154,21 @@ class Keyring:

        if key_fetchers is None:
            key_fetchers = (
                # Fetch keys from the database.
                StoreKeyFetcher(hs),
                # Fetch keys from a configured Perspectives server.
                PerspectivesKeyFetcher(hs),
                # Fetch keys from the origin server directly.
                ServerKeyFetcher(hs),
            )
        self._key_fetchers = key_fetchers

        self._server_queue: BatchingQueue[
        self._fetch_keys_queue: BatchingQueue[
            _FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
        ] = BatchingQueue(
            "keyring_server",
            clock=hs.get_clock(),
            # The method called to fetch each key
            process_batch_callback=self._inner_fetch_key_requests,
        )

@ -287,7 +291,7 @@ class Keyring:
            minimum_valid_until_ts=verify_request.minimum_valid_until_ts,
            key_ids=list(key_ids_to_find),
        )
        found_keys_by_server = await self._server_queue.add_to_queue(
        found_keys_by_server = await self._fetch_keys_queue.add_to_queue(
            key_request, key=verify_request.server_name
        )

@ -352,7 +356,17 @@ class Keyring:
    async def _inner_fetch_key_requests(
        self, requests: List[_FetchKeyRequest]
    ) -> Dict[str, Dict[str, FetchKeyResult]]:
        """Processing function for the queue of `_FetchKeyRequest`."""
        """Processing function for the queue of `_FetchKeyRequest`.

        Takes a list of key fetch requests, de-duplicates them and then carries out
        each request by invoking self._inner_fetch_key_request.

        Args:
            requests: A list of requests for homeserver verify keys.

        Returns:
            {server name: {key id: fetch key result}}
        """

        logger.debug("Starting fetch for %s", requests)

@ -397,8 +411,23 @@ class Keyring:
    async def _inner_fetch_key_request(
        self, verify_request: _FetchKeyRequest
    ) -> Dict[str, FetchKeyResult]:
        """Attempt to fetch the given key by calling each key fetcher one by
        one.
        """Attempt to fetch the given key by calling each key fetcher one by one.

        If a key is found, check whether its `valid_until_ts` attribute satisfies the
        `minimum_valid_until_ts` attribute of the `verify_request`. If it does, we
        refrain from asking subsequent fetchers for that key.

        Even if the above check fails, we still return the found key - the caller may
        still find the invalid key result useful. In this case, we continue to ask
        subsequent fetchers for the invalid key, in case they return a valid result
        for it. This can happen when fetching a stale key result from the database,
        before querying the origin server for an up-to-date result.

        Args:
            verify_request: The request for a verify key. Can include multiple key IDs.

        Returns:
            A map of {key_id: the key fetch result}.
        """
        logger.debug("Starting fetch for %s", verify_request)

@ -420,25 +449,21 @@ class Keyring:
                if not key:
                    continue

                # If we already have a result for the given key ID we keep the
                # If we already have a result for the given key ID, we keep the
                # one with the highest `valid_until_ts`.
                existing_key = found_keys.get(key_id)
                if existing_key:
                    if key.valid_until_ts <= existing_key.valid_until_ts:
                        continue

                # We always store the returned key even if it doesn't the
                # `minimum_valid_until_ts` requirement, as some verification
                # requests may still be able to be satisfied by it.
                #
                # We still keep looking for the key from other fetchers in that
                # case though.
                found_keys[key_id] = key

                if key.valid_until_ts < verify_request.minimum_valid_until_ts:
                if existing_key and existing_key.valid_until_ts > key.valid_until_ts:
                    continue

                missing_key_ids.discard(key_id)
                # Check if this key's expiry timestamp is valid for the verify request.
                if key.valid_until_ts >= verify_request.minimum_valid_until_ts:
                    # Stop looking for this key from subsequent fetchers.
                    missing_key_ids.discard(key_id)

                # We always store the returned key even if it doesn't meet the
                # `minimum_valid_until_ts` requirement, as some verification
                # requests may still be able to be satisfied by it.
                found_keys[key_id] = key

        return found_keys

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import collections.abc
import logging
import typing
from typing import (
@ -877,7 +878,7 @@ def _check_power_levels(
            if not isinstance(v, int):
                raise SynapseError(400, f"{v!r} must be an integer.")
            if k in {"events", "notifications", "users"}:
                if not isinstance(v, dict) or not all(
                if not isinstance(v, collections.abc.Mapping) or not all(
                    isinstance(v, int) for v in v.values()
                ):
                    raise SynapseError(
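The switch from dict to collections.abc.Mapping matters because event contents are not always plain dicts: when Synapse runs with frozen event dicts, for instance, power-level contents are immutable mappings that fail an isinstance(..., dict) check. A quick illustration; FrozenMapping is a hypothetical stand-in for such a type:

import collections.abc

class FrozenMapping(collections.abc.Mapping):
    # A minimal read-only mapping: passes Mapping checks, fails dict checks.
    def __init__(self, data: dict):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

levels = FrozenMapping({"@alice:example.org": 100})
assert isinstance(levels, collections.abc.Mapping)  # new check: accepted
assert not isinstance(levels, dict)                 # old check: rejected
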
@ -403,6 +403,14 @@ class EventClientSerializer:
    clients.
    """

    def __init__(self, inhibit_replacement_via_edits: bool = False):
        """
        Args:
            inhibit_replacement_via_edits: If this is set to True, then events are
                never replaced by their edits.
        """
        self._inhibit_replacement_via_edits = inhibit_replacement_via_edits

    def serialize_event(
        self,
        event: Union[JsonDict, EventBase],
@ -422,6 +430,8 @@ class EventClientSerializer:
                into the event.
            apply_edits: Whether the content of the event should be modified to reflect
                any replacement in `bundle_aggregations[<event_id>].replace`.
                See also the `inhibit_replacement_via_edits` constructor arg: if that is
                set to True, then this argument is ignored.
        Returns:
            The serialized event
        """
@ -495,7 +505,8 @@ class EventClientSerializer:
                again for additional events in a recursive manner.
            serialized_event: The serialized event which may be modified.
            apply_edits: Whether the content of the event should be modified to reflect
                any replacement in `aggregations.replace`.
                any replacement in `aggregations.replace` (subject to the
                `inhibit_replacement_via_edits` constructor arg).
        """

        # We have already checked that aggregations exist for this event.
@ -518,15 +529,21 @@ class EventClientSerializer:
        if event_aggregations.replace:
            # If there is an edit, optionally apply it to the event.
            edit = event_aggregations.replace
            if apply_edits:
            if apply_edits and not self._inhibit_replacement_via_edits:
                self._apply_edit(event, serialized_event, edit)

            # Include information about it in the relations dict.
            serialized_aggregations[RelationTypes.REPLACE] = {
                "event_id": edit.event_id,
                "origin_server_ts": edit.origin_server_ts,
                "sender": edit.sender,
            }
            #
            # Matrix spec v1.5 (https://spec.matrix.org/v1.5/client-server-api/#server-side-aggregation-of-mreplace-relationships)
            # said that we should only include the `event_id`, `origin_server_ts` and
            # `sender` of the edit; however MSC3925 proposes extending it to the whole
            # of the edit, which is what we do here.
            serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event(
                edit,
                time_now,
                config=config,
                apply_edits=False,
            )

            # Include any threaded replies to this event.
            if event_aggregations.thread:

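To make the MSC3925 change concrete: the m.replace aggregation grows from three fields to a whole serialized edit event. Sketched as Python dicts with invented values:

# Spec v1.5 behaviour: only three fields of the edit were aggregated.
replace_before = {
    "event_id": "$edit",
    "origin_server_ts": 1673000000000,
    "sender": "@alice:example.org",
}

# MSC3925 behaviour: the full edit event is aggregated instead, itself
# serialized with apply_edits=False so the aggregation cannot recurse.
replace_after = {
    "event_id": "$edit",
    "origin_server_ts": 1673000000000,
    "sender": "@alice:example.org",
    "type": "m.room.message",
    "room_id": "!room:example.org",
    "content": {
        "msgtype": "m.text",
        "body": "* corrected text",
        "m.new_content": {"msgtype": "m.text", "body": "corrected text"},
        "m.relates_to": {"rel_type": "m.replace", "event_id": "$original"},
    },
}
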
@ -1014,7 +1014,11 @@ class FederationClient(FederationBase):
        )

    async def send_join(
        self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
        self,
        destinations: Iterable[str],
        pdu: EventBase,
        room_version: RoomVersion,
        partial_state: bool = True,
    ) -> SendJoinResult:
        """Sends a join event to one of a list of homeservers.

@ -1027,6 +1031,10 @@ class FederationClient(FederationBase):
            pdu: event to be sent
            room_version: the version of the room (according to the server that
                did the make_join)
            partial_state: whether to ask the remote server to omit membership state
                events from the response. If the remote server complies,
                `partial_state` in the send join result will be set. Defaults to
                `True`.

        Returns:
            The result of the send join request.
@ -1037,7 +1045,9 @@ class FederationClient(FederationBase):
        """

        async def send_request(destination: str) -> SendJoinResult:
            response = await self._do_send_join(room_version, destination, pdu)
            response = await self._do_send_join(
                room_version, destination, pdu, omit_members=partial_state
            )

            # If an event was returned (and expected to be returned):
            #

@ -1142,9 +1152,14 @@ class FederationClient(FederationBase):
                    % (auth_chain_create_events,)
                )

            if response.partial_state and not response.servers_in_room:
            if response.members_omitted and not response.servers_in_room:
                raise InvalidResponseError(
                    "partial_state was set, but no servers were listed in the room"
                    "members_omitted was set, but no servers were listed in the room"
                )

            if response.members_omitted and not partial_state:
                raise InvalidResponseError(
                    "members_omitted was set, but we asked for full state"
                )

            return SendJoinResult(
@ -1152,7 +1167,7 @@ class FederationClient(FederationBase):
                state=signed_state,
                auth_chain=signed_auth,
                origin=destination,
                partial_state=response.partial_state,
                partial_state=response.members_omitted,
                servers_in_room=response.servers_in_room or [],
            )

@ -1177,7 +1192,11 @@ class FederationClient(FederationBase):
        )

    async def _do_send_join(
        self, room_version: RoomVersion, destination: str, pdu: EventBase
        self,
        room_version: RoomVersion,
        destination: str,
        pdu: EventBase,
        omit_members: bool,
    ) -> SendJoinResponse:
        time_now = self._clock.time_msec()

@ -1188,6 +1207,7 @@ class FederationClient(FederationBase):
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
                omit_members=omit_members,
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,

@ -725,10 +725,12 @@ class FederationServer(FederationBase):
            "state": [p.get_pdu_json(time_now) for p in state_events],
            "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
            "org.matrix.msc3706.partial_state": caller_supports_partial_state,
            "members_omitted": caller_supports_partial_state,
        }

        if servers_in_room is not None:
            resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room)
            resp["servers_in_room"] = list(servers_in_room)

        return resp

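For reference, a successful partial-state /send_join response under the stable identifiers now looks roughly like this, sketched as a Python dict; event bodies and server names are invented:

send_join_response = {
    "event": {},       # the signed join event returned by the resident server
    "state": [],       # non-membership state events for the room
    "auth_chain": [],  # auth events for the returned state
    # Stable name for what MSC3706 called org.matrix.msc3706.partial_state:
    "members_omitted": True,
    # Servers the joining homeserver may ask for the remaining state.
    "servers_in_room": ["example.org", "matrix.org"],
}
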
@ -1500,7 +1502,7 @@ def _get_event_ids_for_partial_state_join(
    prev_state_ids: StateMap[str],
    summary: Dict[str, MemberSummary],
) -> Collection[str]:
    """Calculate state to be retuned in a partial_state send_join
    """Calculate state to be returned in a partial_state send_join

    Args:
        join_event: the join event being send_joined

@ -102,6 +102,10 @@ class TransportLayerClient:
            destination,
            path=path,
            args={"event_id": event_id},
            # This can take a looooooong time for large rooms. Give this a generous
            # timeout of 10 minutes to avoid the partial state resync timing out early
            # and trying a bunch of servers who haven't seen our join yet.
            timeout=600_000,
            parser=_StateParser(room_version),
        )

@ -351,12 +355,16 @@ class TransportLayerClient:
        room_id: str,
        event_id: str,
        content: JsonDict,
        omit_members: bool,
    ) -> "SendJoinResponse":
        path = _create_v2_path("/send_join/%s/%s", room_id, event_id)
        query_params: Dict[str, str] = {}
        if self._faster_joins_enabled:
            # lazy-load state on join
            query_params["org.matrix.msc3706.partial_state"] = "true"
            query_params["org.matrix.msc3706.partial_state"] = (
                "true" if omit_members else "false"
            )
            query_params["omit_members"] = "true" if omit_members else "false"

        return await self.client.put_json(
            destination=destination,
@ -794,7 +802,7 @@ class SendJoinResponse:
    event: Optional[EventBase] = None

    # The room state is incomplete
    partial_state: bool = False
    members_omitted: bool = False

    # List of servers in the room
    servers_in_room: Optional[List[str]] = None
@ -834,16 +842,18 @@ def _event_list_parser(


@ijson.coroutine
def _partial_state_parser(response: SendJoinResponse) -> Generator[None, Any, None]:
def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any, None]:
    """Helper function for use with `ijson.items_coro`

    Parses the partial_state field in send_join responses
    Parses the members_omitted field in send_join responses
    """
    while True:
        val = yield
        if not isinstance(val, bool):
            raise TypeError("partial_state must be a boolean")
        response.partial_state = val
            raise TypeError(
                "members_omitted (formerly org.matrix.msc3706.partial_state) must be a boolean"
            )
        response.members_omitted = val


@ijson.coroutine
@ -904,11 +914,19 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
        if not v1_api:
            self._coros.append(
                ijson.items_coro(
                    _partial_state_parser(self._response),
                    _members_omitted_parser(self._response),
                    "org.matrix.msc3706.partial_state",
                    use_float="True",
                )
            )
            # The stable field name comes last, so it "wins" if the fields disagree
            self._coros.append(
                ijson.items_coro(
                    _members_omitted_parser(self._response),
                    "members_omitted",
                    use_float="True",
                )
            )

            self._coros.append(
                ijson.items_coro(
@ -918,6 +936,15 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
                )
            )

            # Again, stable field name comes last
            self._coros.append(
                ijson.items_coro(
                    _servers_in_room_parser(self._response),
                    "servers_in_room",
                    use_float="True",
                )
            )

    def write(self, data: bytes) -> int:
        for c in self._coros:
            c.send(data)
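The "stable field comes last" trick works because every chunk of the response is fed to the parser coroutines in registration order, so the stable field's assignment lands after the unstable one whenever both are present. A self-contained sketch, with invented field names; ijson's coroutine API is used the same way as in the diff above:

import ijson

@ijson.coroutine
def set_flag(out: dict):
    while True:
        out["flag"] = yield

out: dict = {}
coros = [
    ijson.items_coro(set_flag(out), "org.example.unstable_flag"),
    # Registered last: it sees each chunk after the unstable parser,
    # so its assignment wins when the two fields disagree.
    ijson.items_coro(set_flag(out), "stable_flag"),
]

data = b'{"org.example.unstable_flag": false, "stable_flag": true}'
for c in coros:
    c.send(data)

print(out)  # {'flag': True}
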
@ -422,7 +422,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self._msc3706_enabled = hs.config.experimental.msc3706_enabled
        self._read_msc3706_query_param = hs.config.experimental.msc3706_enabled

    async def on_PUT(
        self,
@ -436,10 +436,16 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
        # match those given in content

        partial_state = False
        if self._msc3706_enabled:
        # The stable query parameter wins, if it disagrees with the unstable
        # parameter for some reason.
        stable_param = parse_boolean_from_args(query, "omit_members", default=None)
        if stable_param is not None:
            partial_state = stable_param
        elif self._read_msc3706_query_param:
            partial_state = parse_boolean_from_args(
                query, "org.matrix.msc3706.partial_state", default=False
            )

        result = await self.handler.on_send_join_request(
            origin, content, room_id, caller_supports_partial_state=partial_state
        )

@ -16,6 +16,7 @@ import logging
import random
from typing import TYPE_CHECKING, Awaitable, Callable, Collection, List, Optional, Tuple

from synapse.api.constants import AccountDataTypes
from synapse.replication.http.account_data import (
    ReplicationAddRoomAccountDataRestServlet,
    ReplicationAddTagRestServlet,
@ -335,7 +336,11 @@ class AccountDataEventSource(EventSource[int, JsonDict]):

        for room_id, room_tags in tags.items():
            results.append(
                {"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id}
                {
                    "type": AccountDataTypes.TAG,
                    "content": {"tags": room_tags},
                    "room_id": room_id,
                }
            )

        (

@ -346,6 +346,7 @@ class DeviceHandler(DeviceWorkerHandler):
        super().__init__(hs)

        self.federation_sender = hs.get_federation_sender()
        self._account_data_handler = hs.get_account_data_handler()
        self._storage_controllers = hs.get_storage_controllers()

        self.device_list_updater = DeviceListUpdater(hs, self)
@ -502,7 +503,7 @@ class DeviceHandler(DeviceWorkerHandler):
            else:
                raise

        # Delete access tokens and e2e keys for each device. Not optimised as it is not
        # Delete data specific to each device. Not optimised as it is not
        # considered as part of a critical path.
        for device_id in device_ids:
            await self._auth_handler.delete_access_tokens_for_user(
@ -512,6 +513,14 @@ class DeviceHandler(DeviceWorkerHandler):
                user_id=user_id, device_id=device_id
            )

            if self.hs.config.experimental.msc3890_enabled:
                # Remove any local notification settings for this device in accordance
                # with MSC3890.
                await self._account_data_handler.remove_account_data_for_user(
                    user_id,
                    f"org.matrix.msc3890.local_notification_settings.{device_id}",
                )

        await self.notify_device_update(user_id, device_ids)

    async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
@ -965,6 +974,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
        self.federation = hs.get_federation_client()
        self.clock = hs.get_clock()
        self.device_handler = device_handler
        self._notifier = hs.get_notifier()

        self._remote_edu_linearizer = Linearizer(name="remote_device_list")

@ -1045,6 +1055,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
            user_id,
            device_id,
        )
        self._notifier.notify_replication()

        room_ids = await self.store.get_rooms_for_user(user_id)
        if not room_ids:

@ -27,6 +27,7 @@ from typing import (
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    Union,
)
@ -47,7 +48,6 @@ from synapse.api.errors import (
    FederationError,
    FederationPullAttemptBackoffError,
    HttpResponseException,
    LimitExceededError,
    NotFoundError,
    RequestSendFailed,
    SynapseError,
@ -171,12 +171,29 @@ class FederationHandler:

        self.third_party_event_rules = hs.get_third_party_event_rules()

        # Tracks running partial state syncs by room ID.
        # Partial state syncs currently only run on the main process, so it's okay to
        # track them in-memory for now.
        self._active_partial_state_syncs: Set[str] = set()
        # Tracks partial state syncs we may want to restart.
        # A dictionary mapping room IDs to (initial destination, other destinations)
        # tuples.
        self._partial_state_syncs_maybe_needing_restart: Dict[
            str, Tuple[Optional[str], Collection[str]]
        ] = {}
        # A lock guarding the partial state flag for rooms.
        # When the lock is held for a given room, no other concurrent code may
        # partial state or un-partial state the room.
        self._is_partial_state_room_linearizer = Linearizer(
            name="_is_partial_state_room_linearizer"
        )

        # if this is the main process, fire off a background process to resume
        # any partial-state-resync operations which were in flight when we
        # were shut down.
        if not hs.config.worker.worker_app:
            run_as_background_process(
                "resume_sync_partial_state_room", self._resume_sync_partial_state_room
                "resume_sync_partial_state_room", self._resume_partial_state_room_sync
            )

    @trace
@ -587,7 +604,23 @@ class FederationHandler:

        self._federation_event_handler.room_queues[room_id] = []

        await self._clean_room_for_join(room_id)
        is_host_joined = await self.store.is_host_joined(room_id, self.server_name)

        if not is_host_joined:
            # We may have old forward extremities lying around if the homeserver left
            # the room completely in the past. Clear them out.
            #
            # Note that this check-then-clear is subject to races where
            # * the homeserver is in the room and stops being in the room just after
            #   the check. We won't reset the forward extremities, but that's okay,
            #   since they will be almost up to date.
            # * the homeserver is not in the room and starts being in the room just
            #   after the check. This can't happen, since `RoomMemberHandler` has a
            #   linearizer lock which prevents concurrent remote joins into the same
            #   room.
            # In short, the races either have an acceptable outcome or should be
            # impossible.
            await self._clean_room_for_join(room_id)

        try:
            # Try the host we successfully got a response to /make_join/
@ -599,94 +632,116 @@ class FederationHandler:
        except ValueError:
            pass

        ret = await self.federation_client.send_join(
            host_list, event, room_version_obj
        )
        async with self._is_partial_state_room_linearizer.queue(room_id):
            already_partial_state_room = await self.store.is_partial_state_room(
                room_id
            )

        event = ret.event
        origin = ret.origin
        state = ret.state
        auth_chain = ret.auth_chain
        auth_chain.sort(key=lambda e: e.depth)
            ret = await self.federation_client.send_join(
                host_list,
                event,
                room_version_obj,
                # Perform a full join when we are already in the room and it is a
                # full state room, since we are not allowed to persist a partial
                # state join event in a full state room. In the future, we could
                # optimize this by always performing a partial state join and
                # computing the state ourselves or retrieving it from the remote
                # homeserver if necessary.
                #
                # There's a race where we leave the room, then perform a full join
                # anyway. This should end up being fast anyway, since we would
                # already have the full room state and auth chain persisted.
                partial_state=not is_host_joined or already_partial_state_room,
            )

        logger.debug("do_invite_join auth_chain: %s", auth_chain)
        logger.debug("do_invite_join state: %s", state)
            event = ret.event
            origin = ret.origin
            state = ret.state
            auth_chain = ret.auth_chain
            auth_chain.sort(key=lambda e: e.depth)

        logger.debug("do_invite_join event: %s", event)
            logger.debug("do_invite_join auth_chain: %s", auth_chain)
            logger.debug("do_invite_join state: %s", state)

        # if this is the first time we've joined this room, it's time to add
        # a row to `rooms` with the correct room version. If there's already a
        # row there, we should override it, since it may have been populated
        # based on an invite request which lied about the room version.
        #
        # federation_client.send_join has already checked that the room
        # version in the received create event is the same as room_version_obj,
        # so we can rely on it now.
        #
        await self.store.upsert_room_on_join(
            room_id=room_id,
            room_version=room_version_obj,
            state_events=state,
        )
            logger.debug("do_invite_join event: %s", event)

        if ret.partial_state:
            # Mark the room as having partial state.
            # The background process is responsible for unmarking this flag,
            # even if the join fails.
            await self.store.store_partial_state_room(
            # if this is the first time we've joined this room, it's time to add
            # a row to `rooms` with the correct room version. If there's already a
            # row there, we should override it, since it may have been populated
            # based on an invite request which lied about the room version.
            #
            # federation_client.send_join has already checked that the room
            # version in the received create event is the same as room_version_obj,
            # so we can rely on it now.
            #
            await self.store.upsert_room_on_join(
                room_id=room_id,
                servers=ret.servers_in_room,
                device_lists_stream_id=self.store.get_device_stream_token(),
                joined_via=origin,
                room_version=room_version_obj,
                state_events=state,
            )

        try:
            max_stream_id = (
                await self._federation_event_handler.process_remote_join(
                    origin,
                    room_id,
                    auth_chain,
                    state,
                    event,
                    room_version_obj,
                    partial_state=ret.partial_state,
                )
            )
        except PartialStateConflictError as e:
            # The homeserver was already in the room and it is no longer partial
            # stated. We ought to be doing a local join instead. Turn the error into
            # a 429, as a hint to the client to try again.
            # TODO(faster_joins): `_should_perform_remote_join` suggests that we may
            #   do a remote join for restricted rooms even if we have full state.
            logger.error(
                "Room %s was un-partial stated while processing remote join.",
                room_id,
            )
            raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
        else:
            # Record the join event id for future use (when we finish the full
            # join). We have to do this after persisting the event to keep foreign
            # key constraints intact.
            if ret.partial_state:
                await self.store.write_partial_state_rooms_join_event_id(
                    room_id, event.event_id
                )
        finally:
            # Always kick off the background process that asynchronously fetches
            # state for the room.
            # If the join failed, the background process is responsible for
            # cleaning up — including unmarking the room as a partial state room.
            if ret.partial_state:
                # Kick off the process of asynchronously fetching the state for this
                # room.
                run_as_background_process(
                    desc="sync_partial_state_room",
                    func=self._sync_partial_state_room,
                    initial_destination=origin,
                    other_destinations=ret.servers_in_room,
            if ret.partial_state and not already_partial_state_room:
                # Mark the room as having partial state.
                # The background process is responsible for unmarking this flag,
                # even if the join fails.
                # TODO(faster_joins):
                #     We may want to reset the partial state info if it's from an
                #     old, failed partial state join.
                #     https://github.com/matrix-org/synapse/issues/13000
                await self.store.store_partial_state_room(
                    room_id=room_id,
                    servers=ret.servers_in_room,
                    device_lists_stream_id=self.store.get_device_stream_token(),
                    joined_via=origin,
                )

            try:
                max_stream_id = (
                    await self._federation_event_handler.process_remote_join(
                        origin,
                        room_id,
                        auth_chain,
                        state,
                        event,
                        room_version_obj,
                        partial_state=ret.partial_state,
                    )
                )
            except PartialStateConflictError:
                # This should be impossible, since we hold the lock on the room's
                # partial statedness.
                logger.error(
                    "Room %s was un-partial stated while processing remote join.",
                    room_id,
                )
                raise
            else:
                # Record the join event id for future use (when we finish the full
                # join). We have to do this after persisting the event to keep
                # foreign key constraints intact.
                if ret.partial_state and not already_partial_state_room:
                    # TODO(faster_joins):
                    #     We may want to reset the partial state info if it's from
                    #     an old, failed partial state join.
                    #     https://github.com/matrix-org/synapse/issues/13000
                    await self.store.write_partial_state_rooms_join_event_id(
                        room_id, event.event_id
                    )
            finally:
                # Always kick off the background process that asynchronously fetches
                # state for the room.
                # If the join failed, the background process is responsible for
                # cleaning up — including unmarking the room as a partial state
                # room.
                if ret.partial_state:
                    # Kick off the process of asynchronously fetching the state for
                    # this room.
                    self._start_partial_state_room_sync(
                        initial_destination=origin,
                        other_destinations=ret.servers_in_room,
                        room_id=room_id,
                    )

        # We wait here until this instance has seen the events come down
        # replication (if we're using replication) as the below uses caches.
        await self._replication.wait_for_stream_position(
|
||||
# well.
|
||||
return None
|
||||
|
||||
async def _resume_sync_partial_state_room(self) -> None:
|
||||
async def _resume_partial_state_room_sync(self) -> None:
|
||||
"""Resumes resyncing of all partial-state rooms after a restart."""
|
||||
assert not self.config.worker.worker_app
|
||||
|
||||
partial_state_rooms = await self.store.get_partial_state_room_resync_info()
|
||||
for room_id, resync_info in partial_state_rooms.items():
|
||||
run_as_background_process(
|
||||
desc="sync_partial_state_room",
|
||||
func=self._sync_partial_state_room,
|
||||
self._start_partial_state_room_sync(
|
||||
initial_destination=resync_info.joined_via,
|
||||
other_destinations=resync_info.servers_in_room,
|
||||
room_id=room_id,
|
||||
)
|
||||
|
||||
def _start_partial_state_room_sync(
|
||||
self,
|
||||
initial_destination: Optional[str],
|
||||
other_destinations: Collection[str],
|
||||
room_id: str,
|
||||
) -> None:
|
||||
"""Starts the background process to resync the state of a partial state room,
|
||||
if it is not already running.
|
||||
|
||||
Args:
|
||||
initial_destination: the initial homeserver to pull the state from
|
||||
other_destinations: other homeservers to try to pull the state from, if
|
||||
`initial_destination` is unavailable
|
||||
room_id: room to be resynced
|
||||
"""
|
||||
|
||||
async def _sync_partial_state_room_wrapper() -> None:
|
||||
if room_id in self._active_partial_state_syncs:
|
||||
# Another local user has joined the room while there is already a
|
||||
# partial state sync running. This implies that there is a new join
|
||||
# event to un-partial state. We might find ourselves in one of a few
|
||||
# scenarios:
|
||||
# 1. There is an existing partial state sync. The partial state sync
|
||||
# un-partial states the new join event before completing and all is
|
||||
# well.
|
||||
# 2. Before the latest join, the homeserver was no longer in the room
|
||||
# and there is an existing partial state sync from our previous
|
||||
# membership of the room. The partial state sync may have:
|
||||
# a) succeeded, but not yet terminated. The room will not be
|
||||
# un-partial stated again unless we restart the partial state
|
||||
# sync.
|
||||
# b) failed, because we were no longer in the room and remote
|
||||
# homeservers were refusing our requests, but not yet
|
||||
# terminated. After the latest join, remote homeservers may
|
||||
# start answering our requests again, so we should restart the
|
||||
# partial state sync.
|
||||
# In the cases where we would want to restart the partial state sync,
|
||||
# the room would have the partial state flag when the partial state sync
|
||||
# terminates.
|
||||
self._partial_state_syncs_maybe_needing_restart[room_id] = (
|
||||
initial_destination,
|
||||
other_destinations,
|
||||
)
|
||||
return
|
||||
|
||||
self._active_partial_state_syncs.add(room_id)
|
||||
|
||||
try:
|
||||
await self._sync_partial_state_room(
|
||||
initial_destination=initial_destination,
|
||||
other_destinations=other_destinations,
|
||||
room_id=room_id,
|
||||
)
|
||||
finally:
|
||||
# Read the room's partial state flag while we still hold the claim to
|
||||
# being the active partial state sync (so that another partial state
|
||||
# sync can't come along and mess with it under us).
|
||||
# Normally, the partial state flag will be gone. If it isn't, then we
|
||||
# may find ourselves in scenario 2a or 2b as described in the comment
|
||||
# above, where we want to restart the partial state sync.
|
||||
is_still_partial_state_room = await self.store.is_partial_state_room(
|
||||
room_id
|
||||
)
|
||||
self._active_partial_state_syncs.remove(room_id)
|
||||
|
||||
if room_id in self._partial_state_syncs_maybe_needing_restart:
|
||||
(
|
||||
restart_initial_destination,
|
||||
restart_other_destinations,
|
||||
) = self._partial_state_syncs_maybe_needing_restart.pop(room_id)
|
||||
|
||||
if is_still_partial_state_room:
|
||||
self._start_partial_state_room_sync(
|
||||
initial_destination=restart_initial_destination,
|
||||
other_destinations=restart_other_destinations,
|
||||
room_id=room_id,
|
||||
)
|
||||
|
||||
run_as_background_process(
|
||||
desc="sync_partial_state_room", func=_sync_partial_state_room_wrapper
|
||||
)
|
||||
|
||||
async def _sync_partial_state_room(
|
||||
self,
|
||||
initial_destination: Optional[str],
|
||||
@ -1688,6 +1823,12 @@ class FederationHandler:
|
||||
`initial_destination` is unavailable
|
||||
room_id: room to be resynced
|
||||
"""
|
||||
# Assume that we run on the main process for now.
|
||||
# TODO(faster_joins,multiple workers)
|
||||
# When moving the sync to workers, we need to ensure that
|
||||
# * `_start_partial_state_room_sync` still prevents duplicate resyncs
|
||||
# * `_is_partial_state_room_linearizer` correctly guards partial state flags
|
||||
# for rooms between the workers doing remote joins and resync.
|
||||
assert not self.config.worker.worker_app
|
||||
|
||||
# TODO(faster_joins): do we need to lock to avoid races? What happens if other
|
||||
@ -1725,20 +1866,19 @@ class FederationHandler:
|
||||
logger.info("Handling any pending device list updates")
|
||||
await self._device_handler.handle_room_un_partial_stated(room_id)
|
||||
|
||||
logger.info("Clearing partial-state flag for %s", room_id)
|
||||
success = await self.store.clear_partial_state_room(room_id)
|
||||
if success:
|
||||
async with self._is_partial_state_room_linearizer.queue(room_id):
|
||||
logger.info("Clearing partial-state flag for %s", room_id)
|
||||
new_stream_id = await self.store.clear_partial_state_room(room_id)
|
||||
|
||||
if new_stream_id is not None:
|
||||
logger.info("State resync complete for %s", room_id)
|
||||
self._storage_controllers.state.notify_room_un_partial_stated(
|
||||
room_id
|
||||
)
|
||||
# Poke the notifier so that other workers see the write to
|
||||
# the un-partial-stated rooms stream.
|
||||
self._notifier.notify_replication()
|
||||
|
||||
# TODO(faster_joins) update room stats and user directory?
|
||||
# https://github.com/matrix-org/synapse/issues/12814
|
||||
# https://github.com/matrix-org/synapse/issues/12815
|
||||
await self._notifier.on_un_partial_stated_room(
|
||||
room_id, new_stream_id
|
||||
)
|
||||
return
|
||||
|
||||
# we raced against more events arriving with partial state. Go round
|
||||
|
@ -2259,6 +2259,10 @@ class FederationEventHandler:
|
||||
event_and_contexts, backfilled=backfilled
|
||||
)
|
||||
|
||||
# After persistence we always need to notify replication there may
|
||||
# be new data.
|
||||
self._notifier.notify_replication()
|
||||
|
||||
if self._ephemeral_messages_enabled:
|
||||
for event in events:
|
||||
# If there's an expiry timestamp on the event, schedule its expiry.
|
||||
|
@ -15,7 +15,7 @@
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple, cast

from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.constants import AccountDataTypes, EduTypes, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig
@ -239,7 +239,7 @@ class InitialSyncHandler:
                tags = tags_by_room.get(event.room_id)
                if tags:
                    account_data_events.append(
                        {"type": "m.tag", "content": {"tags": tags}}
                        {"type": AccountDataTypes.TAG, "content": {"tags": tags}}
                    )

                account_data = account_data_by_room.get(event.room_id, {})
@ -326,7 +326,9 @@ class InitialSyncHandler:
        account_data_events = []
        tags = await self.store.get_tags_for_room(user_id, room_id)
        if tags:
            account_data_events.append({"type": "m.tag", "content": {"tags": tags}})
            account_data_events.append(
                {"type": AccountDataTypes.TAG, "content": {"tags": tags}}
            )

        account_data = await self.store.get_account_data_for_room(user_id, room_id)
        for account_data_type, content in account_data.items():

@ -1542,12 +1542,23 @@ class EventCreationHandler:
        external federation senders don't have to recalculate it themselves.
        """

        for event, _ in events_and_context:
            if not self._external_cache.is_enabled():
                return
        if not self._external_cache.is_enabled():
            return

            # If external cache is enabled we should always have this.
            assert self._external_cache_joined_hosts_updates is not None
        # If external cache is enabled we should always have this.
        assert self._external_cache_joined_hosts_updates is not None

        for event, event_context in events_and_context:
            if event_context.partial_state:
                # To populate the cache for a partial-state event, we either have to
                # block until full state, which the code below does, or change the
                # meaning of cache values to be the list of hosts to which we plan to
                # send events and calculate that instead.
                #
                # The federation senders don't use the external cache when sending
                # events in partial-state rooms anyway, so let's not bother populating
                # the cache.
                continue

            # We actually store two mappings, event ID -> prev state group,
            # state group -> joined hosts, which is much more space efficient

@ -2155,6 +2155,11 @@ class PresenceFederationQueue:
        # This should only be called on a presence writer.
        assert self._presence_writer

        if not states or not destinations:
            # Ignore calls which either don't have any new states or don't need
            # to be sent anywhere.
            return

        if self._federation:
            self._federation.send_presence_to_destinations(
                states=states,

@ -31,7 +31,12 @@ from typing import (
import attr
from prometheus_client import Counter

from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.constants import (
    AccountDataTypes,
    EventContentFields,
    EventTypes,
    Membership,
)
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@ -285,7 +290,7 @@ class SyncHandler:
            expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
        )

        self.rooms_to_exclude = hs.config.server.rooms_to_exclude_from_sync
        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync

    async def wait_for_sync_for_user(
        self,
@ -1334,7 +1339,10 @@ class SyncHandler:
        membership_change_events = []
        if since_token:
            membership_change_events = await self.store.get_membership_changes_for_user(
                user_id, since_token.room_key, now_token.room_key, self.rooms_to_exclude
                user_id,
                since_token.room_key,
                now_token.room_key,
                self.rooms_to_exclude_globally,
            )

        mem_last_change_by_room_id: Dict[str, EventBase] = {}
@ -1369,12 +1377,39 @@ class SyncHandler:
            else:
                mutable_joined_room_ids.discard(room_id)

        # Tweak the set of rooms to return to the client for eager (non-lazy) syncs.
        mutable_rooms_to_exclude = set(self.rooms_to_exclude_globally)
        if not sync_config.filter_collection.lazy_load_members():
            # Non-lazy syncs should never include partially stated rooms.
            # Exclude all partially stated rooms from this sync.
            for room_id in mutable_joined_room_ids:
                if await self.store.is_partial_state_room(room_id):
                    mutable_rooms_to_exclude.add(room_id)

        # Incremental eager syncs should additionally include rooms that
        # - we are joined to
        # - are full-stated
        # - became fully-stated at some point during the sync period
        #   (These rooms will have been omitted during a previous eager sync.)
        forced_newly_joined_room_ids = set()
        if since_token and not sync_config.filter_collection.lazy_load_members():
            un_partial_stated_rooms = (
                await self.store.get_un_partial_stated_rooms_between(
                    since_token.un_partial_stated_rooms_key,
                    now_token.un_partial_stated_rooms_key,
                    mutable_joined_room_ids,
                )
            )
            for room_id in un_partial_stated_rooms:
                if not await self.store.is_partial_state_room(room_id):
                    forced_newly_joined_room_ids.add(room_id)

        # Now we have our list of joined room IDs, exclude as configured and freeze
        joined_room_ids = frozenset(
            (
                room_id
                for room_id in mutable_joined_room_ids
                if room_id not in self.rooms_to_exclude
                if room_id not in mutable_rooms_to_exclude
            )
        )

@ -1391,6 +1426,8 @@ class SyncHandler:
            since_token=since_token,
            now_token=now_token,
            joined_room_ids=joined_room_ids,
            excluded_room_ids=frozenset(mutable_rooms_to_exclude),
            forced_newly_joined_room_ids=frozenset(forced_newly_joined_room_ids),
            membership_change_events=membership_change_events,
        )

@ -1828,14 +1865,16 @@ class SyncHandler:
        # 3. Work out which rooms need reporting in the sync response.
        ignored_users = await self.store.ignored_users(user_id)
        if since_token:
            room_changes = await self._get_rooms_changed(
            room_changes = await self._get_room_changes_for_incremental_sync(
                sync_result_builder, ignored_users
            )
            tags_by_room = await self.store.get_updated_tags(
                user_id, since_token.account_data_key
            )
        else:
            room_changes = await self._get_all_rooms(sync_result_builder, ignored_users)
            room_changes = await self._get_room_changes_for_initial_sync(
                sync_result_builder, ignored_users
            )
            tags_by_room = await self.store.get_tags_for_user(user_id)

        log_kv({"rooms_changed": len(room_changes.room_entries)})
@ -1894,7 +1933,7 @@ class SyncHandler:

        assert since_token

        if membership_change_events:
        if membership_change_events or sync_result_builder.forced_newly_joined_room_ids:
            return True

        stream_id = since_token.room_key.stream
@ -1903,7 +1942,7 @@ class SyncHandler:
            return True
        return False

    async def _get_rooms_changed(
    async def _get_room_changes_for_incremental_sync(
        self,
        sync_result_builder: "SyncResultBuilder",
        ignored_users: FrozenSet[str],
@ -1941,7 +1980,9 @@ class SyncHandler:
        for event in membership_change_events:
            mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)

        newly_joined_rooms: List[str] = []
        newly_joined_rooms: List[str] = list(
            sync_result_builder.forced_newly_joined_room_ids
        )
        newly_left_rooms: List[str] = []
        room_entries: List[RoomSyncResultBuilder] = []
        invited: List[InvitedSyncResult] = []
@ -2147,7 +2188,7 @@ class SyncHandler:
            newly_left_rooms,
        )

    async def _get_all_rooms(
    async def _get_room_changes_for_initial_sync(
        self,
        sync_result_builder: "SyncResultBuilder",
        ignored_users: FrozenSet[str],
@ -2172,7 +2213,7 @@ class SyncHandler:
        room_list = await self.store.get_rooms_for_local_user_where_membership_is(
            user_id=user_id,
            membership_list=Membership.LIST,
            excluded_rooms=self.rooms_to_exclude,
            excluded_rooms=sync_result_builder.excluded_room_ids,
        )

        room_entries = []
@ -2330,7 +2371,9 @@ class SyncHandler:

        account_data_events = []
        if tags is not None:
            account_data_events.append({"type": "m.tag", "content": {"tags": tags}})
            account_data_events.append(
                {"type": AccountDataTypes.TAG, "content": {"tags": tags}}
            )

        for account_data_type, content in account_data.items():
            account_data_events.append(
@ -2541,6 +2584,13 @@ class SyncResultBuilder:
        since_token: The token supplied by user, or None.
        now_token: The token to sync up to.
        joined_room_ids: List of rooms the user is joined to
        excluded_room_ids: Set of room ids we should omit from the /sync response.
        forced_newly_joined_room_ids:
            Rooms that should be presented in the /sync response as if they were
            newly joined during the sync period, even if that's not the case.
            (This is useful if the room was previously excluded from a /sync response,
            and now the client should be made aware of it.)
            Only used by incremental syncs.

        # The following mirror the fields in a sync response
        presence
@ -2557,6 +2607,8 @@ class SyncResultBuilder:
    since_token: Optional[StreamToken]
    now_token: StreamToken
    joined_room_ids: FrozenSet[str]
    excluded_room_ids: FrozenSet[str]
    forced_newly_joined_room_ids: FrozenSet[str]
    membership_change_events: List[EventBase]

    presence: List[UserPresenceState] = attr.Factory(list)

@ -1585,6 +1585,33 @@ class ModuleApi:

        return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None)

    async def set_displayname(
        self,
        user_id: UserID,
        new_displayname: str,
        deactivation: bool = False,
    ) -> None:
        """Sets a user's display name.

        Added in Synapse v1.76.0.

        Args:
            user_id:
                The user whose display name is to be changed.
            new_displayname:
                The new display name to give the user.
            deactivation:
                Whether this change was made while deactivating the user.
        """
        requester = create_requester(user_id)
        await self._hs.get_profile_handler().set_displayname(
            target_user=user_id,
            requester=requester,
            new_displayname=new_displayname,
            by_admin=True,
            deactivation=deactivation,
        )


class PublicRoomListManager:
    """Contains methods for adding to, removing from and querying whether a room
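A minimal usage sketch for the new method, as it might appear in a third-party module; the surrounding function is invented, and only ModuleApi.set_displayname itself comes from the code above:

from synapse.module_api import ModuleApi
from synapse.types import UserID

async def rename_user(api: ModuleApi) -> None:
    # `api` is the ModuleApi instance passed to a module's __init__.
    await api.set_displayname(
        UserID.from_string("@alice:example.org"),
        "Alice",
    )
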
@ -226,8 +226,7 @@ class Notifier:
        self.store = hs.get_datastores().main
        self.pending_new_room_events: List[_PendingRoomEventEntry] = []

        # Called when there are new things to stream over replication
        self.replication_callbacks: List[Callable[[], None]] = []
        self._replication_notifier = hs.get_replication_notifier()
        self._new_join_in_room_callbacks: List[Callable[[str, str], None]] = []

        self._federation_client = hs.get_federation_http_client()
@ -279,7 +278,7 @@ class Notifier:
        it needs to do any asynchronous work, a background thread should be started and
        wrapped with run_as_background_process.
        """
        self.replication_callbacks.append(cb)
        self._replication_notifier.add_replication_callback(cb)

    def add_new_join_in_room_callback(self, cb: Callable[[str, str], None]) -> None:
        """Add a callback that will be called when a user joins a room.
@ -315,6 +314,32 @@ class Notifier:
            event_entries.append((entry, event.event_id))
        await self.notify_new_room_events(event_entries, max_room_stream_token)

    async def on_un_partial_stated_room(
        self,
        room_id: str,
        new_token: int,
    ) -> None:
        """Used by the resync background processes to wake up all listeners
        of this room when it is un-partial-stated.

        It will also notify replication listeners of the change in stream.
        """

        # Wake up all related user stream notifiers
        user_streams = self.room_to_user_streams.get(room_id, set())
        time_now_ms = self.clock.time_msec()
        for user_stream in user_streams:
            try:
                user_stream.notify(
                    StreamKeyType.UN_PARTIAL_STATED_ROOMS, new_token, time_now_ms
                )
            except Exception:
                logger.exception("Failed to notify listener")

        # Poke the replication so that other workers also see the write to
        # the un-partial-stated rooms stream.
        self.notify_replication()

    async def notify_new_room_events(
        self,
        event_entries: List[Tuple[_PendingRoomEventEntry, str]],
@ -741,8 +766,7 @@ class Notifier:

    def notify_replication(self) -> None:
        """Notify any replication listeners that there's a new event"""
        for cb in self.replication_callbacks:
            cb()
        self._replication_notifier.notify_replication()

    def notify_user_joined_room(self, event_id: str, room_id: str) -> None:
        for cb in self._new_join_in_room_callbacks:
@ -759,3 +783,26 @@ class Notifier:
        # Tell the federation client about the fact the server is back up, so
        # that any in flight requests can be immediately retried.
        self._federation_client.wake_destination(server)


@attr.s(auto_attribs=True)
class ReplicationNotifier:
    """Tracks callbacks for things that need to know about stream changes.

    This is separate from the notifier to avoid circular dependencies.
    """

    _replication_callbacks: List[Callable[[], None]] = attr.Factory(list)

    def add_replication_callback(self, cb: Callable[[], None]) -> None:
        """Add a callback that will be called when some new data is available.
        Callback is not given any arguments. It should *not* return a Deferred - if
        it needs to do any asynchronous work, a background thread should be started and
        wrapped with run_as_background_process.
        """
        self._replication_callbacks.append(cb)

    def notify_replication(self) -> None:
        """Notify any replication listeners that there's a new event"""
        for cb in self._replication_callbacks:
            cb()
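The new `ReplicationNotifier` is a plain callback fan-out that the id generators poke on every write. A self-contained toy version of that pattern, mirroring the class above in shape only (nothing else from Synapse is assumed):

    from typing import Callable, List

    class MiniReplicationNotifier:
        def __init__(self) -> None:
            self._callbacks: List[Callable[[], None]] = []

        def add_replication_callback(self, cb: Callable[[], None]) -> None:
            self._callbacks.append(cb)

        def notify_replication(self) -> None:
            # Every registered callback runs synchronously on each write.
            for cb in self._callbacks:
                cb()

    notifier = MiniReplicationNotifier()
    notifier.add_replication_callback(lambda: print("stream advanced"))
    notifier.notify_replication()  # prints "stream advanced"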
@ -17,7 +17,7 @@ import logging
import re
import urllib.parse
from inspect import signature
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Tuple
from typing import TYPE_CHECKING, Any, Awaitable, Callable, ClassVar, Dict, List, Tuple

from prometheus_client import Counter, Gauge

@ -27,6 +27,7 @@ from twisted.web.server import Request
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.http import RequestTimedOutError
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging import opentracing
from synapse.logging.opentracing import trace_with_opname
@ -53,6 +54,9 @@ _outgoing_request_counter = Counter(
)


_STREAM_POSITION_KEY = "_INT_STREAM_POS"


class ReplicationEndpoint(metaclass=abc.ABCMeta):
    """Helper base class for defining new replication HTTP endpoints.

@ -94,6 +98,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
            a connection error is received.
        RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of attempts to retry when
            receiving connection errors, each will backoff exponentially longer.
        WAIT_FOR_STREAMS (bool): Whether to wait for replication streams to
            catch up before processing the request and/or response. Defaults to
            True.
    """

    NAME: str = abc.abstractproperty()  # type: ignore
@ -104,6 +111,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
    RETRY_ON_CONNECT_ERROR = True
    RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5  # =63s (2^6-1)

    WAIT_FOR_STREAMS: ClassVar[bool] = True

    def __init__(self, hs: "HomeServer"):
        if self.CACHE:
            self.response_cache: ResponseCache[str] = ResponseCache(
@ -126,6 +135,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
        if hs.config.worker.worker_replication_secret:
            self._replication_secret = hs.config.worker.worker_replication_secret

        self._streams = hs.get_replication_command_handler().get_streams_to_replicate()
        self._replication = hs.get_replication_data_handler()
        self._instance_name = hs.get_instance_name()

    def _check_auth(self, request: Request) -> None:
        # Get the authorization header.
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
@ -160,7 +173,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):

    @abc.abstractmethod
    async def _handle_request(
        self, request: Request, **kwargs: Any
        self, request: Request, content: JsonDict, **kwargs: Any
    ) -> Tuple[int, JsonDict]:
        """Handle incoming request.

@ -201,6 +214,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):

        @trace_with_opname("outgoing_replication_request")
        async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
            # We have to pull these out here to avoid circular dependencies...
            streams = hs.get_replication_command_handler().get_streams_to_replicate()
            replication = hs.get_replication_data_handler()

            with outgoing_gauge.track_inprogress():
                if instance_name == local_instance_name:
                    raise Exception("Trying to send HTTP request to self")
@ -219,6 +236,24 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):

                data = await cls._serialize_payload(**kwargs)

                if cls.METHOD != "GET" and cls.WAIT_FOR_STREAMS:
                    # Include the current stream positions that we write to. We
                    # don't do this for GETs as they don't have a body, and we
                    # generally assume that a GET won't rely on data we have
                    # written.
                    if _STREAM_POSITION_KEY in data:
                        raise Exception(
                            "data to send contains %r key", _STREAM_POSITION_KEY
                        )

                    data[_STREAM_POSITION_KEY] = {
                        "streams": {
                            stream.NAME: stream.current_token(local_instance_name)
                            for stream in streams
                        },
                        "instance_name": local_instance_name,
                    }

                url_args = [
                    urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS
                ]
@ -308,6 +343,17 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
                ) from e

            _outgoing_request_counter.labels(cls.NAME, 200).inc()

            # Wait on any streams that the remote may have written to.
            for stream_name, position in result.get(
                _STREAM_POSITION_KEY, {}
            ).items():
                await replication.wait_for_stream_position(
                    instance_name=instance_name,
                    stream_name=stream_name,
                    position=position,
                )

            return result

        return send_request
@ -353,6 +399,22 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
        if self._replication_secret:
            self._check_auth(request)

        if self.METHOD == "GET":
            # GET APIs always have an empty body.
            content = {}
        else:
            content = parse_json_object_from_request(request)

        # Wait on any streams that the remote may have written to.
        for stream_name, position in content.get(_STREAM_POSITION_KEY, {"streams": {}})[
            "streams"
        ].items():
            await self._replication.wait_for_stream_position(
                instance_name=content[_STREAM_POSITION_KEY]["instance_name"],
                stream_name=stream_name,
                position=position,
            )

        if self.CACHE:
            txn_id = kwargs.pop("txn_id")

@ -361,13 +423,28 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
            # correctly yet. In particular, there may be issues to do with logging
            # context lifetimes.

            return await self.response_cache.wrap(
                txn_id, self._handle_request, request, **kwargs
            code, response = await self.response_cache.wrap(
                txn_id, self._handle_request, request, content, **kwargs
            )
        else:
            # The `@cancellable` decorator may be applied to `_handle_request`. But we
            # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`,
            # so we have to set up the cancellable flag ourselves.
            request.is_render_cancellable = is_function_cancellable(
                self._handle_request
            )

        # The `@cancellable` decorator may be applied to `_handle_request`. But we
        # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`,
        # so we have to set up the cancellable flag ourselves.
        request.is_render_cancellable = is_function_cancellable(self._handle_request)
            code, response = await self._handle_request(request, content, **kwargs)

        return await self._handle_request(request, **kwargs)
        # Return streams we may have written to in the course of processing this
        # request.
        if _STREAM_POSITION_KEY in response:
            raise Exception("data to send contains %r key", _STREAM_POSITION_KEY)

        if self.WAIT_FOR_STREAMS:
            response[_STREAM_POSITION_KEY] = {
                stream.NAME: stream.current_token(self._instance_name)
                for stream in self._streams
            }

        return code, response
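With this change the base class parses the JSON body once and every `_handle_request` override receives it as `content`; non-GET requests also piggyback stream positions under `_INT_STREAM_POS`. A hedged sketch of what a subclass now looks like — the endpoint, its name, and `self.handler`-style state are inventions for illustration, only the base-class contract comes from the diff:

    from typing import Tuple

    from twisted.web.server import Request

    from synapse.replication.http._base import ReplicationEndpoint
    from synapse.types import JsonDict

    class ExampleDoThingServlet(ReplicationEndpoint):
        NAME = "example_do_thing"  # hypothetical endpoint name
        PATH_ARGS = ("user_id",)
        CACHE = False  # keep the sketch free of txn_id handling

        @staticmethod
        async def _serialize_payload(user_id: str, payload: JsonDict) -> JsonDict:  # type: ignore[override]
            return {"payload": payload}

        async def _handle_request(  # type: ignore[override]
            self, request: Request, content: JsonDict, user_id: str
        ) -> Tuple[int, JsonDict]:
            # `content` is already parsed by the base class; no
            # parse_json_object_from_request() call is needed any more.
            return 200, {"echo": content["payload"], "user_id": user_id}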
@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

@ -61,10 +60,8 @@ class ReplicationAddUserAccountDataRestServlet(ReplicationEndpoint):
        return payload

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, account_data_type: str
        self, request: Request, content: JsonDict, user_id: str, account_data_type: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        max_stream_id = await self.handler.add_account_data_for_user(
            user_id, account_data_type, content["content"]
        )
@ -101,7 +98,7 @@ class ReplicationRemoveUserAccountDataRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, account_data_type: str
        self, request: Request, content: JsonDict, user_id: str, account_data_type: str
    ) -> Tuple[int, JsonDict]:
        max_stream_id = await self.handler.remove_account_data_for_user(
            user_id, account_data_type
@ -143,10 +140,13 @@ class ReplicationAddRoomAccountDataRestServlet(ReplicationEndpoint):
        return payload

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, account_data_type: str
        self,
        request: Request,
        content: JsonDict,
        user_id: str,
        room_id: str,
        account_data_type: str,
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        max_stream_id = await self.handler.add_account_data_to_room(
            user_id, room_id, account_data_type, content["content"]
        )
@ -183,7 +183,12 @@ class ReplicationRemoveRoomAccountDataRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, account_data_type: str
        self,
        request: Request,
        content: JsonDict,
        user_id: str,
        room_id: str,
        account_data_type: str,
    ) -> Tuple[int, JsonDict]:
        max_stream_id = await self.handler.remove_account_data_for_room(
            user_id, room_id, account_data_type
@ -225,10 +230,8 @@ class ReplicationAddTagRestServlet(ReplicationEndpoint):
        return payload

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, tag: str
        self, request: Request, content: JsonDict, user_id: str, room_id: str, tag: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        max_stream_id = await self.handler.add_tag_to_room(
            user_id, room_id, tag, content["content"]
        )
@ -266,7 +269,7 @@ class ReplicationRemoveTagRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, tag: str
        self, request: Request, content: JsonDict, user_id: str, room_id: str, tag: str
    ) -> Tuple[int, JsonDict]:
        max_stream_id = await self.handler.remove_tag_from_room(
            user_id,

@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.logging.opentracing import active_span
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
@ -78,7 +77,7 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, Optional[JsonDict]]:
        user_devices = await self.device_list_updater.user_device_resync(user_id)

@ -138,9 +137,8 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
        return {"user_ids": user_ids}

    async def _handle_request(  # type: ignore[override]
        self, request: Request
        self, request: Request, content: JsonDict
    ) -> Tuple[int, Dict[str, Optional[JsonDict]]]:
        content = parse_json_object_from_request(request)
        user_ids: List[str] = content["user_ids"]

        logger.info("Resync for %r", user_ids)
@ -205,10 +203,8 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
        }

    async def _handle_request(  # type: ignore[override]
        self, request: Request
        self, request: Request, content: JsonDict
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        user_id = content["user_id"]
        device_id = content["device_id"]
        keys = content["keys"]

@ -21,7 +21,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
from synapse.util.metrics import Measure
@ -114,10 +113,8 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):

        return payload

    async def _handle_request(self, request: Request) -> Tuple[int, JsonDict]:  # type: ignore[override]
    async def _handle_request(self, request: Request, content: JsonDict) -> Tuple[int, JsonDict]:  # type: ignore[override]
        with Measure(self.clock, "repl_fed_send_events_parse"):
            content = parse_json_object_from_request(request)

            room_id = content["room_id"]
            backfilled = content["backfilled"]

@ -181,13 +178,10 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
        return {"origin": origin, "content": content}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, edu_type: str
        self, request: Request, content: JsonDict, edu_type: str
    ) -> Tuple[int, JsonDict]:
        with Measure(self.clock, "repl_fed_send_edu_parse"):
            content = parse_json_object_from_request(request)

            origin = content["origin"]
            edu_content = content["content"]
        origin = content["origin"]
        edu_content = content["content"]

        logger.info("Got %r edu from %s", edu_type, origin)

@ -231,13 +225,10 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
        return {"args": args}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, query_type: str
        self, request: Request, content: JsonDict, query_type: str
    ) -> Tuple[int, JsonDict]:
        with Measure(self.clock, "repl_fed_query_parse"):
            content = parse_json_object_from_request(request)

            args = content["args"]
            args["origin"] = content["origin"]
        args = content["args"]
        args["origin"] = content["origin"]

        logger.info("Got %r query from %s", query_type, args["origin"])

@ -274,7 +265,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, room_id: str
        self, request: Request, content: JsonDict, room_id: str
    ) -> Tuple[int, JsonDict]:
        await self.store.clean_room_for_join(room_id)

@ -307,9 +298,8 @@ class ReplicationStoreRoomOnOutlierMembershipRestServlet(ReplicationEndpoint):
        return {"room_version": room_version.identifier}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, room_id: str
        self, request: Request, content: JsonDict, room_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)
        room_version = KNOWN_ROOM_VERSIONS[content["room_version"]]
        await self.store.maybe_store_room_on_outlier_membership(room_id, room_version)
        return 200, {}

@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Optional, Tuple, cast
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

@ -73,10 +72,8 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
        }

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        device_id = content["device_id"]
        initial_display_name = content["initial_display_name"]
        is_guest = content["is_guest"]

@ -17,7 +17,6 @@ from typing import TYPE_CHECKING, List, Optional, Tuple
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID
@ -79,10 +78,8 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
        }

    async def _handle_request(  # type: ignore[override]
        self, request: SynapseRequest, room_id: str, user_id: str
        self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        remote_room_hosts = content["remote_room_hosts"]
        event_content = content["content"]

@ -147,11 +144,10 @@ class ReplicationRemoteKnockRestServlet(ReplicationEndpoint):
    async def _handle_request(  # type: ignore[override]
        self,
        request: SynapseRequest,
        content: JsonDict,
        room_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        remote_room_hosts = content["remote_room_hosts"]
        event_content = content["content"]

@ -217,10 +213,8 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
        }

    async def _handle_request(  # type: ignore[override]
        self, request: SynapseRequest, invite_event_id: str
        self, request: SynapseRequest, content: JsonDict, invite_event_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        txn_id = content["txn_id"]
        event_content = content["content"]

@ -285,10 +279,9 @@ class ReplicationRemoteRescindKnockRestServlet(ReplicationEndpoint):
    async def _handle_request(  # type: ignore[override]
        self,
        request: SynapseRequest,
        content: JsonDict,
        knock_event_id: str,
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        txn_id = content["txn_id"]
        event_content = content["content"]

@ -347,7 +340,12 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, room_id: str, user_id: str, change: str
        self,
        request: Request,
        content: JsonDict,
        room_id: str,
        user_id: str,
        change: str,
    ) -> Tuple[int, JsonDict]:
        logger.info("user membership change: %s in %s", user_id, room_id)

@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, UserID

@ -56,7 +55,7 @@ class ReplicationBumpPresenceActiveTime(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, JsonDict]:
        await self._presence_handler.bump_presence_active_time(
            UserID.from_string(user_id)
@ -107,10 +106,8 @@ class ReplicationPresenceSetState(ReplicationEndpoint):
        }

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        await self._presence_handler.set_state(
            UserID.from_string(user_id),
            content["state"],

@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

@ -61,10 +60,8 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint):
        return payload

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        app_id = content["app_id"]
        pushkey = content["pushkey"]

@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Optional, Tuple
from twisted.web.server import Request

from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

@ -96,10 +95,8 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
        }

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        await self.registration_handler.check_registration_ratelimit(content["address"])

        # Always default admin users to approved (since it means they were created by
@ -150,10 +147,8 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
        return {"auth_result": auth_result, "access_token": access_token}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str
        self, request: Request, content: JsonDict, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        auth_result = content["auth_result"]
        access_token = content["access_token"]

@ -21,7 +21,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID
from synapse.util.metrics import Measure
@ -117,11 +116,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
        return payload

    async def _handle_request(  # type: ignore[override]
        self, request: Request, event_id: str
        self, request: Request, content: JsonDict, event_id: str
    ) -> Tuple[int, JsonDict]:
        with Measure(self.clock, "repl_send_event_parse"):
            content = parse_json_object_from_request(request)

            event_dict = content["event"]
            room_ver = KNOWN_ROOM_VERSIONS[content["room_version"]]
            internal_metadata = content["internal_metadata"]

@ -21,7 +21,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID
from synapse.util.metrics import Measure
@ -115,10 +114,9 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
        return payload

    async def _handle_request(  # type: ignore[override]
        self, request: Request
        self, request: Request, payload: JsonDict
    ) -> Tuple[int, JsonDict]:
        with Measure(self.clock, "repl_send_events_parse"):
            payload = parse_json_object_from_request(request)
            events_and_context = []
            events = payload["events"]
            dont_notify = payload["dont_notify"]

@ -57,7 +57,7 @@ class ReplicationUpdateCurrentStateRestServlet(ReplicationEndpoint):
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, room_id: str
        self, request: Request, content: JsonDict, room_id: str
    ) -> Tuple[int, JsonDict]:
        writer_instance = self._events_shard_config.get_instance(room_id)
        if writer_instance != self._instance_name:

@ -54,6 +54,10 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint):
    PATH_ARGS = ("stream_name",)
    METHOD = "GET"

    # We don't want to wait for replication streams to catch up, as this gets
    # called in the process of catching replication streams up.
    WAIT_FOR_STREAMS = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

@ -67,7 +71,7 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint):
        return {"from_token": from_token, "upto_token": upto_token}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, content: JsonDict, stream_name: str
        self, request: Request, stream_name: str
    ) -> Tuple[int, JsonDict]:
        stream = self.streams.get(stream_name)
        if stream is None:

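The `WAIT_FOR_STREAMS = False` override above matters because this endpoint is itself part of stream catch-up: waiting on streams here would block on the very mechanism doing the catching up. A hedged sketch of the opt-out pattern for any similar endpoint; the class, name, and empty payload are invented for illustration:

    from typing import Tuple

    from twisted.web.server import Request

    from synapse.replication.http._base import ReplicationEndpoint
    from synapse.types import JsonDict

    class ExampleCatchUpServlet(ReplicationEndpoint):
        NAME = "example_catch_up"  # hypothetical
        PATH_ARGS = ()
        METHOD = "GET"
        CACHE = False
        # Opt out: this request runs while streams are still behind,
        # so it must not wait for them to advance.
        WAIT_FOR_STREAMS = False

        @staticmethod
        async def _serialize_payload() -> JsonDict:  # type: ignore[override]
            return {}

        async def _handle_request(  # type: ignore[override]
            self, request: Request, content: JsonDict
        ) -> Tuple[int, JsonDict]:
            return 200, {}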
@ -16,6 +16,7 @@
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple

from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IAddress, IConnector
from twisted.internet.protocol import ReconnectingClientFactory
@ -33,7 +34,6 @@ from synapse.replication.tcp.streams import (
    PushersStream,
    PushRulesStream,
    ReceiptsStream,
    TagAccountDataStream,
    ToDeviceStream,
    TypingStream,
    UnPartialStatedEventStream,
@ -59,7 +59,7 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)

# How long we allow callers to wait for replication updates before timing out.
_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 30
_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5


class DirectTcpReplicationClientFactory(ReconnectingClientFactory):
@ -133,9 +133,9 @@ class ReplicationDataHandler:
        if hs.should_send_federation():
            self.send_handler = FederationSenderHandler(hs)

        # Map from stream to list of deferreds waiting for the stream to
        # Map from stream and instance to list of deferreds waiting for the stream to
        # arrive at a particular position. The lists are sorted by stream position.
        self._streams_to_waiters: Dict[str, List[Tuple[int, Deferred]]] = {}
        self._streams_to_waiters: Dict[Tuple[str, str], List[Tuple[int, Deferred]]] = {}

    async def on_rdata(
        self, stream_name: str, instance_name: str, token: int, rows: list
@ -168,7 +168,7 @@ class ReplicationDataHandler:
            self.notifier.on_new_event(
                StreamKeyType.PUSH_RULES, token, users=[row.user_id for row in rows]
            )
        elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME):
        elif stream_name in AccountDataStream.NAME:
            self.notifier.on_new_event(
                StreamKeyType.ACCOUNT_DATA, token, users=[row.user_id for row in rows]
            )
@ -188,7 +188,7 @@ class ReplicationDataHandler:
        elif stream_name == DeviceListsStream.NAME:
            all_room_ids: Set[str] = set()
            for row in rows:
                if row.entity.startswith("@"):
                if row.entity.startswith("@") and not row.is_signature:
                    room_ids = await self.store.get_rooms_for_user(row.entity)
                    all_room_ids.update(room_ids)
            self.notifier.on_new_event(
@ -207,6 +207,12 @@ class ReplicationDataHandler:
            # we don't need to optimise this for multiple rows.
            for row in rows:
                if row.type != EventsStreamEventRow.TypeId:
                    # The row's data is an `EventsStreamCurrentStateRow`.
                    # When we recompute the current state of a room based on forward
                    # extremities (see `update_current_state`), no new events are
                    # persisted, so we must poke the replication callbacks ourselves.
                    # This functionality is used when finishing up a partial state join.
                    self.notifier.notify_replication()
                    continue
                assert isinstance(row, EventsStreamRow)
                assert isinstance(row.data, EventsStreamEventRow)
@ -254,6 +260,7 @@ class ReplicationDataHandler:
                self._state_storage_controller.notify_room_un_partial_stated(
                    row.room_id
                )
                await self.notifier.on_un_partial_stated_room(row.room_id, token)
        elif stream_name == UnPartialStatedEventStream.NAME:
            for row in rows:
                assert isinstance(row, UnPartialStatedEventStreamRow)
@ -270,7 +277,7 @@ class ReplicationDataHandler:
        # Notify any waiting deferreds. The list is ordered by position so we
        # just iterate through the list until we reach a position that is
        # greater than the received row position.
        waiting_list = self._streams_to_waiters.get(stream_name, [])
        waiting_list = self._streams_to_waiters.get((stream_name, instance_name), [])

        # Index of first item with a position after the current token, i.e we
        # have called all deferreds before this index. If not overwritten by
@ -279,14 +286,13 @@ class ReplicationDataHandler:
        # `len(list)` works for both cases.
        index_of_first_deferred_not_called = len(waiting_list)

        # We don't fire the deferreds until after we finish iterating over the
        # list, to avoid the list changing when we fire the deferreds.
        deferreds_to_callback = []

        for idx, (position, deferred) in enumerate(waiting_list):
            if position <= token:
                try:
                    with PreserveLoggingContext():
                        deferred.callback(None)
                except Exception:
                    # The deferred has been cancelled or timed out.
                    pass
                deferreds_to_callback.append(deferred)
            else:
                # The list is sorted by position so we don't need to continue
                # checking any further entries in the list.
@ -297,6 +303,14 @@ class ReplicationDataHandler:
        # loop. (This maintains the order so no need to resort)
        waiting_list[:] = waiting_list[index_of_first_deferred_not_called:]

        for deferred in deferreds_to_callback:
            try:
                with PreserveLoggingContext():
                    deferred.callback(None)
            except Exception:
                # The deferred has been cancelled or timed out.
                pass

    async def on_position(
        self, stream_name: str, instance_name: str, token: int
    ) -> None:
@ -315,10 +329,18 @@ class ReplicationDataHandler:
        self.send_handler.wake_destination(server)

    async def wait_for_stream_position(
        self, instance_name: str, stream_name: str, position: int
        self,
        instance_name: str,
        stream_name: str,
        position: int,
    ) -> None:
        """Wait until this instance has received updates up to and including
        the given stream position.

        Args:
            instance_name
            stream_name
            position
        """

        if instance_name == self._instance_name:
@ -326,7 +348,7 @@ class ReplicationDataHandler:
            # anyway in that case we don't need to wait.
            return

        current_position = self._streams[stream_name].current_token(self._instance_name)
        current_position = self._streams[stream_name].current_token(instance_name)
        if position <= current_position:
            # We're already past the position
            return
@ -338,17 +360,32 @@ class ReplicationDataHandler:
            deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor
        )

        waiting_list = self._streams_to_waiters.setdefault(stream_name, [])
        waiting_list = self._streams_to_waiters.setdefault(
            (stream_name, instance_name), []
        )

        waiting_list.append((position, deferred))
        waiting_list.sort(key=lambda t: t[0])

        # We measure here to get in flight counts and average waiting time.
        with Measure(self._clock, "repl.wait_for_stream_position"):
            logger.info("Waiting for repl stream %r to reach %s", stream_name, position)
            await make_deferred_yieldable(deferred)
            logger.info(
                "Finished waiting for repl stream %r to reach %s", stream_name, position
                "Waiting for repl stream %r to reach %s (%s)",
                stream_name,
                position,
                instance_name,
            )
            try:
                await make_deferred_yieldable(deferred)
            except defer.TimeoutError:
                logger.error("Timed out waiting for stream %s", stream_name)
                return

            logger.info(
                "Finished waiting for repl stream %r to reach %s (%s)",
                stream_name,
                position,
                instance_name,
            )

    def stop_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
@ -423,7 +460,11 @@ class FederationSenderHandler:
            # The entities are either user IDs (starting with '@') whose devices
            # have changed, or remote servers that we need to tell about
            # changes.
            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
            hosts = {
                row.entity
                for row in rows
                if not row.entity.startswith("@") and not row.is_signature
            }
            for host in hosts:
                self.federation_sender.send_device_messages(host, immediate=False)

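Two behavioural points in the hunks above: waiters are now keyed by (stream name, instance name), and deferreds are collected during the scan and only fired afterwards, so callbacks cannot mutate the list mid-iteration. A self-contained toy model of that bookkeeping, with plain callables standing in for Deferreds (nothing here is Synapse API):

    from typing import Callable, Dict, List, Tuple

    Waiter = Tuple[int, Callable[[], None]]
    waiters: Dict[Tuple[str, str], List[Waiter]] = {}

    def wait_for(stream: str, instance: str, pos: int, cb: Callable[[], None]) -> None:
        lst = waiters.setdefault((stream, instance), [])
        lst.append((pos, cb))
        lst.sort(key=lambda t: t[0])  # keep sorted by position

    def on_rdata(stream: str, instance: str, token: int) -> None:
        lst = waiters.get((stream, instance), [])
        # Collect first, fire after the scan (mirrors deferreds_to_callback).
        ready = [cb for pos, cb in lst if pos <= token]
        lst[:] = [(pos, cb) for pos, cb in lst if pos > token]
        for cb in ready:
            cb()

    wait_for("events", "worker1", 10, lambda: print("reached 10"))
    on_rdata("events", "worker1", 12)  # prints "reached 10"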
@ -58,7 +58,6 @@ from synapse.replication.tcp.streams import (
    PresenceStream,
    ReceiptsStream,
    Stream,
    TagAccountDataStream,
    ToDeviceStream,
    TypingStream,
)
@ -145,7 +144,7 @@ class ReplicationCommandHandler:

                continue

            if isinstance(stream, (AccountDataStream, TagAccountDataStream)):
            if isinstance(stream, AccountDataStream):
                # Only add AccountDataStream and TagAccountDataStream as a source on the
                # instance in charge of account_data persistence.
                if hs.get_instance_name() in hs.config.worker.writers.account_data:

@ -199,33 +199,28 @@ class ReplicationStreamer:
                            # The token has advanced but there is no data to
                            # send, so we send a `POSITION` to inform other
                            # workers of the updated position.
                            if stream.NAME == EventsStream.NAME:
                                # XXX: We only do this for the EventStream as it
                                # turns out that e.g. account data streams share
                                # their "current token" with each other, meaning
                                # that it is *not* safe to send a POSITION.

                                # Note: `last_token` may not *actually* be the
                                # last token we sent out in a RDATA or POSITION.
                                # This can happen if we sent out an RDATA for
                                # position X when our current token was say X+1.
                                # Other workers will see RDATA for X and then a
                                # POSITION with last token of X+1, which will
                                # cause them to check if there were any missing
                                # updates between X and X+1.
                                logger.info(
                                    "Sending position: %s -> %s",

                            # Note: `last_token` may not *actually* be the
                            # last token we sent out in a RDATA or POSITION.
                            # This can happen if we sent out an RDATA for
                            # position X when our current token was say X+1.
                            # Other workers will see RDATA for X and then a
                            # POSITION with last token of X+1, which will
                            # cause them to check if there were any missing
                            # updates between X and X+1.
                            logger.info(
                                "Sending position: %s -> %s",
                                stream.NAME,
                                current_token,
                            )
                                self.command_handler.send_command(
                                    PositionCommand(
                                        stream.NAME,
                                        self._instance_name,
                                        last_token,
                                        current_token,
                                    )
                            self.command_handler.send_command(
                                PositionCommand(
                                    stream.NAME,
                                    self._instance_name,
                                    last_token,
                                    current_token,
                                )
                            )
                            continue

                    # Some streams return multiple rows with the same stream IDs,

@ -35,10 +35,8 @@ from synapse.replication.tcp.streams._base import (
    PushRulesStream,
    ReceiptsStream,
    Stream,
    TagAccountDataStream,
    ToDeviceStream,
    TypingStream,
    UserSignatureStream,
)
from synapse.replication.tcp.streams.events import EventsStream
from synapse.replication.tcp.streams.federation import FederationStream
@ -62,9 +60,7 @@ STREAMS_MAP = {
        DeviceListsStream,
        ToDeviceStream,
        FederationStream,
        TagAccountDataStream,
        AccountDataStream,
        UserSignatureStream,
        UnPartialStatedRoomStream,
        UnPartialStatedEventStream,
    )
@ -83,9 +79,7 @@ __all__ = [
    "CachesStream",
    "DeviceListsStream",
    "ToDeviceStream",
    "TagAccountDataStream",
    "AccountDataStream",
    "UserSignatureStream",
    "UnPartialStatedRoomStream",
    "UnPartialStatedEventStream",
]

@ -28,8 +28,8 @@ from typing import (
)

import attr

from synapse.api.constants import AccountDataTypes
from synapse.replication.http.streams import ReplicationGetStreamUpdates
from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer
@ -463,18 +463,67 @@ class DeviceListsStream(Stream):
    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class DeviceListsStreamRow:
        entity: str
        # Indicates that a user has signed their own device with their user-signing key
        is_signature: bool

    NAME = "device_lists"
    ROW_TYPE = DeviceListsStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main
        self.store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            current_token_without_instance(store.get_device_stream_token),
            store.get_all_device_list_changes_for_remotes,
            current_token_without_instance(self.store.get_device_stream_token),
            self._update_function,
        )

    async def _update_function(
        self,
        instance_name: str,
        from_token: Token,
        current_token: Token,
        target_row_count: int,
    ) -> StreamUpdateResult:
        (
            device_updates,
            devices_to_token,
            devices_limited,
        ) = await self.store.get_all_device_list_changes_for_remotes(
            instance_name, from_token, current_token, target_row_count
        )

        (
            signatures_updates,
            signatures_to_token,
            signatures_limited,
        ) = await self.store.get_all_user_signature_changes_for_remotes(
            instance_name, from_token, current_token, target_row_count
        )

        upper_limit_token = current_token
        if devices_limited:
            upper_limit_token = min(upper_limit_token, devices_to_token)
        if signatures_limited:
            upper_limit_token = min(upper_limit_token, signatures_to_token)

        device_updates = [
            (stream_id, (entity, False))
            for stream_id, (entity,) in device_updates
            if stream_id <= upper_limit_token
        ]

        signatures_updates = [
            (stream_id, (entity, True))
            for stream_id, (entity,) in signatures_updates
            if stream_id <= upper_limit_token
        ]

        updates = list(
            heapq.merge(device_updates, signatures_updates, key=lambda row: row[0])
        )

        return updates, upper_limit_token, devices_limited or signatures_limited


class ToDeviceStream(Stream):
    """New to_device messages for a client"""
@ -495,27 +544,6 @@ class ToDeviceStream(Stream):
        )


class TagAccountDataStream(Stream):
    """Someone added/removed a tag for a room"""

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class TagAccountDataStreamRow:
        user_id: str
        room_id: str
        data: JsonDict

    NAME = "tag_account_data"
    ROW_TYPE = TagAccountDataStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            current_token_without_instance(store.get_max_account_data_stream_id),
            store.get_all_updated_tags,
        )


class AccountDataStream(Stream):
    """Global or per room account data was changed"""

@ -560,6 +588,19 @@ class AccountDataStream(Stream):
            to_token = room_results[-1][0]
            limited = True

        tags, tag_to_token, tags_limited = await self.store.get_all_updated_tags(
            instance_name,
            from_token,
            to_token,
            limit,
        )

        # again, if the tag results hit the limit, limit the global results to
        # the same stream token.
        if tags_limited:
            to_token = tag_to_token
            limited = True

        # convert the global results to the right format, and limit them to the to_token
        # at the same time
        global_rows = (
@ -568,11 +609,16 @@ class AccountDataStream(Stream):
            if stream_id <= to_token
        )

        # we know that the room_results are already limited to `to_token` so no need
        # for a check on `stream_id` here.
        room_rows = (
            (stream_id, (user_id, room_id, account_data_type))
            for stream_id, user_id, room_id, account_data_type in room_results
            if stream_id <= to_token
        )

        tag_rows = (
            (stream_id, (user_id, room_id, AccountDataTypes.TAG))
            for stream_id, user_id, room_id in tags
            if stream_id <= to_token
        )

        # We need to return a sorted list, so merge them together.
@ -582,24 +628,7 @@ class AccountDataStream(Stream):
        # leading to a comparison between the data tuples. The comparison could
        # fail due to attempting to compare the `room_id` which results in a
        # `TypeError` from comparing a `str` vs `None`.
        updates = list(heapq.merge(room_rows, global_rows, key=lambda row: row[0]))
        return updates, to_token, limited


class UserSignatureStream(Stream):
    """A user has signed their own device with their user-signing key"""

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class UserSignatureStreamRow:
        user_id: str

    NAME = "user_signature"
    ROW_TYPE = UserSignatureStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            current_token_without_instance(store.get_device_stream_token),
            store.get_all_user_signature_changes_for_remotes,
        updates = list(
            heapq.merge(room_rows, global_rows, tag_rows, key=lambda row: row[0])
        )
        return updates, to_token, limited

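Both merged streams above rely on the same trick: each input list is already sorted by stream id, so `heapq.merge` with a key on `row[0]` produces a globally ordered result without ever comparing the payload tuples. A self-contained demonstration with made-up row data:

    import heapq

    # Each list is pre-sorted by stream id, as the store methods guarantee.
    global_rows = [(1, ("@a:hs", None, "m.push_rules")), (4, ("@b:hs", None, "m.direct"))]
    room_rows = [(2, ("@a:hs", "!r:hs", "m.fully_read"))]
    tag_rows = [(3, ("@b:hs", "!r:hs", "m.tag"))]

    # The key avoids comparing the data tuples (which may contain None).
    updates = list(heapq.merge(global_rows, room_rows, tag_rows, key=lambda row: row[0]))
    assert [stream_id for stream_id, _ in updates] == [1, 2, 3, 4]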
@ -16,7 +16,6 @@ from typing import TYPE_CHECKING
import attr

from synapse.replication.tcp.streams import Stream
from synapse.replication.tcp.streams._base import current_token_without_instance

if TYPE_CHECKING:
    from synapse.server import HomeServer
@ -42,8 +41,7 @@ class UnPartialStatedRoomStream(Stream):
        store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            # TODO(faster_joins, multiple writers): we need to account for instance names
            current_token_without_instance(store.get_un_partial_stated_rooms_token),
            store.get_un_partial_stated_rooms_token,
            store.get_un_partial_stated_rooms_from_stream,
        )

@ -70,7 +68,6 @@ class UnPartialStatedEventStream(Stream):
        store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            # TODO(faster_joins, multiple writers): we need to account for instance names
            current_token_without_instance(store.get_un_partial_stated_events_token),
            store.get_un_partial_stated_events_token,
            store.get_un_partial_stated_events_from_stream,
        )

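The switch above replaces an instance-ignorant token getter (wrapped by `current_token_without_instance`) with one that takes the writer's instance name directly. A toy contrast between the two shapes; the store functions here are stand-ins, only the wrapper's role is taken from the diff:

    from typing import Callable

    def current_token_without_instance(fn: Callable[[], int]) -> Callable[[str], int]:
        # Adapts a getter that doesn't care which writer is asking.
        return lambda instance_name: fn()

    def global_token() -> int:  # stand-in store method
        return 7

    per_instance_tokens = {"worker1": 7, "worker2": 5}  # illustrative data

    def instance_aware_token(instance_name: str) -> int:
        # The shape the un-partial-stated streams switch to.
        return per_instance_tokens[instance_name]

    assert current_token_without_instance(global_token)("worker1") == 7
    assert instance_aware_token("worker2") == 5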
@ -107,7 +107,7 @@ from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpC
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
from synapse.module_api import ModuleApi
from synapse.notifier import Notifier
from synapse.notifier import Notifier, ReplicationNotifier
from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
from synapse.push.pusherpool import PusherPool
from synapse.replication.tcp.client import ReplicationDataHandler
@ -389,6 +389,10 @@ class HomeServer(metaclass=abc.ABCMeta):
    def get_notifier(self) -> Notifier:
        return Notifier(self)

    @cache_in_self
    def get_replication_notifier(self) -> ReplicationNotifier:
        return ReplicationNotifier()

    @cache_in_self
    def get_auth(self) -> Auth:
        return Auth(self)
@ -743,7 +747,7 @@ class HomeServer(metaclass=abc.ABCMeta):

    @cache_in_self
    def get_event_client_serializer(self) -> EventClientSerializer:
        return EventClientSerializer()
        return EventClientSerializer(self.config.experimental.msc3925_inhibit_edit)

    @cache_in_self
    def get_password_policy_handler(self) -> PasswordPolicyHandler:

@ -493,8 +493,6 @@ class StateStorageController:
            up to date.
        """
        # FIXME(faster_joins): what do we do here?
        # https://github.com/matrix-org/synapse/issues/12814
        # https://github.com/matrix-org/synapse/issues/12815
        # https://github.com/matrix-org/synapse/issues/13008

        return await self.stores.main.get_partial_current_state_deltas(

@ -27,7 +27,7 @@ from typing import (
)

from synapse.api.constants import AccountDataTypes
from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream
from synapse.replication.tcp.streams import AccountDataStream
from synapse.storage._base import db_to_json
from synapse.storage.database import (
    DatabasePool,
@ -75,6 +75,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
            self._account_data_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                notifier=hs.get_replication_notifier(),
                stream_name="account_data",
                instance_name=self._instance_name,
                tables=[
@ -95,6 +96,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
            # SQLite).
            self._account_data_id_gen = StreamIdGenerator(
                db_conn,
                hs.get_replication_notifier(),
                "room_account_data",
                "stream_id",
                extra_tables=[("room_tags_revisions", "stream_id")],
@ -454,9 +456,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
    def process_replication_position(
        self, stream_name: str, instance_name: str, token: int
    ) -> None:
        if stream_name == TagAccountDataStream.NAME:
            self._account_data_id_gen.advance(instance_name, token)
        elif stream_name == AccountDataStream.NAME:
        if stream_name == AccountDataStream.NAME:
            self._account_data_id_gen.advance(instance_name, token)
        super().process_replication_position(stream_name, instance_name, token)

@ -75,6 +75,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
            self._cache_id_gen = MultiWriterIdGenerator(
                db_conn,
                database,
                notifier=hs.get_replication_notifier(),
                stream_name="caches",
                instance_name=hs.get_instance_name(),
                tables=[

@ -91,6 +91,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                MultiWriterIdGenerator(
                    db_conn=db_conn,
                    db=database,
                    notifier=hs.get_replication_notifier(),
                    stream_name="to_device",
                    instance_name=self._instance_name,
                    tables=[("device_inbox", "instance_name", "stream_id")],
@ -101,7 +102,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
        else:
            self._can_write_to_device = True
            self._device_inbox_id_gen = StreamIdGenerator(
                db_conn, "device_inbox", "stream_id"
                db_conn, hs.get_replication_notifier(), "device_inbox", "stream_id"
            )

        max_device_inbox_id = self._device_inbox_id_gen.get_current_token()

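A pattern repeated across these storage hunks: every id generator now takes the replication notifier, so advancing a stream immediately wakes replication listeners. A toy generator showing that wiring in isolation (this is an illustration, not Synapse's actual `StreamIdGenerator`):

    from typing import Callable

    class MiniIdGenerator:
        def __init__(self, notify: Callable[[], None]) -> None:
            self._token = 0
            self._notify = notify

        def advance(self) -> int:
            self._token += 1
            # Poke replication on every write, so other workers hear
            # about the new position promptly instead of on a timer.
            self._notify()
            return self._token

    gen = MiniIdGenerator(lambda: print("replication poked"))
    gen.advance()  # prints "replication poked"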
@ -38,7 +38,7 @@ from synapse.logging.opentracing import (
    whitelisted_homeserver,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
from synapse.replication.tcp.streams._base import DeviceListsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
@ -92,6 +92,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
        # class below that is used on the main process.
        self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
            db_conn,
            hs.get_replication_notifier(),
            "device_lists_stream",
            "stream_id",
            extra_tables=[
@ -163,9 +164,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
    ) -> None:
        if stream_name == DeviceListsStream.NAME:
            self._invalidate_caches_for_devices(token, rows)
        elif stream_name == UserSignatureStream.NAME:
            for row in rows:
                self._user_signature_stream_cache.entity_has_changed(row.user_id, token)

        return super().process_replication_rows(stream_name, instance_name, token, rows)

    def process_replication_position(
@ -173,14 +172,17 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
    ) -> None:
        if stream_name == DeviceListsStream.NAME:
            self._device_list_id_gen.advance(instance_name, token)
        elif stream_name == UserSignatureStream.NAME:
            self._device_list_id_gen.advance(instance_name, token)

        super().process_replication_position(stream_name, instance_name, token)

    def _invalidate_caches_for_devices(
        self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
    ) -> None:
        for row in rows:
            if row.is_signature:
                self._user_signature_stream_cache.entity_has_changed(row.entity, token)
                continue

            # The entities are either user IDs (starting with '@') whose devices
            # have changed, or remote servers that we need to tell about
            # changes.

@ -1181,7 +1181,10 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
        super().__init__(database, db_conn, hs)

        self._cross_signing_id_gen = StreamIdGenerator(
            db_conn, "e2e_cross_signing_keys", "stream_id"
            db_conn,
            hs.get_replication_notifier(),
            "e2e_cross_signing_keys",
            "stream_id",
        )

    async def set_e2e_device_keys(

@ -69,6 +69,8 @@ class _BackgroundUpdates:

    EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections"

    EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _CalculateChainCover:
@ -260,6 +262,16 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
            self._background_events_populate_state_key_rejections,
        )

        # Add an index that would be useful for jumping to date using
        # get_event_id_for_timestamp.
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.EVENTS_JUMP_TO_DATE_INDEX,
            index_name="events_jump_to_date_idx",
            table="events",
            columns=["room_id", "origin_server_ts"],
            where_clause="NOT outlier",
        )

    async def _background_reindex_fields_sender(
        self, progress: JsonDict, batch_size: int
    ) -> int:

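For readers less familiar with the background-update helper: the registration above builds a partial index over (room_id, origin_server_ts) that excludes outliers. My assumption about the statement it ultimately issues on PostgreSQL (SQLite handling differs in detail) is roughly:

    # Rough shape of the DDL produced by the registration above; this is
    # an inference about the helper's output, not text from the diff.
    CREATE_SQL = """
    CREATE INDEX CONCURRENTLY events_jump_to_date_idx
        ON events (room_id, origin_server_ts)
        WHERE NOT outlier
    """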
synapse/storage/databases/main/events_worker.py

@@ -110,6 +110,10 @@ event_fetch_ongoing_gauge = Gauge(
 )


+class InvalidEventError(Exception):
+    """The event retrieved from the database is invalid and cannot be used."""
+
+
 @attr.s(slots=True, auto_attribs=True)
 class EventCacheEntry:
     event: EventBase
@@ -191,6 +195,7 @@ class EventsWorkerStore(SQLBaseStore):
             self._stream_id_gen = MultiWriterIdGenerator(
                 db_conn=db_conn,
                 db=database,
+                notifier=hs.get_replication_notifier(),
                 stream_name="events",
                 instance_name=hs.get_instance_name(),
                 tables=[("events", "instance_name", "stream_ordering")],
@@ -200,6 +205,7 @@ class EventsWorkerStore(SQLBaseStore):
             self._backfill_id_gen = MultiWriterIdGenerator(
                 db_conn=db_conn,
                 db=database,
+                notifier=hs.get_replication_notifier(),
                 stream_name="backfill",
                 instance_name=hs.get_instance_name(),
                 tables=[("events", "instance_name", "stream_ordering")],
@@ -217,12 +223,14 @@ class EventsWorkerStore(SQLBaseStore):
             # SQLite).
             self._stream_id_gen = StreamIdGenerator(
                 db_conn,
+                hs.get_replication_notifier(),
                 "events",
                 "stream_ordering",
                 is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
             )
             self._backfill_id_gen = StreamIdGenerator(
                 db_conn,
+                hs.get_replication_notifier(),
                 "events",
                 "stream_ordering",
                 step=-1,
@@ -300,6 +308,7 @@ class EventsWorkerStore(SQLBaseStore):
             self._un_partial_stated_events_stream_id_gen = MultiWriterIdGenerator(
                 db_conn=db_conn,
                 db=database,
+                notifier=hs.get_replication_notifier(),
                 stream_name="un_partial_stated_event_stream",
                 instance_name=hs.get_instance_name(),
                 tables=[
@@ -311,14 +320,18 @@ class EventsWorkerStore(SQLBaseStore):
             )
         else:
             self._un_partial_stated_events_stream_id_gen = StreamIdGenerator(
-                db_conn, "un_partial_stated_event_stream", "stream_id"
+                db_conn,
+                hs.get_replication_notifier(),
+                "un_partial_stated_event_stream",
+                "stream_id",
             )

-    def get_un_partial_stated_events_token(self) -> int:
-        # TODO(faster_joins, multiple writers): This is inappropriate if there are multiple
-        # writers because workers that don't write often will hold all
-        # readers up.
-        return self._un_partial_stated_events_stream_id_gen.get_current_token()
+    def get_un_partial_stated_events_token(self, instance_name: str) -> int:
+        return (
+            self._un_partial_stated_events_stream_id_gen.get_current_token_for_writer(
+                instance_name
+            )
+        )

     async def get_un_partial_stated_events_from_stream(
         self, instance_name: str, last_id: int, current_id: int, limit: int
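The removed TODO explains the motivation for the signature change above: a single global token is pinned by the slowest writer, so `get_un_partial_stated_events_token` now asks for one writer's position instead. A toy illustration of the difference (assumed semantics, not Synapse's `MultiWriterIdGenerator`):

```python
# Toy multi-writer positions, to contrast per-writer and global tokens.
from typing import Dict


class ToyWriterPositions:
    def __init__(self) -> None:
        self._pos: Dict[str, int] = {}

    def advance(self, instance_name: str, token: int) -> None:
        self._pos[instance_name] = max(self._pos.get(instance_name, 0), token)

    def get_current_token_for_writer(self, instance_name: str) -> int:
        # Readers following a single writer only need that writer's position.
        return self._pos.get(instance_name, 0)

    def get_current_token(self) -> int:
        # The globally safe position is bounded by the *slowest* writer,
        # which is what the removed TODO warned about.
        return min(self._pos.values(), default=0)


pos = ToyWriterPositions()
pos.advance("worker1", 10)
pos.advance("worker2", 3)
assert pos.get_current_token_for_writer("worker1") == 10
assert pos.get_current_token() == 3
```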
@@ -408,6 +421,8 @@ class EventsWorkerStore(SQLBaseStore):
             self._stream_id_gen.advance(instance_name, token)
         elif stream_name == BackfillStream.NAME:
             self._backfill_id_gen.advance(instance_name, -token)
+        elif stream_name == UnPartialStatedEventStream.NAME:
+            self._un_partial_stated_events_stream_id_gen.advance(instance_name, token)
         super().process_replication_position(stream_name, instance_name, token)

     async def have_censored_event(self, event_id: str) -> bool:
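Backfill stream ordering runs backwards (note `step=-1` on `self._backfill_id_gen` earlier in this diff), which is why the position is negated before `advance` is called. A small sketch of that convention, assuming only what the hunk shows:

```python
# Sketch: backfilled events take decreasing (negative) stream orderings,
# while the replication stream itself carries positive tokens.
class ToyBackfillGen:
    def __init__(self) -> None:
        self._current = 0  # grows downwards: -1, -2, -3, ...

    def get_next(self) -> int:
        self._current -= 1
        return self._current

    def advance(self, instance_name: str, token: int) -> None:
        # Callers pass -token, translating the positive wire token back
        # into this generator's negative id space.
        self._current = min(self._current, token)


gen = ToyBackfillGen()
gen.advance("master", -5)  # wire token 5 becomes local position -5
assert gen.get_next() == -6
```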
@@ -1299,7 +1314,7 @@ class EventsWorkerStore(SQLBaseStore):
             # invites, so just accept it for all membership events.
             #
             if d["type"] != EventTypes.Member:
-                raise Exception(
+                raise InvalidEventError(
                     "Room %s for event %s is unknown" % (d["room_id"], event_id)
                 )

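Swapping the bare `Exception` for the `InvalidEventError` defined at the top of this file lets callers distinguish "this row is unusable" from genuine bugs. A sketch of a caller taking advantage of that (hypothetical helper names):

```python
# Hypothetical caller: skip invalid rows instead of failing the whole batch.
class InvalidEventError(Exception):
    """The event retrieved from the database is invalid and cannot be used."""


def load_events(event_ids, fetch_one):
    events = []
    for event_id in event_ids:
        try:
            events.append(fetch_one(event_id))
        except InvalidEventError:
            # A corrupt or unknown-room row should not abort everything.
            continue
    return events
```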
synapse/storage/databases/main/presence.py

@@ -77,6 +77,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
             self._presence_id_gen = MultiWriterIdGenerator(
                 db_conn=db_conn,
                 db=database,
+                notifier=hs.get_replication_notifier(),
                 stream_name="presence_stream",
                 instance_name=self._instance_name,
                 tables=[("presence_stream", "instance_name", "stream_id")],
@@ -85,7 +86,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
             )
         else:
             self._presence_id_gen = StreamIdGenerator(
-                db_conn, "presence_stream", "stream_id"
+                db_conn, hs.get_replication_notifier(), "presence_stream", "stream_id"
             )

         self.hs = hs
synapse/storage/databases/main/push_rule.py

@@ -86,8 +86,9 @@ def _load_rules(
     filtered_rules = FilteredPushRules(
         push_rules,
         enabled_map,
-        msc3664_enabled=experimental_config.msc3664_enabled,
         msc1767_enabled=experimental_config.msc1767_enabled,
+        msc3664_enabled=experimental_config.msc3664_enabled,
+        msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,
     )

     return filtered_rules
@@ -117,6 +118,7 @@ class PushRulesWorkerStore(
         # class below that is used on the main process.
         self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
             db_conn,
+            hs.get_replication_notifier(),
             "push_rules_stream",
             "stream_id",
             is_writer=hs.config.worker.worker_app is None,
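Besides threading the notifier through, the first hunk here reorders the MSC flags numerically and wires up the new `msc3381_polls_enabled` option. A sketch of the kind of gating such flags imply (hypothetical rule ids; the real filtering lives in `FilteredPushRules`):

```python
# Hypothetical mapping from experimental push rules to config flags.
EXPERIMENTAL_RULE_FLAGS = {
    "rule.msc1767_example": "msc1767_enabled",
    "rule.msc3664_example": "msc3664_enabled",
    "rule.msc3381_poll_example": "msc3381_polls_enabled",
}


def rule_is_active(rule_id: str, enabled_flags: frozenset) -> bool:
    flag = EXPERIMENTAL_RULE_FLAGS.get(rule_id)
    # A rule tied to an experimental MSC only fires if its flag is on.
    return flag is None or flag in enabled_flags


assert rule_is_active("rule.msc3381_poll_example", frozenset({"msc3381_polls_enabled"}))
assert not rule_is_active("rule.msc3381_poll_example", frozenset())
```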
synapse/storage/databases/main/pusher.py

@@ -62,6 +62,7 @@ class PusherWorkerStore(SQLBaseStore):
         # class below that is used on the main process.
         self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
             db_conn,
+            hs.get_replication_notifier(),
             "pushers",
             "id",
             extra_tables=[("deleted_pushers", "stream_id")],
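`extra_tables` tells the generator that ids from this stream also appear in `deleted_pushers`, so its starting position must clear both tables. A sketch of that start-up scan, assuming a DB-API style cursor named `txn`:

```python
# Sketch: pick a starting id strictly above every id already used,
# across the main table and any extra_tables (assumed cursor API).
from typing import Iterable, Tuple


def load_current_id(txn, tables: Iterable[Tuple[str, str]]) -> int:
    current = 0
    for table, column in tables:
        txn.execute(f"SELECT COALESCE(MAX({column}), 0) FROM {table}")
        current = max(current, txn.fetchone()[0])
    return current


# e.g. load_current_id(txn, [("pushers", "id"), ("deleted_pushers", "stream_id")])
```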
synapse/storage/databases/main/receipts.py

@@ -73,6 +73,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             self._receipts_id_gen = MultiWriterIdGenerator(
                 db_conn=db_conn,
                 db=database,
+                notifier=hs.get_replication_notifier(),
                 stream_name="receipts",
                 instance_name=self._instance_name,
                 tables=[("receipts_linearized", "instance_name", "stream_id")],
@@ -91,6 +92,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             # SQLite).
             self._receipts_id_gen = StreamIdGenerator(
                 db_conn,
+                hs.get_replication_notifier(),
                 "receipts_linearized",
                 "stream_id",
                 is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts,
synapse/storage/databases/main/relations.py

@@ -292,6 +292,7 @@ class RelationsWorkerStore(SQLBaseStore):
             to_device_key=0,
             device_list_key=0,
             groups_key=0,
+            un_partial_stated_rooms_key=0,
         )

         return events[:limit], next_token
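Every replication stream carried in a token needs its own field, so the new un-partial-stated-rooms stream forces an `un_partial_stated_rooms_key`; relations only cares about the room stream, hence all the zeroed placeholders. A reduced stand-in for such a token (field set abridged, not Synapse's full `StreamToken`; requires the attrs package, as the diff itself does):

```python
import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToyStreamToken:
    room_key: int = 0
    to_device_key: int = 0
    device_list_key: int = 0
    groups_key: int = 0
    un_partial_stated_rooms_key: int = 0  # the field added in this diff


token = ToyStreamToken(room_key=123)
```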
synapse/storage/databases/main/room.py

@@ -26,6 +26,7 @@ from typing import (
     Mapping,
     Optional,
     Sequence,
+    Set,
     Tuple,
     Union,
     cast,
@@ -43,6 +44,7 @@ from synapse.api.errors import StoreError
 from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.config.homeserver import HomeServerConfig
 from synapse.events import EventBase
+from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import (
     DatabasePool,
@@ -126,6 +128,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator(
                 db_conn=db_conn,
                 db=database,
+                notifier=hs.get_replication_notifier(),
                 stream_name="un_partial_stated_room_stream",
                 instance_name=self._instance_name,
                 tables=[
@@ -137,9 +140,19 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             )
         else:
             self._un_partial_stated_rooms_stream_id_gen = StreamIdGenerator(
-                db_conn, "un_partial_stated_room_stream", "stream_id"
+                db_conn,
+                hs.get_replication_notifier(),
+                "un_partial_stated_room_stream",
+                "stream_id",
             )

+    def process_replication_position(
+        self, stream_name: str, instance_name: str, token: int
+    ) -> None:
+        if stream_name == UnPartialStatedRoomStream.NAME:
+            self._un_partial_stated_rooms_stream_id_gen.advance(instance_name, token)
+        return super().process_replication_position(stream_name, instance_name, token)
+
     async def store_room(
         self,
         room_id: str,
@@ -1277,18 +1290,49 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         )
         return result["join_event_id"], result["device_lists_stream_id"]

-    def get_un_partial_stated_rooms_token(self) -> int:
-        # TODO(faster_joins, multiple writers): This is inappropriate if there
-        # are multiple writers because workers that don't write often will
-        # hold all readers up.
-        # (See `MultiWriterIdGenerator.get_persisted_upto_position` for an
-        # explanation.)
-        return self._un_partial_stated_rooms_stream_id_gen.get_current_token()
+    def get_un_partial_stated_rooms_token(self, instance_name: str) -> int:
+        return self._un_partial_stated_rooms_stream_id_gen.get_current_token_for_writer(
+            instance_name
+        )
+
+    async def get_un_partial_stated_rooms_between(
+        self, last_id: int, current_id: int, room_ids: Collection[str]
+    ) -> Set[str]:
+        """Get all rooms that got un partial stated between `last_id` exclusive and
+        `current_id` inclusive.
+
+        Returns:
+            The list of room ids.
+        """
+
+        if last_id == current_id:
+            return set()
+
+        def _get_un_partial_stated_rooms_between_txn(
+            txn: LoggingTransaction,
+        ) -> Set[str]:
+            sql = """
+                SELECT DISTINCT room_id FROM un_partial_stated_room_stream
+                WHERE ? < stream_id AND stream_id <= ? AND
+            """
+
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", room_ids
+            )
+
+            txn.execute(sql + clause, [last_id, current_id] + args)
+
+            return {r[0] for r in txn}
+
+        return await self.db_pool.runInteraction(
+            "get_un_partial_stated_rooms_between",
+            _get_un_partial_stated_rooms_between_txn,
+        )

     async def get_un_partial_stated_rooms_from_stream(
         self, instance_name: str, last_id: int, current_id: int, limit: int
     ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
-        """Get updates for caches replication stream.
+        """Get updates for un partial stated rooms replication stream.

         Args:
             instance_name: The writer we want to fetch updates from. Unused
|
||||
(room_id,),
|
||||
)
|
||||
|
||||
async def clear_partial_state_room(self, room_id: str) -> bool:
|
||||
async def clear_partial_state_room(self, room_id: str) -> Optional[int]:
|
||||
"""Clears the partial state flag for a room.
|
||||
|
||||
Args:
|
||||
room_id: The room whose partial state flag is to be cleared.
|
||||
|
||||
Returns:
|
||||
`True` if the partial state flag has been cleared successfully.
|
||||
The corresponding stream id for the un-partial-stated rooms stream.
|
||||
|
||||
`False` if the partial state flag could not be cleared because the room
|
||||
`None` if the partial state flag could not be cleared because the room
|
||||
still contains events with partial state.
|
||||
"""
|
||||
try:
|
||||
@ -2315,7 +2359,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
|
||||
room_id,
|
||||
un_partial_state_room_stream_id,
|
||||
)
|
||||
return True
|
||||
return un_partial_state_room_stream_id
|
||||
except self.db_pool.engine.module.IntegrityError as e:
|
||||
# Assume that any `IntegrityError`s are due to partial state events.
|
||||
logger.info(
|
||||
@ -2323,7 +2367,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
|
||||
room_id,
|
||||
e,
|
||||
)
|
||||
return False
|
||||
return None
|
||||
|
||||
def _clear_partial_state_room_txn(
|
||||
self,
|
||||
|
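Returning the stream id instead of a bool gives callers the position at which the room became fully stated, but it also means they must test `is None` rather than truthiness. A sketch of an adapted caller (hypothetical surrounding code):

```python
# Hypothetical caller adapting to the Optional[int] return type.
from typing import Optional


async def on_room_fully_stated(store, room_id: str) -> Optional[int]:
    stream_id = await store.clear_partial_state_room(room_id)
    if stream_id is None:
        # Some events still have partial state; retry after the next resync.
        return None
    # Use `is None`, not truthiness: the id itself is the useful payload.
    return stream_id
```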
Some files were not shown because too many files have changed in this diff.