Merge remote-tracking branch 'upstream/release-v1.58'

Tulir Asokan · 2022-04-26 14:40:43 +03:00 · commit 6669e3b670
112 changed files with 6165 additions and 1516 deletions

.ci/latest_deps_build_failed_issue_template.md (new file)

@@ -0,0 +1,4 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

.ci/patch_for_twisted_trunk.sh (deleted)

@@ -1,8 +0,0 @@
#!/bin/sh
# replaces the dependency on Twisted in `python_dependencies` with trunk.
set -e
cd "$(dirname "$0")"/..
sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py

.dockerignore

@@ -9,8 +9,4 @@
 !poetry.lock
 !requirements.txt
-# TODO: remove these once we have moved over to using poetry-core in pyproject.toml
-!MANIFEST.in
-!setup.py
 **/__pycache__

.github/workflows/latest_deps.yml (new file)

@@ -0,0 +1,156 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
name: Latest dependencies

on:
  schedule:
    - cron: 0 7 * * *
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  mypy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      # The dev dependencies aren't exposed in the wheel metadata (at least with current
      # poetry-core versions), so we install with poetry.
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.2.0b1"
      # Dump installed versions for debugging.
      - run: poetry run pip list > before.txt
      # Upgrade all runtime dependencies only. This is intended to mimic a fresh
      # `pip install matrix-synapse[all]` as closely as possible.
      - run: poetry update --no-dev
      - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
      - run: poetry run mypy

  trial:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - database: "sqlite"
          - database: "postgres"
            postgres-version: "14"
    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
      - run: pip install .[all,test]
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: python -m twisted.trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  sytest:
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:testing
      volumes:
        - ${{ github.workspace }}:/src
    strategy:
      fail-fast: false
      matrix:
        include:
          - sytest-tag: focal
          - sytest-tag: focal
            postgres: postgres
            workers: workers
            redis: redis
    env:
      POSTGRES: ${{ matrix.postgres && 1 }}
      WORKERS: ${{ matrix.workers && 1 }}
      REDIS: ${{ matrix.redis && 1 }}
      BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
    steps:
      - uses: actions/checkout@v2
      - name: Ensure sytest runs `pip install`
        # Delete the lockfile so sytest will `pip install` rather than `poetry install`
        run: rm /src/poetry.lock
        working-directory: /src
      - name: Prepare test blacklist
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  # TODO: run complement (as with twisted trunk, see #12473).

  # open an issue if the build fails, so we know about it.
  open-issue:
    if: failure()
    needs:
      # TODO: should mypy be included here? It feels more brittle than the other two.
      - mypy
      - trial
      - sytest
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d  # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          update_existing: true
          filename: .ci/latest_deps_build_failed_issue_template.md
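
Because the workflow declares `workflow_dispatch:`, it can also be triggered by hand. With the GitHub CLI that looks roughly like this (a sketch, assuming an authenticated `gh` in a checkout of the repository):

```sh
# Manually dispatch the latest-deps workflow against the develop branch.
gh workflow run "Latest dependencies" --ref develop
```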

.github/workflows/tests.yml

@@ -15,24 +15,18 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
-      - run: pip install -e .
+      - run: pip install .
       - run: scripts-dev/generate_sample_config.sh --check
       - run: scripts-dev/config-lint.sh

   lint:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        toxenv:
-          - "check_codestyle"
-          - "check_isort"
-          - "mypy"
-
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-      - run: pip install tox
-      - run: tox -e ${{ matrix.toxenv }}
+    # This does a vanilla `poetry install` - no extras. I'm slightly anxious
+    # that we might skip some typechecks on code that uses extras. However,
+    # I think the right way to fix this is to mark any extras needed for
+    # typechecking as development dependencies. To detect this, we ought to
+    # turn up mypy's strictness: disallow unknown imports and accept fewer
+    # uses of `Any`.
+    uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"

   lint-crlf:
     runs-on: ubuntu-latest
@@ -71,23 +65,23 @@ jobs:
       matrix:
         python-version: ["3.7", "3.8", "3.9", "3.10"]
         database: ["sqlite"]
-        toxenv: ["py"]
+        extras: ["all"]
         include:
           # Newest Python without optional deps
           - python-version: "3.10"
-            toxenv: "py-noextras"
+            extras: ""

           # Oldest Python with PostgreSQL
           - python-version: "3.7"
             database: "postgres"
             postgres-version: "10"
-            toxenv: "py"
+            extras: "all"

           # Newest Python with newest PostgreSQL
           - python-version: "3.10"
             database: "postgres"
             postgres-version: "14"
-            toxenv: "py"
+            extras: "all"

     steps:
       - uses: actions/checkout@v2
@@ -99,17 +93,16 @@ jobs:
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
-      - uses: actions/setup-python@v2
+      - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.python-version }}
-      - run: pip install tox
+          extras: ${{ matrix.extras }}
       - name: Await PostgreSQL
         if: ${{ matrix.postgres-version }}
         timeout-minutes: 2
         run: until pg_isready -h localhost; do sleep 1; done
-      - run: tox -e ${{ matrix.toxenv }}
+      - run: poetry run trial --jobs=2 tests
        env:
-          TRIAL_FLAGS: "--jobs=2"
           SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
           SYNAPSE_POSTGRES_HOST: localhost
           SYNAPSE_POSTGRES_USER: postgres
@@ -156,23 +149,24 @@ jobs:
   trial-pypy:
     # Very slow; only run if the branch name includes 'pypy'
+    # Note: sqlite only; no postgres. Completely untested since poetry move.
     if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
     needs: linting-done
     runs-on: ubuntu-latest
     strategy:
       matrix:
         python-version: ["pypy-3.7"]
+        extras: ["all"]

     steps:
       - uses: actions/checkout@v2
+      # Install libs necessary for PyPy to build binary wheels for dependencies
       - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
-      - uses: actions/setup-python@v2
+      - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.python-version }}
-      - run: pip install tox
-      - run: tox -e py
-        env:
-          TRIAL_FLAGS: "--jobs=2"
+          extras: ${{ matrix.extras }}
+      - run: poetry run trial --jobs=2 tests
       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
         if: ${{ always() }}

.github/workflows/twisted_trunk.yml

@@ -6,16 +6,25 @@ on:
   workflow_dispatch:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   mypy:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-      - run: .ci/patch_for_twisted_trunk.sh
-      - run: pip install tox
-      - run: tox -e mypy
+      - uses: matrix-org/setup-python-poetry@v1
+        with:
+          python-version: "3.x"
+          extras: "all"
+      - run: |
+          poetry remove twisted
+          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+          poetry install --no-interaction --extras "all test"
+      - run: poetry run mypy

   trial:
     runs-on: ubuntu-latest
@@ -23,14 +32,15 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
-      - uses: actions/setup-python@v2
+      - uses: matrix-org/setup-python-poetry@v1
         with:
-          python-version: 3.7
-      - run: .ci/patch_for_twisted_trunk.sh
-      - run: pip install tox
-      - run: tox -e py
-        env:
-          TRIAL_FLAGS: "--jobs=2"
+          python-version: "3.x"
+          extras: "all test"
+      - run: |
+          poetry remove twisted
+          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+          poetry install --no-interaction --extras "all test"
+      - run: poetry run trial --jobs 2 tests

       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
@@ -55,11 +65,23 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Patch dependencies
-        run: .ci/patch_for_twisted_trunk.sh
+        # Note: The poetry commands want to create a virtualenv in /src/.venv/,
+        # but the sytest-synapse container expects it to be in /venv/.
+        # We symlink it before running poetry so that poetry actually
+        # ends up installing to `/venv`.
+        run: |
+          ln -s -T /venv /src/.venv
+          poetry remove twisted
+          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+          poetry install --no-interaction --extras "all test"
         working-directory: /src
       - name: Run SyTest
         run: /bootstrap.sh synapse
         working-directory: /src
+        env:
+          # Use offline mode to avoid reinstalling the pinned version of
+          # twisted.
+          OFFLINE: 1
       - name: Summarise results.tap
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap

.gitignore

@@ -15,8 +15,7 @@ _trial_temp*/
 .DS_Store
 __pycache__/

-# We do want the poetry lockfile. TODO: is there a good reason for ignoring
-# '*.lock' above? If not, let's nuke it.
+# We do want the poetry lockfile.
 !poetry.lock

 # stuff that is likely to exist when you run a server locally

CHANGES.md

@@ -1,3 +1,75 @@
Synapse 1.58.0rc1 (2022-04-26)
==============================
As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.

Features
--------
- Implement [MSC3383](https://github.com/matrix-org/matrix-spec-proposals/pull/3383) for including the destination in server-to-server authentication headers. Contributed by @Bubu and @jcgruenhage for Famedly. ([\#11398](https://github.com/matrix-org/synapse/issues/11398))
- Docker images and Debian packages from matrix.org now contain a locked set of Python dependencies, greatly improving build reproducibility. ([Board](https://github.com/orgs/matrix-org/projects/54), [\#11537](https://github.com/matrix-org/synapse/issues/11537))
- Enable processing of device list updates asynchronously. ([\#12365](https://github.com/matrix-org/synapse/issues/12365), [\#12465](https://github.com/matrix-org/synapse/issues/12465))
- Implement [MSC2815](https://github.com/matrix-org/matrix-spec-proposals/pull/2815) to allow room moderators to view redacted event content. Contributed by @tulir. ([\#12427](https://github.com/matrix-org/synapse/issues/12427))
- Build Debian packages for Ubuntu 22.04 "Jammy Jellyfish". ([\#12543](https://github.com/matrix-org/synapse/issues/12543))

Bugfixes
--------
- Prevent a sync request from removing a user's busy presence status. ([\#12213](https://github.com/matrix-org/synapse/issues/12213))
- Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper. ([\#12319](https://github.com/matrix-org/synapse/issues/12319))
- Fix a long-standing bug which incorrectly caused `GET /_matrix/client/v3/rooms/{roomId}/event/{eventId}` to return edited events rather than the original. ([\#12476](https://github.com/matrix-org/synapse/issues/12476))
- Fix a bug introduced in Synapse 1.27.0 where the admin API for [deleting forward extremities](https://github.com/matrix-org/synapse/blob/erikj/fix_delete_event_response_count/docs/admin_api/rooms.md#deleting-forward-extremities) would always return a count of 1, no matter how many extremities were deleted. ([\#12496](https://github.com/matrix-org/synapse/issues/12496))
- Fix a long-standing bug where the image thumbnails embedded into email notifications were broken. ([\#12510](https://github.com/matrix-org/synapse/issues/12510))
- Fix a bug in the implementation of [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) where Synapse would use the field name `device_unused_fallback_keys`, rather than `device_unused_fallback_key_types`. ([\#12520](https://github.com/matrix-org/synapse/issues/12520))
- Fix a bug introduced in Synapse 0.99.3 which could cause Synapse to consume large amounts of RAM when back-paginating in a large room. ([\#12522](https://github.com/matrix-org/synapse/issues/12522))

Improved Documentation
----------------------
- Fix rendering of the documentation site when using the 'print' feature. ([\#12340](https://github.com/matrix-org/synapse/issues/12340))
- Add a manual documenting config file options. ([\#12368](https://github.com/matrix-org/synapse/issues/12368), [\#12527](https://github.com/matrix-org/synapse/issues/12527))
- Update documentation to reflect that both the `run_background_tasks_on` option and the options for moving stream writers off of the main process are no longer experimental. ([\#12451](https://github.com/matrix-org/synapse/issues/12451))
- Update worker documentation and replace old `federation_reader` with `generic_worker`. ([\#12457](https://github.com/matrix-org/synapse/issues/12457))
- Strongly recommend [Poetry](https://python-poetry.org/) for development. ([\#12475](https://github.com/matrix-org/synapse/issues/12475))
- Add some example configurations for workers and update architectural diagram. ([\#12492](https://github.com/matrix-org/synapse/issues/12492))
- Fix a broken link in `README.rst`. ([\#12495](https://github.com/matrix-org/synapse/issues/12495))
- Add HAProxy delegation example with CORS headers to docs. ([\#12501](https://github.com/matrix-org/synapse/issues/12501))
- Remove extraneous comma in User Admin API's device deletion section so that the example JSON is actually valid and works. Contributed by @olmari. ([\#12533](https://github.com/matrix-org/synapse/issues/12533))

Deprecations and Removals
-------------------------
- The groups/communities feature in Synapse is now disabled by default. ([\#12344](https://github.com/matrix-org/synapse/issues/12344))
- Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#12382](https://github.com/matrix-org/synapse/issues/12382))

Internal Changes
----------------
- Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join. ([\#12394](https://github.com/matrix-org/synapse/issues/12394))
- Preparation for faster-room-join work: Implement a tracking mechanism to allow functions to wait for full room state to arrive. ([\#12399](https://github.com/matrix-org/synapse/issues/12399))
- Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083). ([\#12395](https://github.com/matrix-org/synapse/issues/12395))
- Run CI in the locked [Poetry](https://python-poetry.org/) environment, and remove corresponding `tox` jobs. ([\#12425](https://github.com/matrix-org/synapse/issues/12425), [\#12434](https://github.com/matrix-org/synapse/issues/12434), [\#12438](https://github.com/matrix-org/synapse/issues/12438), [\#12441](https://github.com/matrix-org/synapse/issues/12441), [\#12449](https://github.com/matrix-org/synapse/issues/12449), [\#12478](https://github.com/matrix-org/synapse/issues/12478), [\#12514](https://github.com/matrix-org/synapse/issues/12514), [\#12472](https://github.com/matrix-org/synapse/issues/12472))
- Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current iteration of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666). ([\#12445](https://github.com/matrix-org/synapse/issues/12445))
- Fix typo in the release script help string. ([\#12450](https://github.com/matrix-org/synapse/issues/12450))
- Fix a minor typo in the Debian changelogs generated by the release script. ([\#12497](https://github.com/matrix-org/synapse/issues/12497))
- Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development. ([\#12455](https://github.com/matrix-org/synapse/issues/12455))
- Limit length of `device_id` to less than 512 characters. ([\#12454](https://github.com/matrix-org/synapse/issues/12454))
- Dockerfile-workers: reduce the amount we install in the image. ([\#12464](https://github.com/matrix-org/synapse/issues/12464))
- Dockerfile-workers: give the master its own log config. ([\#12466](https://github.com/matrix-org/synapse/issues/12466))
- complement-synapse-workers: factor out separate entry point script. ([\#12467](https://github.com/matrix-org/synapse/issues/12467))
- Back out experimental implementation of [MSC2314](https://github.com/matrix-org/matrix-spec-proposals/pull/2314). ([\#12474](https://github.com/matrix-org/synapse/issues/12474))
- Fix grammatical error in federation error response when the room version of a room is unknown. ([\#12483](https://github.com/matrix-org/synapse/issues/12483))
- Remove unnecessary configuration overrides in tests. ([\#12511](https://github.com/matrix-org/synapse/issues/12511))
- Refactor the relations code for clarity. ([\#12519](https://github.com/matrix-org/synapse/issues/12519))
- Add type hints so `docker` and `stubs` directories pass `mypy --disallow-untyped-defs`. ([\#12528](https://github.com/matrix-org/synapse/issues/12528))
- Update `delay_cancellation` to accept any awaitable, rather than just `Deferred`s. ([\#12468](https://github.com/matrix-org/synapse/issues/12468))
- Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db`. ([\#12529](https://github.com/matrix-org/synapse/issues/12529))

Synapse 1.57.1 (2022-04-20)
===========================

MANIFEST.in (deleted)

@@ -1,54 +0,0 @@
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
recursive-include synapse/storage *.py
recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md
recursive-include docs *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh
include book.toml
include pyproject.toml
recursive-include changelog.d *
include .flake8
prune .circleci
prune .github
prune .ci
prune contrib
prune debian
prune demo/etc
prune docker
prune stubs

README.rst

@@ -293,39 +293,42 @@ directory of your choice::
     git clone https://github.com/matrix-org/synapse.git
     cd synapse

-Synapse has a number of external dependencies, that are easiest
-to install using pip and a virtualenv::
+Synapse has a number of external dependencies. We maintain a fixed development
+environment using [poetry](https://python-poetry.org/). First, install poetry. We recommend

-    python3 -m venv ./env
-    source ./env/bin/activate
-    pip install -e ".[all,dev]"
+    pip install --user pipx
+    pipx install poetry
+
+as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
+(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`
+for other installation methods.) Then ask poetry to create a virtual environment
+from the project and install Synapse's dependencies::
+
+    poetry install --extras "all test"

 This will run a process of downloading and installing all the needed
-dependencies into a virtual env. If any dependencies fail to install,
-try installing the failing modules individually::
-
-    pip install -e "module-name"
+dependencies into a virtual env.

 We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`

-    ./demo/start.sh
+    poetry run ./demo/start.sh

-(to stop, you can use `./demo/stop.sh`)
+(to stop, you can use `poetry run ./demo/stop.sh`)

-See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
+See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
 for more information.

 If you just want to start a single instance of the app and run it directly::

     # Create the homeserver.yaml config once
-    python -m synapse.app.homeserver \
+    poetry run synapse_homeserver \
       --server-name my.domain.name \
       --config-path homeserver.yaml \
       --generate-config \
       --report-stats=[yes|no]

     # Start the app
-    python -m synapse.app.homeserver --config-path homeserver.yaml
+    poetry run synapse_homeserver --config-path homeserver.yaml

 Running the unit tests
@@ -334,7 +337,7 @@ Running the unit tests
 After getting up and running, you may wish to run Synapse's unit tests to
 check that everything is installed correctly::

-    trial tests
+    poetry run trial tests

 This should end with a 'PASSED' result (note that exact numbers will
 differ)::

debian/build_virtualenv

@@ -30,9 +30,19 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
         ;;
 esac

-# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
-# than the 2/3 compatible `virtualenv`.
+# Manually install Poetry and export a pip-compatible `requirements.txt`
+# We need a Poetry pre-release as the export command is buggy in < 1.2
+TEMP_VENV="$(mktemp -d)"
+python3 -m venv "$TEMP_VENV"
+source "$TEMP_VENV/bin/activate"
+pip install -U pip
+pip install poetry==1.2.0b1
+poetry export --extras all --extras test -o exported_requirements.txt
+deactivate
+rm -rf "$TEMP_VENV"
+
+# Use --no-deps to only install pinned versions in exported_requirements.txt,
+# and to avoid https://github.com/pypa/pip/issues/9644
 dh_virtualenv \
     --install-suffix "matrix-synapse" \
     --builtin-venv \
@@ -41,9 +51,11 @@ dh_virtualenv \
     --preinstall="lxml" \
     --preinstall="mock" \
     --preinstall="wheel" \
+    --extra-pip-arg="--no-deps" \
     --extra-pip-arg="--no-cache-dir" \
     --extra-pip-arg="--compile" \
-    --extras="all,systemd,test"
+    --extras="all,systemd,test" \
+    --requirements="exported_requirements.txt"

 PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
 VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"

debian/changelog

@@ -1,3 +1,10 @@
+matrix-synapse-py3 (1.58.0~rc1) stable; urgency=medium
+
+  * Use poetry to manage the bundled virtualenv included with this package.
+  * New Synapse release 1.58.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Apr 2022 11:15:20 +0100
+
 matrix-synapse-py3 (1.57.1) stable; urgency=medium

   * New synapse release 1.57.1.

debian/clean (new file)

@@ -0,0 +1 @@
exported_requirements.txt

docker/Dockerfile

@@ -59,7 +59,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 WORKDIR /synapse

 # Copy just what we need to run `poetry export`...
-COPY pyproject.toml poetry.lock README.rst /synapse/
+COPY pyproject.toml poetry.lock /synapse/

 RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
@@ -98,9 +98,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 # Copy over the rest of the synapse source code.
 COPY synapse /synapse/synapse/
 # ... and what we need to `pip install`.
-# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
-# pyproject.toml here, ditching setup.py and MANIFEST.in.
-COPY setup.py MANIFEST.in README.rst /synapse/
+COPY pyproject.toml README.rst /synapse/

 # Install the synapse package itself.
 RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
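
For reference, this stage can be exercised locally by building the image from the repository root; a rough sketch (the tag is arbitrary, and BuildKit is needed for the `--mount=type=cache` instructions):

```sh
# Build the production image with BuildKit so the cache mounts work.
DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile -t matrix-synapse:dev .
```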

docker/Dockerfile-workers

@@ -2,10 +2,19 @@
 FROM matrixdotorg/synapse

 # Install deps
-RUN apt-get update
-RUN apt-get install -y supervisor redis nginx
+RUN \
+   --mount=type=cache,target=/var/cache/apt,sharing=locked \
+   --mount=type=cache,target=/var/lib/apt,sharing=locked \
+  apt-get update && \
+  DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+    redis-server nginx-light

-# Remove the default nginx sites
+# Install supervisord with pip instead of apt, to avoid installing a second
+# copy of python.
+RUN --mount=type=cache,target=/root/.cache/pip \
+    pip install supervisor~=4.2
+
+# Disable the default nginx sites
 RUN rm /etc/nginx/sites-enabled/default

 # Copy Synapse worker, nginx and supervisord configuration template files
@@ -19,5 +28,7 @@ EXPOSE 8080/tcp
 COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
 ENTRYPOINT ["/configure_workers_and_start.py"]

+# Replace the healthcheck with one which checks *all* the workers. The script
+# is generated by configure_workers_and_start.py.
 HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
     CMD /bin/sh /healthcheck.sh

docker/complement/SynapseWorkers.Dockerfile

@@ -13,8 +13,8 @@ RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/cadd
   tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root

 # Install postgresql
-RUN apt-get update
-RUN apt-get install -y postgresql
+RUN apt-get update && \
+  DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13

 # Configure a user and create a database for Synapse
 RUN pg_ctlcluster 13 main start && su postgres -c "echo \
@@ -34,40 +34,14 @@ WORKDIR /data
 # Copy the caddy config
 COPY conf-workers/caddy.complement.json /root/caddy.json

+# Copy the entrypoint
+COPY conf-workers/start-complement-synapse-workers.sh /
+
 # Expose caddy's listener ports
 EXPOSE 8008 8448

-ENTRYPOINT \
-    # Replace the server name in the caddy config
-    sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
-    # Start postgres
-    pg_ctlcluster 13 main start 2>&1 && \
-    # Start caddy
-    /root/caddy start --config /root/caddy.json 2>&1 && \
-    # Set the server name of the homeserver
-    SYNAPSE_SERVER_NAME=${SERVER_NAME} \
-    # No need to report stats here
-    SYNAPSE_REPORT_STATS=no \
-    # Set postgres authentication details which will be placed in the homeserver config file
-    POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
-    # Specify the workers to test with
-    SYNAPSE_WORKER_TYPES="\
-        event_persister, \
-        event_persister, \
-        background_worker, \
-        frontend_proxy, \
-        event_creator, \
-        user_dir, \
-        media_repository, \
-        federation_inbound, \
-        federation_reader, \
-        federation_sender, \
-        synchrotron, \
-        appservice, \
-        pusher" \
-    # Run the script that writes the necessary config files and starts supervisord, which in turn
-    # starts everything else
-    /configure_workers_and_start.py
+ENTRYPOINT /start-complement-synapse-workers.sh

-# Update the healthcheck to have a shorter check interval
 HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
     CMD /bin/sh /healthcheck.sh

docker/complement/conf-workers/start-complement-synapse-workers.sh (new file)

@@ -0,0 +1,50 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Replace the server name in the caddy config
sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
log "starting postgres"
pg_ctlcluster 13 main start
log "starting caddy"
/root/caddy start --config /root/caddy.json
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher"
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py

docker/complement/conf-workers/workers-shared.yaml

@@ -103,8 +103,10 @@ experimental_features:
   spaces_enabled: true
   # Enable history backfilling support
   msc2716_enabled: true
-  # server-side support for partial state in /send_join
+  # server-side support for partial state in /send_join responses
   msc3706_enabled: true
+  # client-side support for partial state in /send_join responses
+  faster_joins: true
   # Enable jump to date endpoint
   msc3030_enabled: true

docker/conf/supervisord.conf.j2

@@ -5,6 +5,9 @@
 nodaemon=true
 user=root

+[include]
+files = /etc/supervisor/conf.d/*.conf
+
 [program:nginx]
 command=/usr/sbin/nginx -g "daemon off;"
 priority=500

docker/configure_workers_and_start.py

@@ -29,7 +29,7 @@
 import os
 import subprocess
 import sys
-from typing import Any, Dict, Set
+from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set

 import jinja2
 import yaml
@@ -201,7 +201,7 @@ upstream {upstream_worker_type} {{
 # Utility functions

-def log(txt: str):
+def log(txt: str) -> None:
     """Log something to the stdout.

     Args:
@@ -210,7 +210,7 @@ def log(txt: str):
     print(txt)


-def error(txt: str):
+def error(txt: str) -> NoReturn:
     """Log something and exit with an error code.

     Args:
@@ -220,7 +220,7 @@ def error(txt: str):
     sys.exit(2)


-def convert(src: str, dst: str, **template_vars):
+def convert(src: str, dst: str, **template_vars: object) -> None:
     """Generate a file from a template

     Args:
@@ -290,7 +290,7 @@ def add_sharding_to_shared_config(
     shared_config.setdefault("media_instance_running_background_jobs", worker_name)


-def generate_base_homeserver_config():
+def generate_base_homeserver_config() -> None:
     """Starts Synapse and generates a basic homeserver config, which will later be
     modified for worker support.
@@ -302,12 +302,14 @@ def generate_base_homeserver_config():
     subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])


-def generate_worker_files(environ, config_path: str, data_dir: str):
+def generate_worker_files(
+    environ: Mapping[str, str], config_path: str, data_dir: str
+) -> None:
     """Read the desired list of workers from environment variables and generate
     shared homeserver, nginx and supervisord configs.

     Args:
-        environ: _Environ[str]
+        environ: os.environ instance.
         config_path: The location of the generated Synapse main worker config file.
         data_dir: The location of the synapse data directory. Where log and
             user-facing config files live.
@@ -341,7 +343,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
     # base shared worker jinja2 template.
     #
     # This config file will be passed to all workers, included Synapse's main process.
-    shared_config = {"listeners": listeners}
+    shared_config: Dict[str, Any] = {"listeners": listeners}

     # The supervisord config. The contents of which will be inserted into the
     # base supervisord jinja2 template.
@@ -369,13 +371,13 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
     nginx_locations = {}

     # Read the desired worker configuration from the environment
-    worker_types = environ.get("SYNAPSE_WORKER_TYPES")
-    if worker_types is None:
+    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
+    if worker_types_env is None:
         # No workers, just the main process
         worker_types = []
     else:
         # Split type names by comma
-        worker_types = worker_types.split(",")
+        worker_types = worker_types_env.split(",")

     # Create the worker configuration directory if it doesn't already exist
     os.makedirs("/conf/workers", exist_ok=True)
@@ -446,21 +448,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
         # Write out the worker's logging config file
-        # Check whether we should write worker logs to disk, in addition to the console
-        extra_log_template_args = {}
-        if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
-            extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
-                dir=data_dir, name=worker_name
-            )
-        # Render and write the file
-        log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
-        convert(
-            "/conf/log.config",
-            log_config_filepath,
-            worker_name=worker_name,
-            **extra_log_template_args,
-        )
+        log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)

         # Then a worker config file
         convert(
@@ -496,6 +484,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
     # Finally, we'll write out the config files.

+    # log config for the master process
+    master_log_config = generate_worker_log_config(environ, "master", data_dir)
+    shared_config["log_config"] = master_log_config
+
     # Shared homeserver config
     convert(
         "/conf/shared.yaml.j2",
@@ -512,9 +504,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
     )

     # Supervisord config
+    os.makedirs("/etc/supervisor", exist_ok=True)
     convert(
         "/conf/supervisord.conf.j2",
-        "/etc/supervisor/conf.d/supervisord.conf",
+        "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
         worker_config=supervisord_config,
     )
@@ -532,15 +525,31 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
         os.mkdir(log_dir)


-def start_supervisord():
-    """Starts up supervisord which then starts and monitors all other necessary processes
+def generate_worker_log_config(
+    environ: Mapping[str, str], worker_name: str, data_dir: str
+) -> str:
+    """Generate a log.config file for the given worker.

-    Raises: CalledProcessError if calling start.py return a non-zero exit code.
+    Returns: the path to the generated file
     """
-    subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
+    # Check whether we should write worker logs to disk, in addition to the console
+    extra_log_template_args = {}
+    if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
+        extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
+            dir=data_dir, name=worker_name
+        )
+    # Render and write the file
+    log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
+    convert(
+        "/conf/log.config",
+        log_config_filepath,
+        worker_name=worker_name,
+        **extra_log_template_args,
+    )
+    return log_config_filepath


-def main(args, environ):
+def main(args: List[str], environ: MutableMapping[str, str]) -> None:
     config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
     config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
     data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
@@ -567,7 +576,13 @@ def main(args, environ):
     # Start supervisord, which will start Synapse, all of the configured worker
     # processes, redis, nginx etc. according to the config we created above.
-    start_supervisord()
+    log("Starting supervisord")
+    os.execl(
+        "/usr/local/bin/supervisord",
+        "supervisord",
+        "-c",
+        "/etc/supervisor/supervisord.conf",
+    )


 if __name__ == "__main__":

docker/start.py

@@ -6,27 +6,28 @@ import os
 import platform
 import subprocess
 import sys
+from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional

 import jinja2

 # Utility functions
-def log(txt):
+def log(txt: str) -> None:
     print(txt, file=sys.stderr)


-def error(txt):
+def error(txt: str) -> NoReturn:
     log(txt)
     sys.exit(2)


-def convert(src, dst, environ):
+def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
     """Generate a file from a template

     Args:
-        src (str): path to input file
-        dst (str): path to file to write
-        environ (dict): environment dictionary, for replacement mappings.
+        src: path to input file
+        dst: path to file to write
+        environ: environment dictionary, for replacement mappings.
     """
     with open(src) as infile:
         template = infile.read()
@@ -35,25 +36,30 @@ def convert(src, dst, environ):
         outfile.write(rendered)


-def generate_config_from_template(config_dir, config_path, environ, ownership):
+def generate_config_from_template(
+    config_dir: str,
+    config_path: str,
+    os_environ: Mapping[str, str],
+    ownership: Optional[str],
+) -> None:
     """Generate a homeserver.yaml from environment variables

     Args:
-        config_dir (str): where to put generated config files
-        config_path (str): where to put the main config file
-        environ (dict): environment dictionary
-        ownership (str|None): "<user>:<group>" string which will be used to set
+        config_dir: where to put generated config files
+        config_path: where to put the main config file
+        os_environ: environment mapping
+        ownership: "<user>:<group>" string which will be used to set
             ownership of the generated configs. If None, ownership will not change.
     """
     for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
-        if v not in environ:
+        if v not in os_environ:
             error(
                 "Environment variable '%s' is mandatory when generating a config file."
                 % (v,)
             )

     # populate some params from data files (if they exist, else create new ones)
-    environ = environ.copy()
+    environ: Dict[str, Any] = dict(os_environ)
     secrets = {
         "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
         "macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
@@ -127,12 +133,12 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
     subprocess.check_output(args)


-def run_generate_config(environ, ownership):
+def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
     """Run synapse with a --generate-config param to generate a template config file

     Args:
-        environ (dict): env var dict
-        ownership (str|None): "userid:groupid" arg for chmod. If None, ownership will not change.
+        environ: env vars from `os.enrivon`.
+        ownership: "userid:groupid" arg for chmod. If None, ownership will not change.

     Never returns.
     """
@@ -178,7 +184,7 @@ def run_generate_config(environ, ownership):
     os.execv(sys.executable, args)


-def main(args, environ):
+def main(args: List[str], environ: MutableMapping[str, str]) -> None:
     mode = args[1] if len(args) > 1 else "run"

     # if we were given an explicit user to switch to, do so

docs/SUMMARY.md

@@ -17,6 +17,7 @@
 # Usage
   - [Federation](federate.md)
   - [Configuration](usage/configuration/README.md)
+    - [Configuration Manual](usage/configuration/config_documentation.md)
     - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
     - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
   - [Structured Logging](structured_logging.md)

docs/admin_api/user_admin_api.md

@@ -804,7 +804,7 @@ POST /_synapse/admin/v2/users/<user_id>/delete_devices
   "devices": [
     "QBUAZIFURK",
     "AUIECTSRND"
-  ],
+  ]
 }
 ```

docs/code_style.md

@@ -6,60 +6,36 @@ The Synapse codebase uses a number of code formatting tools in order to
 quickly and automatically check for formatting (and sometimes logical)
 errors in code.

-The necessary tools are detailed below.
+The necessary tools are:

-First install them with:
+- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
+- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
+- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
+- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
+
+Install them with:

 ```sh
 pip install -e ".[lint,mypy]"
 ```

-- **black**
-
-  The Synapse codebase uses [black](https://pypi.org/project/black/)
-  as an opinionated code formatter, ensuring all comitted code is
-  properly formatted.
-
-  Have `black` auto-format your code (it shouldn't change any
-  functionality) with:
-
-  ```sh
-  black .
-  ```
-
-- **flake8**
-
-  `flake8` is a code checking tool. We require code to pass `flake8`
-  before being merged into the codebase.
-
-  Check all application and test code with:
-
-  ```sh
-  flake8 .
-  ```
-
-- **isort**
-
-  `isort` ensures imports are nicely formatted, and can suggest and
-  auto-fix issues such as double-importing.
-
-  Auto-fix imports with:
-
-  ```sh
-  isort .
-  ```
+The easiest way to run the lints is to invoke the linter script as follows.
+
+```sh
+scripts-dev/lint.sh
+```

 It's worth noting that modern IDEs and text editors can run these tools
 automatically on save. It may be worth looking into whether this
 functionality is supported in your editor for a more convenient
-development workflow. It is not, however, recommended to run `flake8` on
-save as it takes a while and is very resource intensive.
+development workflow. It is not, however, recommended to run `flake8` or `mypy`
+on save as they take a while and can be very resource intensive.

 ## General rules

 - **Naming**:
-  - Use camel case for class and type names
-  - Use underscores for functions and variables.
+  - Use `CamelCase` for class and type names
+  - Use underscores for `function_names` and `variable_names`.
 - **Docstrings**: should follow the [google code
   style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
   See the

docs/development/contributing_guide.md

@@ -48,19 +48,28 @@ can find many good git tutorials on the web.

 # 4. Install the dependencies

-Once you have installed Python 3 and added the source, please open a terminal and
-setup a *virtualenv*, as follows:
+Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
+and development environment. Once you have installed Python 3 and added the
+source, you should install `poetry`.
+Of their installation methods, we recommend
+[installing `poetry` using `pipx`](https://python-poetry.org/docs/#installing-with-pipx),
+
+```shell
+pip install --user pipx
+pipx install poetry
+```
+
+but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
+for other installation methods.
+
+Next, open a terminal and install dependencies as follows:

 ```sh
 cd path/where/you/have/cloned/the/repository
-python3 -m venv ./env
-source ./env/bin/activate
-pip install wheel
-pip install -e ".[all,dev]"
-pip install tox
+poetry install --extras all
 ```

-This will install the developer dependencies for the project.
+This will install the runtime and developer dependencies for the project.

 # 5. Get in touch.
@@ -117,11 +126,10 @@ The linters look at your code and do two things:
 - ensure that your code follows the coding style adopted by the project;
 - catch a number of errors in your code.

-The linter takes no time at all to run as soon as you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies).
+The linter takes no time at all to run as soon as you've [downloaded the dependencies](#4-install-the-dependencies).

 ```sh
-source ./env/bin/activate
-./scripts-dev/lint.sh
+poetry run ./scripts-dev/lint.sh
 ```

 Note that this script *will modify your files* to fix styling errors.
@@ -131,15 +139,13 @@ If you wish to restrict the linters to only the files changed since the last com
 (much faster!), you can instead run:

 ```sh
-source ./env/bin/activate
-./scripts-dev/lint.sh -d
+poetry run ./scripts-dev/lint.sh -d
 ```

 Or if you know exactly which files you wish to lint, you can instead run:

 ```sh
-source ./env/bin/activate
-./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
+poetry run ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
 ```

 ## Run the unit tests (Twisted trial).
@@ -148,16 +154,14 @@ The unit tests run parts of Synapse, including your changes, to see if anything
 was broken. They are slower than the linters but will typically catch more errors.

 ```sh
-source ./env/bin/activate
-trial tests
+poetry run trial tests
 ```

 If you wish to only run *some* unit tests, you may specify
 another module instead of `tests` - or a test class or a method:

 ```sh
-source ./env/bin/activate
-trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
+poetry run trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
 ```

 If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):
@@ -169,7 +173,7 @@ less _trial_temp/test.log
 To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:

 ```sh
-SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
+SYNAPSE_TEST_LOG_LEVEL=DEBUG poetry run trial tests
 ```

 By default, tests will use an in-memory SQLite database for test data. For additional
@@ -180,7 +184,7 @@ database state to be stored in a file named `test.db` under the trial process'
 working directory. Typically, this ends up being `_trial_temp/test.db`. For example:

 ```sh
-SYNAPSE_TEST_PERSIST_SQLITE_DB=1 trial tests
+SYNAPSE_TEST_PERSIST_SQLITE_DB=1 poetry run trial tests
 ```

 The database file can then be inspected with:

docs/development/dependencies.md (new file)

@@ -0,0 +1,239 @@
# Managing dependencies with Poetry
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
# Background
Synapse uses a variety of third-party Python packages to function as a homeserver.
Some of these are direct dependencies, listed in `pyproject.toml` under the
`[tool.poetry.dependencies]` section. The rest are transitive dependencies (the
things that our direct dependencies themselves depend on, and so on recursively.)
We maintain a locked list of all our dependencies (transitive included) so that
we can track exactly which version of each dependency appears in a given release.
See [here](https://github.com/matrix-org/synapse/issues/11537#issue-1074469665)
for discussion of why we wanted this for Synapse. We chose to use
[`poetry`](https://python-poetry.org/) to manage this locked list; see
[this comment](https://github.com/matrix-org/synapse/issues/11537#issuecomment-1015975819)
for the reasoning.
The locked dependencies get included in our "self-contained" releases: namely,
our docker images and our debian packages. We also use the locked dependencies
in development and our continuous integration.
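
In practice, CI and the self-contained builds restore or export that locked environment with commands along these lines (a sketch only; the exact extras and output paths differ between the CI, Docker and Debian configurations):

```shell
# Install Synapse plus its locked dependencies, including the optional and test extras.
poetry install --extras "all test"

# Export the locked list as a pip-compatible requirements file,
# as the Docker and Debian builds do.
poetry export --extras all -o requirements.txt
```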
Separately, our "broad" dependencies—the version ranges specified in
`pyproject.toml`—are included as metadata in our "sdists" and "wheels" [uploaded
to PyPI](https://pypi.org/project/matrix-synapse). Installing from PyPI or from
the Synapse source tree directly will _not_ use the locked dependencies; instead,
they'll pull in the latest version of each package available at install time.
## Example dependency
An example may help. We have a broad dependency on
[`phonenumbers`](https://pypi.org/project/phonenumbers/), as declared in
this snippet from pyproject.toml [as of Synapse 1.57](
https://github.com/matrix-org/synapse/blob/release-v1.57/pyproject.toml#L133
):
```toml
[tool.poetry.dependencies]
# ...
phonenumbers = ">=8.2.0"
```
In our lockfile this is
[pinned](https://github.com/matrix-org/synapse/blob/dfc7646504cef3e4ff396c36089e1c6f1b1634de/poetry.lock#L679-L685)
to version 8.12.44, even though
[newer versions are available](https://pypi.org/project/phonenumbers/#history).
```toml
[[package]]
name = "phonenumbers"
version = "8.12.44"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
python-versions = "*"
```
The lockfile also includes a
[cryptographic checksum](https://github.com/matrix-org/synapse/blob/release-v1.57/poetry.lock#L2178-L2181)
of the sdists and wheels provided for this version of `phonenumbers`.
```toml
[metadata.files]
# ...
phonenumbers = [
{file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
{file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
]
```
We can see this pinned version inside the docker image for that release:
```
$ docker pull matrixdotorg/synapse:v1.57.0
...
$ docker run --entrypoint pip matrixdotorg/synapse:v1.57.0 show phonenumbers
Name: phonenumbers
Version: 8.12.44
Summary: Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.
Home-page: https://github.com/daviddrysdale/python-phonenumbers
Author: David Drysdale
Author-email: dmd@lurklurk.org
License: Apache License 2.0
Location: /usr/local/lib/python3.9/site-packages
Requires:
Required-by: matrix-synapse
```
Whereas the wheel metadata just contains the broad dependencies:
```
$ cd /tmp
$ wget https://files.pythonhosted.org/packages/ca/5e/d722d572cc5b3092402b783d6b7185901b444427633bd8a6b00ea0dd41b7/matrix_synapse-1.57.0rc1-py3-none-any.whl
...
$ unzip -c matrix_synapse-1.57.0rc1-py3-none-any.whl matrix_synapse-1.57.0rc1.dist-info/METADATA | grep phonenumbers
Requires-Dist: phonenumbers (>=8.2.0)
```
# Tooling recommendation: direnv
[`direnv`](https://direnv.net/) is a tool for activating environments in your
shell inside a given directory. Its support for poetry is unofficial (a
community wiki recipe only), but works solidly in our experience. We thoroughly
recommend it for daily use. To use it:
1. [Install `direnv`](https://direnv.net/docs/installation.html) - it's likely
packaged for your system already.
2. Teach direnv about poetry. The [shell config here](https://github.com/direnv/direnv/wiki/Python#poetry)
needs to be added to `~/.config/direnv/direnvrc` (or more generally `$XDG_CONFIG_HOME/direnv/direnvrc`).
3. Mark the synapse checkout as a poetry project: `echo layout poetry > .envrc`.
4. Convince yourself that you trust this `.envrc` configuration and project.
Then formally confirm this to `direnv` by running `direnv allow`.
Then whenever you navigate to the synapse checkout, you should be able to run
e.g. `mypy` instead of `poetry run mypy`; `python` instead of
`poetry run python`; and your shell commands will automatically run in the
context of poetry's venv, without having to run `poetry shell` beforehand.
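As a rough sketch, the whole setup boils down to something like the following
(the checkout path is illustrative, and it assumes direnv is already hooked into
your shell and the wiki recipe is in place):

```shell
# One-off setup inside the synapse checkout:
cd /path/to/synapse
echo 'layout poetry' > .envrc
direnv allow

# From then on, entering the directory activates poetry's virtualenv:
cd /path/to/synapse
which python    # should point inside poetry's virtualenv
```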
# How do I...
## ...reset my venv to the locked environment?
```shell
poetry install --extras all --remove-untracked
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
To avoid typing `poetry run` all the time, you can run `poetry shell`
to start a new shell in the poetry virtualenv context. Within `poetry shell`,
`python`, `pip`, `mypy`, `trial`, etc. are all run inside the project virtualenv
and isolated from the rest of the system.
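For example (a minimal sketch; any project command works the same way):

```shell
# Run a single command inside the project virtualenv:
poetry run trial tests

# Or start a subshell with the virtualenv active:
poetry shell
trial tests
exit    # leave the poetry shell again
```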
Roughly speaking, the translation from a traditional virtualenv is:
- `env/bin/activate` -> `poetry shell`, and
- `deactivate` -> close the terminal (Ctrl-D, `exit`, etc.)
See also the direnv recommendation above, which makes `poetry run` and
`poetry shell` unnecessary.
## ...inspect the `poetry` virtualenv?
Some suggestions:
```shell
# Current env only
poetry env info
# All envs: this allows you to have e.g. a poetry managed venv for Python 3.7,
# and another for Python 3.10.
poetry env list --full-path
poetry run pip list
```
Note that `poetry show` describes the abstract *lock file* rather than your
on-disk environment. With that said, `poetry show --tree` can sometimes be
useful.
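For example, sticking with the `phonenumbers` example from earlier:

```shell
# What the lockfile pins for a single package:
poetry show phonenumbers

# The whole locked dependency tree:
poetry show --tree
```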
## ...add a new dependency?
Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
`--extras` and `--optional` flags in particular.
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
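For instance, a sketch of the second route (the package name is purely
illustrative):

```shell
# Add a hypothetical runtime dependency:
poetry add examplepackage

# Or add it as a development-only dependency:
poetry add --dev examplepackage
```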
## ...remove a dependency?
This is not done often and is untested, but
```shell
poetry remove packagename
```
ought to do the trick. Alternatively, manually update `pyproject.toml` and run
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
files in your commit.
## ...update the version range for an existing dependency?
Best done by manually editing `pyproject.toml`, then `poetry lock --no-update`.
Include the updated `pyproject.toml` and `poetry.lock` in your commit.
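For example, after editing the range for `phonenumbers` by hand:

```shell
# pyproject.toml has been edited already; recompute the lockfile's content-hash:
poetry lock --no-update
git add pyproject.toml poetry.lock
```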
## ...update a dependency in the locked environment?
Use
```shell
poetry update packagename
```
to use the latest version of `packagename` in the locked environment, without
affecting the broad dependencies listed in the wheel.
There doesn't seem to be a way to do this whilst locking a _specific_ version of
`packagename`. We can work around this (crudely) as follows:
```shell
poetry add packagename==1.2.3
# This should update poetry.lock.
# Now undo the changes to pyproject.toml. For example
# git restore pyproject.toml
# Get poetry to recompute the content-hash of pyproject.toml without changing
# the locked package versions.
poetry lock --no-update
```
Either way, include the updated `poetry.lock` file in your commit.
## ...export a `requirements.txt` file?
```shell
poetry export --extras all
```
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
be required.
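To write the export to a file rather than stdout, something like this should
work (`-o` is `poetry export`'s output flag):

```shell
poetry export --extras all -o requirements.txt
```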
## ...build a test wheel?
I usually use
```shell
poetry run pip install build && poetry run python -m build
```
because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too.) However, you could also try
`poetry build`.
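If the build succeeds, the sdist and wheel end up in `dist/`. One way to
sanity-check the wheel is to install it into a throwaway virtualenv (paths here
are illustrative):

```shell
python -m venv /tmp/synapse-wheel-test
/tmp/synapse-wheel-test/bin/pip install dist/matrix_synapse-*.whl
```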

View file

@ -206,6 +206,28 @@ backend matrix
server matrix 127.0.0.1:8008 server matrix 127.0.0.1:8008
``` ```
[Delegation](delegate.md) example:
```
frontend https
acl matrix-well-known-client-path path /.well-known/matrix/client
acl matrix-well-known-server-path path /.well-known/matrix/server
use_backend matrix-well-known-client if matrix-well-known-client-path
use_backend matrix-well-known-server if matrix-well-known-server-path
backend matrix-well-known-client
http-after-response set-header Access-Control-Allow-Origin "*"
http-after-response set-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
http-after-response set-header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
http-request return status 200 content-type application/json string '{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}'
backend matrix-well-known-server
http-after-response set-header Access-Control-Allow-Origin "*"
http-after-response set-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
http-after-response set-header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
http-request return status 200 content-type application/json string '{"m.server":"matrix.example.com:443"}'
```
### Relayd ### Relayd
``` ```

View file

@ -10,15 +10,15 @@ See the folder [system](https://github.com/matrix-org/synapse/tree/develop/docs/
for the systemd unit files. for the systemd unit files.
The folder [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/) The folder [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/)
contains an example configuration for the `federation_reader` worker. contains an example configuration for the `generic_worker` worker.
## Synapse configuration files ## Synapse configuration files
See [the worker documentation](../workers.md) for information on how to set up the See [the worker documentation](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly. configuration files and reverse-proxy correctly.
Below is a sample `federation_reader` worker configuration file. Below is a sample `generic_worker` worker configuration file.
```yaml ```yaml
{{#include workers/federation_reader.yaml}} {{#include workers/generic_worker.yaml}}
``` ```
Systemd manages daemonization itself, so ensure that none of the configuration Systemd manages daemonization itself, so ensure that none of the configuration
@ -61,9 +61,9 @@ systemctl stop matrix-synapse.target
# Restart the master alone # Restart the master alone
systemctl start matrix-synapse.service systemctl start matrix-synapse.service
# Restart a specific worker (eg. federation_reader); the master is # Restart a specific worker (eg. generic_worker); the master is
# unaffected by this. # unaffected by this.
systemctl restart matrix-synapse-worker@federation_reader.service systemctl restart matrix-synapse-worker@generic_worker.service
# Add a new worker (assuming all configs are set up already) # Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service systemctl enable matrix-synapse-worker@federation_writer.service

View file

@ -0,0 +1,8 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml

View file

@ -0,0 +1,23 @@
worker_app: synapse.app.generic_worker
worker_name: event_persister1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
# Enable listener if this stream writer handles endpoints for the `typing` or
# `to_device` streams. Uses a different port to the `replication` listener to
# avoid exposing the `replication` listener publicly.
#
#- type: http
# port: 8035
# resources:
# - names: [client]
worker_log_config: /etc/matrix-synapse/event-persister-log.yaml

View file

@ -1,13 +0,0 @@
worker_app: synapse.app.federation_reader
worker_name: federation_reader1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8011
resources:
- names: [federation]
worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml

View file

@ -0,0 +1,14 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8083
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml

View file

@ -302,14 +302,14 @@ Here are a few things to try:
(Understanding the output is beyond the scope of this document!) (Understanding the output is beyond the scope of this document!)
* You can test your Matrix homeserver TURN setup with https://test.voip.librepush.net/. * You can test your Matrix homeserver TURN setup with <https://test.voip.librepush.net/>.
Note that this test is not fully reliable yet, so don't be discouraged if Note that this test is not fully reliable yet, so don't be discouraged if
the test fails. the test fails.
[Here](https://github.com/matrix-org/voip-tester) is the github repo of the [Here](https://github.com/matrix-org/voip-tester) is the github repo of the
source of the tester, where you can file bug reports. source of the tester, where you can file bug reports.
* There is a WebRTC test tool at * There is a WebRTC test tool at
https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/. To <https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/>. To
use it, you will need a username/password for your TURN server. You can use it, you will need a username/password for your TURN server. You can
either: either:

View file

@ -19,32 +19,36 @@ this document.
packages](setup/installation.md#prebuilt-packages), you will need to follow the packages](setup/installation.md#prebuilt-packages), you will need to follow the
normal process for upgrading those packages. normal process for upgrading those packages.
- If Synapse was installed from source, then: - If Synapse was installed using pip then upgrade to the latest
1. Activate the virtualenv before upgrading. For example, if
Synapse is installed in a virtualenv in `~/synapse/env` then
run:
```bash
source ~/synapse/env/bin/activate
```
2. If Synapse was installed using pip then upgrade to the latest
version by running: version by running:
```bash ```bash
pip install --upgrade matrix-synapse pip install --upgrade matrix-synapse
``` ```
If Synapse was installed using git then upgrade to the latest - If Synapse was installed from source, then:
version by running:
1. Obtain the latest version of the source code. Git users can run
`git pull` to do this.
2. If you're running Synapse in a virtualenv, make sure to activate it before
upgrading. For example, if Synapse is installed in a virtualenv in `~/synapse/env` then
run:
```bash ```bash
git pull source ~/synapse/env/bin/activate
pip install --upgrade . pip install --upgrade .
``` ```
Include any relevant extras between square brackets, e.g. `pip install --upgrade ".[postgres,oidc]"`.
3. Restart Synapse: 3. If you're using `poetry` to manage a Synapse installation, run:
```bash
poetry install
```
Include any relevant extras with `--extras`, e.g. `poetry install --extras postgres --extras oidc`.
It's probably easiest to run `poetry install --extras all`.
4. Restart Synapse:
```bash ```bash
synctl restart synctl restart
@ -85,6 +89,13 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
``` ```
# Upgrading to v1.58.0
## Groups/communities feature has been disabled by default
The non-standard groups/communities feature in Synapse has been disabled by default
and will be removed in Synapse v1.61.0.
# Upgrading to v1.57.0 # Upgrading to v1.57.0
## Changes to database schema for application services ## Changes to database schema for application services

File diff suppressed because it is too large Load diff

View file

@ -75,6 +75,20 @@ function setTocEntry() {
* Populate sidebar on load * Populate sidebar on load
*/ */
window.addEventListener('load', () => { window.addEventListener('load', () => {
// Prevent rendering the table of contents of the "print book" page, as it
// will end up being rendered into the output (in a broken-looking way)
// Get the name of the current page (i.e. 'print.html')
const pageNameExtension = window.location.pathname.split('/').pop();
// Split off the extension (as '.../print' is also a valid page name), which
// should result in 'print'
const pageName = pageNameExtension.split('.')[0];
if (pageName === "print") {
// Don't render the table of contents on this page
return;
}
// Only create table of contents if there is more than one header on the page // Only create table of contents if there is more than one header on the page
if (headers.length <= 1) { if (headers.length <= 1) {
return; return;

View file

@ -138,22 +138,7 @@ as the `listeners` option in the shared config.
For example: For example:
```yaml ```yaml
worker_app: synapse.app.generic_worker {{#include systemd-with-workers/workers/generic_worker.yaml}}
worker_name: worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8083
resources:
- names:
- client
- federation
worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
``` ```
...is a full configuration for a generic worker instance, which will expose a ...is a full configuration for a generic worker instance, which will expose a
@ -343,9 +328,9 @@ effects of bursts of events from that bridge on events sent by normal users.
#### Stream writers #### Stream writers
Additionally, there is *experimental* support for moving writing of specific Additionally, the writing of specific streams (such as events) can be moved off
streams (such as events) off of the main process to a particular worker. (This of the main process to a particular worker.
is only supported with Redis-based replication.) (This is only supported with Redis-based replication.)
To enable this, the worker must have a HTTP replication listener configured, To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker have a `worker_name` and be listed in the `instance_map` config. The same worker
@ -365,6 +350,12 @@ stream_writers:
events: event_persister1 events: event_persister1
``` ```
An example for a stream writer instance:
```yaml
{{#include systemd-with-workers/workers/event_persister.yaml}}
```
Some of the streams have associated endpoints which, for maximum efficiency, should Some of the streams have associated endpoints which, for maximum efficiency, should
be routed to the workers handling that stream. See below for the currently supported be routed to the workers handling that stream. See below for the currently supported
streams and the endpoints associated with them: streams and the endpoints associated with them:
@ -422,7 +413,7 @@ the stream writer for the `presence` stream:
#### Background tasks #### Background tasks
There is also *experimental* support for moving background tasks to a separate There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled). stats is enabled).
@ -438,6 +429,12 @@ run_background_tasks_on: background_worker
You might also wish to investigate the `update_user_directory` and You might also wish to investigate the `update_user_directory` and
`media_instance_running_background_jobs` settings. `media_instance_running_background_jobs` settings.
An example for a dedicated background worker instance:
```yaml
{{#include systemd-with-workers/workers/background_worker.yaml}}
```
### `synapse.app.pusher` ### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any Handles sending push notifications to sygnal and email. Doesn't handle any
@ -617,14 +614,14 @@ The following shows an example setup using Redis and a reverse proxy:
| Main | | Generic | | Generic | | Event | | Main | | Generic | | Generic | | Event |
| Process | | Worker 1 | | Worker 2 | | Persister | | Process | | Worker 1 | | Worker 2 | | Persister |
+--------------+ +--------------+ +--------------+ +--------------+ +--------------+ +--------------+ +--------------+ +--------------+
^ ^ | ^ | | ^ | ^ ^ ^ ^ | ^ | | ^ | | ^ ^
| | | | | | | | | | | | | | | | | | | | |
| | | | | HTTP | | | | | | | | | | HTTP | | | | | |
| +----------+<--|---|---------+ | | | | | +----------+<--|---|---------+<--|---|---------+ | |
| | +-------------|-->+----------+ | | | +-------------|-->+-------------+ |
| | | | | | | |
| | | | | | | |
v v v v v v v v
==================================================================== ======================================================================
Redis pub/sub channel Redis pub/sub channel
``` ```

View file

@ -13,7 +13,6 @@ no_implicit_optional = True
files = files =
docker/, docker/,
scripts-dev/, scripts-dev/,
setup.py,
synapse/, synapse/,
tests/ tests/
@ -234,8 +233,8 @@ disallow_untyped_defs = True
;; The `typeshed` project maintains stubs here: ;; The `typeshed` project maintains stubs here:
;; https://github.com/python/typeshed/tree/master/stubs ;; https://github.com/python/typeshed/tree/master/stubs
;; and for each package `foo` there's a corresponding `types-foo` package on PyPI, ;; and for each package `foo` there's a corresponding `types-foo` package on PyPI,
;; which we can pull in as a dev dependency by adding to `setup.py`'s ;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s
;; `CONDITIONAL_REQUIREMENTS["mypy"]` list. ;; `[tool.poetry.dev-dependencies]` list.
[mypy-authlib.*] [mypy-authlib.*]
ignore_missing_imports = True ignore_missing_imports = True

87
poetry.lock generated
View file

@ -1,11 +1,3 @@
[[package]]
name = "appdirs"
version = "1.4.4"
description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
category = "dev"
optional = false
python-versions = "*"
[[package]] [[package]]
name = "attrs" name = "attrs"
version = "21.4.0" version = "21.4.0"
@ -49,17 +41,6 @@ six = "*"
[package.extras] [package.extras]
visualize = ["graphviz (>0.5.1)", "Twisted (>=16.1.1)"] visualize = ["graphviz (>0.5.1)", "Twisted (>=16.1.1)"]
[[package]]
name = "baron"
version = "0.10.1"
description = "Full Syntax Tree for python to make writing refactoring code a realist task"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
rply = "*"
[[package]] [[package]]
name = "bcrypt" name = "bcrypt"
version = "3.2.0" version = "3.2.0"
@ -720,7 +701,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock
[[package]] [[package]]
name = "prometheus-client" name = "prometheus-client"
version = "0.13.1" version = "0.14.0"
description = "Python client for the Prometheus monitoring system." description = "Python client for the Prometheus monitoring system."
category = "main" category = "main"
optional = false optional = false
@ -984,20 +965,6 @@ Pygments = ">=2.5.1"
[package.extras] [package.extras]
md = ["cmarkgfm (>=0.8.0)"] md = ["cmarkgfm (>=0.8.0)"]
[[package]]
name = "redbaron"
version = "0.9.2"
description = "Abstraction on top of baron, a FST for python to make writing refactoring code a realistic task"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
baron = ">=0.7"
[package.extras]
notebook = ["pygments"]
[[package]] [[package]]
name = "requests" name = "requests"
version = "2.27.1" version = "2.27.1"
@ -1038,17 +1005,6 @@ python-versions = ">=3.7"
[package.extras] [package.extras]
idna2008 = ["idna"] idna2008 = ["idna"]
[[package]]
name = "rply"
version = "0.7.8"
description = "A pure Python Lex/Yacc that works with RPython"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
appdirs = "*"
[[package]] [[package]]
name = "secretstorage" name = "secretstorage"
version = "3.3.1" version = "3.3.1"
@ -1288,7 +1244,7 @@ urllib3 = ">=1.26.0"
[[package]] [[package]]
name = "twisted" name = "twisted"
version = "22.2.0" version = "22.4.0"
description = "An asynchronous networking framework written in Python" description = "An asynchronous networking framework written in Python"
category = "main" category = "main"
optional = false optional = false
@ -1308,19 +1264,20 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2" "zope.interface" = ">=4.4.2"
[package.extras] [package.extras]
all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"] conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"]
conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"]
contextvars = ["contextvars (>=2.4,<3)"] contextvars = ["contextvars (>=2.4,<3)"]
dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"] dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"]
dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"] dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"]
http2 = ["h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"] mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"] test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"]
tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"] tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"]
windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
[[package]] [[package]]
name = "twisted-iocpsupport" name = "twisted-iocpsupport"
@ -1596,13 +1553,9 @@ url_preview = ["lxml"]
[metadata] [metadata]
lock-version = "1.1" lock-version = "1.1"
python-versions = "^3.7" python-versions = "^3.7"
content-hash = "964ad29eaf7fd02749a4e735818f3bc0ba729c2f4b9e3213f0daa02643508b16" content-hash = "f482a4f594a165dfe01ce253a22510d5faf38647ab0dcebc35789350cafd9bf0"
[metadata.files] [metadata.files]
appdirs = [
{file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
{file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
]
attrs = [ attrs = [
{file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
{file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
@ -1615,10 +1568,6 @@ automat = [
{file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"}, {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"},
{file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"}, {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"},
] ]
baron = [
{file = "baron-0.10.1-py2.py3-none-any.whl", hash = "sha256:befb33f4b9e832c7cd1e3cf0eafa6dd3cb6ed4cb2544245147c019936f4e0a8a"},
{file = "baron-0.10.1.tar.gz", hash = "sha256:af822ad44d4eb425c8516df4239ac4fdba9fdb398ef77e4924cd7c9b4045bc2f"},
]
bcrypt = [ bcrypt = [
{file = "bcrypt-3.2.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b589229207630484aefe5899122fb938a5b017b0f4349f769b8c13e78d99a8fd"}, {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b589229207630484aefe5899122fb938a5b017b0f4349f769b8c13e78d99a8fd"},
{file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"}, {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"},
@ -2229,8 +2178,8 @@ platformdirs = [
{file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
] ]
prometheus-client = [ prometheus-client = [
{file = "prometheus_client-0.13.1-py3-none-any.whl", hash = "sha256:357a447fd2359b0a1d2e9b311a0c5778c330cfbe186d880ad5a6b39884652316"}, {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
{file = "prometheus_client-0.13.1.tar.gz", hash = "sha256:ada41b891b79fca5638bd5cfe149efa86512eaa55987893becd2c6d8d0a5dfc5"}, {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
] ]
psycopg2 = [ psycopg2 = [
{file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"}, {file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"},
@ -2411,10 +2360,6 @@ readme-renderer = [
{file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"}, {file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"},
{file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"}, {file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"},
] ]
redbaron = [
{file = "redbaron-0.9.2-py2.py3-none-any.whl", hash = "sha256:d01032b6a848b5521a8d6ef72486315c2880f420956870cdd742e2b5a09b9bab"},
{file = "redbaron-0.9.2.tar.gz", hash = "sha256:472d0739ca6b2240bb2278ae428604a75472c9c12e86c6321e8c016139c0132f"},
]
requests = [ requests = [
{file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
{file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"},
@ -2427,10 +2372,6 @@ rfc3986 = [
{file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
{file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
] ]
rply = [
{file = "rply-0.7.8-py2.py3-none-any.whl", hash = "sha256:28ffd11d656c48aeb8c508eb382acd6a0bd906662624b34388751732a27807e7"},
{file = "rply-0.7.8.tar.gz", hash = "sha256:2a808ac25a4580a9991fc304d64434e299a8fc75760574492f242cbb5bb301c9"},
]
secretstorage = [ secretstorage = [
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
@ -2596,8 +2537,8 @@ twine = [
{file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"}, {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
] ]
twisted = [ twisted = [
{file = "Twisted-22.2.0-py3-none-any.whl", hash = "sha256:5c63c149eb6b8fe1e32a0215b1cef96fabdba04f705d8efb9174b1ccf5b49d49"}, {file = "Twisted-22.4.0-py3-none-any.whl", hash = "sha256:f9f7a91f94932477a9fc3b169d57f54f96c6e74a23d78d9ce54039a7f48928a2"},
{file = "Twisted-22.2.0.tar.gz", hash = "sha256:57f32b1f6838facb8c004c89467840367ad38e9e535f8252091345dba500b4f2"}, {file = "Twisted-22.4.0.tar.gz", hash = "sha256:a047990f57dfae1e0bd2b7df2526d4f16dcdc843774dc108b78c52f2a5f13680"},
] ]
twisted-iocpsupport = [ twisted-iocpsupport = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"}, {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},

View file

@ -54,7 +54,7 @@ skip_gitignore = true
[tool.poetry] [tool.poetry]
name = "matrix-synapse" name = "matrix-synapse"
version = "1.57.1" version = "1.58.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol" description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0" license = "Apache-2.0"
@ -270,7 +270,6 @@ idna = ">=2.5"
# The following are used by the release script # The following are used by the release script
click = "==8.1.0" click = "==8.1.0"
redbaron = "==0.9.2"
GitPython = "==3.1.14" GitPython = "==3.1.14"
commonmark = "==0.9.1" commonmark = "==0.9.1"
pygithub = "==1.55" pygithub = "==1.55"
@ -280,5 +279,5 @@ twine = "*"
towncrier = ">=18.6.0rc1" towncrier = ">=18.6.0rc1"
[build-system] [build-system]
requires = ["setuptools"] requires = ["poetry-core>=1.0.0"]
build-backend = "setuptools.build_meta" build-backend = "poetry.core.masonry.api"

View file

@ -26,6 +26,7 @@ DISTS = (
"debian:sid", "debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:impish", # 21.10 (EOL 2022-07) "ubuntu:impish", # 21.10 (EOL 2022-07)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
) )
DESC = """\ DESC = """\

View file

@ -64,4 +64,4 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
# Run the tests! # Run the tests!
echo "Images built; running complement" echo "Images built; running complement"
cd "$COMPLEMENT_DIR" cd "$COMPLEMENT_DIR"
go test -v -tags synapse_blacklist,msc2716,msc3030 -count=1 "$@" ./tests/... go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "$@" ./tests/...

View file

@ -124,7 +124,12 @@ def request(
authorization_headers = [] authorization_headers = []
for key, sig in signed_json["signatures"][origin_name].items(): for key, sig in signed_json["signatures"][origin_name].items():
header = 'X-Matrix origin=%s,key="%s",sig="%s"' % (origin_name, key, sig) header = 'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"' % (
origin_name,
key,
sig,
destination,
)
authorization_headers.append(header.encode("ascii")) authorization_headers.append(header.encode("ascii"))
print("Authorization: %s" % header, file=sys.stderr) print("Authorization: %s" % header, file=sys.stderr)

View file

@ -79,8 +79,20 @@ else
# If we were not asked to lint changed files, and no paths were found as a result, # If we were not asked to lint changed files, and no paths were found as a result,
# then lint everything! # then lint everything!
if [[ -z ${files+x} ]]; then if [[ -z ${files+x} ]]; then
# Lint all source code files and directories # CI runs each linter on the entire checkout, e.g. `black .`. So don't
files=( "." ) # rely on this list to *find* lint targets if that misses a file; instead;
# use it to exclude files from linters when this can't be done by config.
#
# To check which files the linters examine, use:
# black --verbose . 2>&1 | \grep -v ignored
# isort --show-files .
# flake8 --verbose . # This isn't a great option
# mypy has explicit config in mypy.ini; there is also mypy --verbose
files=(
"synapse" "docker" "tests"
"scripts-dev"
"contrib" "synmark" "stubs" ".ci"
)
fi fi
fi fi

View file

@ -25,13 +25,12 @@ import sys
import urllib.request import urllib.request
from os import path from os import path
from tempfile import TemporaryDirectory from tempfile import TemporaryDirectory
from typing import List, Optional, Tuple from typing import List, Optional
import attr import attr
import click import click
import commonmark import commonmark
import git import git
import redbaron
from click.exceptions import ClickException from click.exceptions import ClickException
from github import Github from github import Github
from packaging import version from packaging import version
@ -69,11 +68,12 @@ def cli():
# ... wait for assets to build ... # ... wait for assets to build ...
./scripts-dev/release.py publish ./scripts-dev/release.py publish
./scripts-dev/release.py upload ./scripts-dev/release.py upload
# Optional: generate some nice links for the announcement # Optional: generate some nice links for the announcement
./scripts-dev/release.py upload ./scripts-dev/release.py announce
If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
`tag`/`publish` command, then a new draft release will be created/published. `tag`/`publish` command, then a new draft release will be created/published.
@ -99,7 +99,7 @@ def prepare():
repo.remote().fetch() repo.remote().fetch()
# Get the current version and AST from root Synapse module. # Get the current version and AST from root Synapse module.
current_version, parsed_synapse_ast, version_node = parse_version_from_module() current_version = get_package_version()
# Figure out what sort of release we're doing and calcuate the new version. # Figure out what sort of release we're doing and calcuate the new version.
rc = click.confirm("RC", default=True) rc = click.confirm("RC", default=True)
@ -161,7 +161,7 @@ def prepare():
click.get_current_context().abort() click.get_current_context().abort()
# Switch to the release branch. # Switch to the release branch.
parsed_new_version = version.parse(new_version) parsed_new_version: version.Version = version.parse(new_version)
# We assume for debian changelogs that we only do RCs or full releases. # We assume for debian changelogs that we only do RCs or full releases.
assert not parsed_new_version.is_devrelease assert not parsed_new_version.is_devrelease
@ -206,17 +206,15 @@ def prepare():
# Create the new release branch # Create the new release branch
release_branch = repo.create_head(release_branch_name, commit=base_branch) release_branch = repo.create_head(release_branch_name, commit=base_branch)
# Switch to the release branch and ensure its up to date. # Switch to the release branch and ensure it's up to date.
repo.git.checkout(release_branch_name) repo.git.checkout(release_branch_name)
update_branch(repo) update_branch(repo)
# Update the `__version__` variable and write it back to the file. # Update the version specified in pyproject.toml.
version_node.value = '"' + new_version + '"' subprocess.check_output(["poetry", "version", new_version])
with open("synapse/__init__.py", "w") as f:
f.write(parsed_synapse_ast.dumps())
# Generate changelogs. # Generate changelogs.
generate_and_write_changelog(current_version) generate_and_write_changelog(current_version, new_version)
# Generate debian changelogs # Generate debian changelogs
if parsed_new_version.pre is not None: if parsed_new_version.pre is not None:
@ -229,7 +227,7 @@ def prepare():
debian_version = new_version debian_version = new_version
run_until_successful( run_until_successful(
f'dch -M -v {debian_version} "New synapse release {debian_version}."', f'dch -M -v {debian_version} "New Synapse release {new_version}."',
shell=True, shell=True,
) )
run_until_successful('dch -M -r -D stable ""', shell=True) run_until_successful('dch -M -r -D stable ""', shell=True)
@ -283,7 +281,7 @@ def tag(gh_token: Optional[str]):
repo.remote().fetch() repo.remote().fetch()
# Find out the version and tag name. # Find out the version and tag name.
current_version, _, _ = parse_version_from_module() current_version = get_package_version()
tag_name = f"v{current_version}" tag_name = f"v{current_version}"
# Check we haven't released this version. # Check we haven't released this version.
@ -361,7 +359,7 @@ def publish(gh_token: str):
if repo.is_dirty(): if repo.is_dirty():
raise click.ClickException("Uncommitted changes exist.") raise click.ClickException("Uncommitted changes exist.")
current_version, _, _ = parse_version_from_module() current_version = get_package_version()
tag_name = f"v{current_version}" tag_name = f"v{current_version}"
if not click.confirm(f"Publish {tag_name}?", default=True): if not click.confirm(f"Publish {tag_name}?", default=True):
@ -395,7 +393,7 @@ def publish(gh_token: str):
def upload(): def upload():
"""Upload release to pypi.""" """Upload release to pypi."""
current_version, _, _ = parse_version_from_module() current_version = get_package_version()
tag_name = f"v{current_version}" tag_name = f"v{current_version}"
pypi_asset_names = [ pypi_asset_names = [
@ -423,7 +421,7 @@ def upload():
def announce(): def announce():
"""Generate markdown to announce the release.""" """Generate markdown to announce the release."""
current_version, _, _ = parse_version_from_module() current_version = get_package_version()
tag_name = f"v{current_version}" tag_name = f"v{current_version}"
click.echo( click.echo(
@ -454,37 +452,11 @@ Announce the release in
) )
def parse_version_from_module() -> Tuple[ def get_package_version() -> version.Version:
version.Version, redbaron.RedBaron, redbaron.Node version_string = subprocess.check_output(["poetry", "version", "--short"]).decode(
]: "utf-8"
# Parse the AST and load the `__version__` node so that we can edit it )
# later. return version.Version(version_string)
with open("synapse/__init__.py") as f:
red = redbaron.RedBaron(f.read())
version_node = None
for node in red:
if node.type != "assignment":
continue
if node.target.type != "name":
continue
if node.target.value != "__version__":
continue
version_node = node
break
if not version_node:
print("Failed to find '__version__' definition in synapse/__init__.py")
sys.exit(1)
# Parse the current version.
current_version = version.parse(version_node.value.value.strip('"'))
assert isinstance(current_version, version.Version)
return current_version, red, version_node
def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
@ -564,11 +536,13 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
return "\n".join(version_changelog) return "\n".join(version_changelog)
def generate_and_write_changelog(current_version: version.Version): def generate_and_write_changelog(current_version: version.Version, new_version: str):
# We do this by getting a draft so that we can edit it before writing to the # We do this by getting a draft so that we can edit it before writing to the
# changelog. # changelog.
result = run_until_successful( result = run_until_successful(
"python3 -m towncrier --draft", shell=True, capture_output=True f"python3 -m towncrier build --draft --version {new_version}",
shell=True,
capture_output=True,
) )
new_changes = result.stdout.decode("utf-8") new_changes = result.stdout.decode("utf-8")
new_changes = new_changes.replace( new_changes = new_changes.replace(

View file

@ -1,9 +0,0 @@
[check-manifest]
ignore =
.git-blame-ignore-revs
contrib
contrib/*
docs/*
pylint.cfg
tox.ini

183
setup.py
View file

@ -1,183 +0,0 @@
#!/usr/bin/env python
# Copyright 2014-2017 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2017-2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict
from setuptools import Command, find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
# Some notes on `setup.py test`:
#
# Once upon a time we used to try to make `setup.py test` run `tox` to run the
# tests. That's a bad idea for three reasons:
#
# 1: `setup.py test` is supposed to find out whether the tests work in the
# *current* environmentt, not whatever tox sets up.
# 2: Empirically, trying to install tox during the test run wasn't working ("No
# module named virtualenv").
# 3: The tox documentation advises against it[1].
#
# Even further back in time, we used to use setuptools_trial [2]. That has its
# own set of issues: for instance, it requires installation of Twisted to build
# an sdist (because the recommended mode of usage is to add it to
# `setup_requires`). That in turn means that in order to successfully run tox
# you have to have the python header files installed for whichever version of
# python tox uses (which is python3 on recent ubuntus, for example).
#
# So, for now at least, we stick with what appears to be the convention among
# Twisted projects, and don't attempt to do anything when someone runs
# `setup.py test`; instead we direct people to run `trial` directly if they
# care.
#
# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
# [2]: https://pypi.python.org/pypi/setuptools_trial
class TestCommand(Command):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(
"""Synapse's tests cannot be run via setup.py. To run them, try:
PYTHONPATH="." trial tests
"""
)
def read_file(path_segments):
"""Read a file from the package. Takes a list of strings to join to
make the path"""
file_path = os.path.join(here, *path_segments)
with open(file_path) as f:
return f.read()
def exec_file(path_segments):
"""Execute a single python file to get the variables defined in it"""
result: Dict[str, Any] = {}
code = read_file(path_segments)
exec(code, result)
return result
version = exec_file(("synapse", "__init__.py"))["__version__"]
dependencies = exec_file(("synapse", "python_dependencies.py"))
long_description = read_file(("README.rst",))
REQUIREMENTS = dependencies["REQUIREMENTS"]
CONDITIONAL_REQUIREMENTS = dependencies["CONDITIONAL_REQUIREMENTS"]
ALL_OPTIONAL_REQUIREMENTS = dependencies["ALL_OPTIONAL_REQUIREMENTS"]
# Make `pip install matrix-synapse[all]` install all the optional dependencies.
CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
# Developer dependencies should not get included in "all".
#
# We pin black so that our tests don't start failing on new releases.
CONDITIONAL_REQUIREMENTS["lint"] = [
"isort==5.7.0",
"black==22.3.0",
"flake8-comprehensions",
"flake8-bugbear==21.3.2",
"flake8",
]
CONDITIONAL_REQUIREMENTS["mypy"] = [
"mypy==0.931",
"mypy-zope==0.3.5",
"types-bleach>=4.1.0",
"types-jsonschema>=3.2.0",
"types-opentracing>=2.4.2",
"types-Pillow>=8.3.4",
"types-psycopg2>=2.9.9",
"types-pyOpenSSL>=20.0.7",
"types-PyYAML>=5.4.10",
"types-requests>=2.26.0",
"types-setuptools>=57.4.0",
]
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0", "idna>=2.5"]
CONDITIONAL_REQUIREMENTS["dev"] = (
CONDITIONAL_REQUIREMENTS["lint"]
+ CONDITIONAL_REQUIREMENTS["mypy"]
+ CONDITIONAL_REQUIREMENTS["test"]
+ [
# The following are used by the release script
"click==8.1.0",
"redbaron==0.9.2",
"GitPython==3.1.14",
"commonmark==0.9.1",
"pygithub==1.55",
# The following are executed as commands by the release script.
"twine",
"towncrier",
]
)
setup(
name="matrix-synapse",
version=version,
packages=find_packages(exclude=["tests", "tests.*"]),
description="Reference homeserver for the Matrix decentralised comms protocol",
install_requires=REQUIREMENTS,
extras_require=CONDITIONAL_REQUIREMENTS,
include_package_data=True,
zip_safe=False,
long_description=long_description,
long_description_content_type="text/x-rst",
python_requires="~=3.7",
entry_points={
"console_scripts": [
# Application
"synapse_homeserver = synapse.app.homeserver:main",
"synapse_worker = synapse.app.generic_worker:main",
"synctl = synapse._scripts.synctl:main",
# Scripts
"export_signing_key = synapse._scripts.export_signing_key:main",
"generate_config = synapse._scripts.generate_config:main",
"generate_log_config = synapse._scripts.generate_log_config:main",
"generate_signing_key = synapse._scripts.generate_signing_key:main",
"hash_password = synapse._scripts.hash_password:main",
"register_new_matrix_user = synapse._scripts.register_new_matrix_user:main",
"synapse_port_db = synapse._scripts.synapse_port_db:main",
"synapse_review_recent_signups = synapse._scripts.review_recent_signups:main",
"update_synapse_database = synapse._scripts.update_synapse_database:main",
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Communications :: Chat",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
cmdclass={"test": TestCommand},
)

View file

@ -103,7 +103,7 @@ class SortedDict(Dict[_KT, _VT]):
self, self,
start: Optional[int] = ..., start: Optional[int] = ...,
stop: Optional[int] = ..., stop: Optional[int] = ...,
reverse=bool, reverse: bool = ...,
) -> Iterator[_KT]: ... ) -> Iterator[_KT]: ...
def bisect_left(self, value: _KT) -> int: ... def bisect_left(self, value: _KT) -> int: ...
def bisect_right(self, value: _KT) -> int: ... def bisect_right(self, value: _KT) -> int: ...

View file

@ -81,7 +81,7 @@ class SortedList(MutableSequence[_T]):
self, self,
start: Optional[int] = ..., start: Optional[int] = ...,
stop: Optional[int] = ..., stop: Optional[int] = ...,
reverse=bool, reverse: bool = ...,
) -> Iterator[_T]: ... ) -> Iterator[_T]: ...
def _islice( def _islice(
self, self,
@ -153,14 +153,14 @@ class SortedKeyList(SortedList[_T]):
maximum: Optional[int] = ..., maximum: Optional[int] = ...,
inclusive: Tuple[bool, bool] = ..., inclusive: Tuple[bool, bool] = ...,
reverse: bool = ..., reverse: bool = ...,
): ... ) -> Iterator[_T]: ...
def irange_key( def irange_key(
self, self,
min_key: Optional[Any] = ..., min_key: Optional[Any] = ...,
max_key: Optional[Any] = ..., max_key: Optional[Any] = ...,
inclusive: Tuple[bool, bool] = ..., inclusive: Tuple[bool, bool] = ...,
reserve: bool = ..., reserve: bool = ...,
): ... ) -> Iterator[_T]: ...
def bisect_left(self, value: _T) -> int: ... def bisect_left(self, value: _T) -> int: ...
def bisect_right(self, value: _T) -> int: ... def bisect_right(self, value: _T) -> int: ...
def bisect(self, value: _T) -> int: ... def bisect(self, value: _T) -> int: ...

View file

@ -103,7 +103,7 @@ class SortedSet(MutableSet[_T], Sequence[_T]):
self, self,
start: Optional[int] = ..., start: Optional[int] = ...,
stop: Optional[int] = ..., stop: Optional[int] = ...,
reverse=bool, reverse: bool = ...,
) -> Iterator[_T]: ... ) -> Iterator[_T]: ...
def irange( def irange(
self, self,

View file

@ -18,6 +18,8 @@ from typing import Any, List, Optional, Type, Union
from twisted.internet import protocol from twisted.internet import protocol
from twisted.internet.defer import Deferred from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IAddress
from twisted.python.failure import Failure
class RedisProtocol(protocol.Protocol): class RedisProtocol(protocol.Protocol):
def publish(self, channel: str, message: bytes) -> "Deferred[None]": ... def publish(self, channel: str, message: bytes) -> "Deferred[None]": ...
@ -34,11 +36,14 @@ class RedisProtocol(protocol.Protocol):
def get(self, key: str) -> "Deferred[Any]": ... def get(self, key: str) -> "Deferred[Any]": ...
class SubscriberProtocol(RedisProtocol): class SubscriberProtocol(RedisProtocol):
def __init__(self, *args, **kwargs): ... def __init__(self, *args: object, **kwargs: object): ...
password: Optional[str] password: Optional[str]
def subscribe(self, channels: Union[str, List[str]]): ... def subscribe(self, channels: Union[str, List[str]]) -> "Deferred[None]": ...
def connectionMade(self): ... def connectionMade(self) -> None: ...
def connectionLost(self, reason): ... # type-ignore: twisted.internet.protocol.Protocol provides a default argument for
# `reason`. txredisapi's LineReceiver Protocol doesn't. But that's fine: it's what's
# actually specified in twisted.internet.interfaces.IProtocol.
def connectionLost(self, reason: Failure) -> None: ... # type: ignore[override]
def lazyConnection( def lazyConnection(
host: str = ..., host: str = ...,
@ -74,7 +79,7 @@ class RedisFactory(protocol.ReconnectingClientFactory):
replyTimeout: Optional[int] = None, replyTimeout: Optional[int] = None,
convertNumbers: Optional[int] = True, convertNumbers: Optional[int] = True,
): ... ): ...
def buildProtocol(self, addr) -> RedisProtocol: ... def buildProtocol(self, addr: IAddress) -> RedisProtocol: ...
class SubscriberFactory(RedisFactory): class SubscriberFactory(RedisFactory):
def __init__(self) -> None: ... def __init__(self) -> None: ...


@ -20,6 +20,8 @@ import json
import os import os
import sys import sys
from matrix_common.versionstring import get_distribution_version_string
# Check that we're not running on an unsupported Python version. # Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7): if sys.version_info < (3, 7):
print("Synapse requires Python 3.7 or above.") print("Synapse requires Python 3.7 or above.")
@ -68,7 +70,7 @@ try:
except ImportError: except ImportError:
pass pass
__version__ = "1.57.1" __version__ = get_distribution_version_string("matrix-synapse")
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when # We import here so that we don't have to install a bunch of deps when


@ -179,8 +179,6 @@ class RelationTypes:
REPLACE: Final = "m.replace" REPLACE: Final = "m.replace"
REFERENCE: Final = "m.reference" REFERENCE: Final = "m.reference"
THREAD: Final = "m.thread" THREAD: Final = "m.thread"
# TODO Remove this in Synapse >= v1.57.0.
UNSTABLE_THREAD: Final = "io.element.thread"
class LimitBlockingTypes: class LimitBlockingTypes:


@ -79,6 +79,8 @@ class Codes:
UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN" UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN"
UNABLE_TO_GRANT_JOIN = "M_UNABLE_TO_GRANT_JOIN" UNABLE_TO_GRANT_JOIN = "M_UNABLE_TO_GRANT_JOIN"
UNREDACTED_CONTENT_DELETED = "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED"
class CodeMessageException(RuntimeError): class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes. """An exception with integer code and message string attributes.
@ -483,6 +485,22 @@ class RequestSendFailed(RuntimeError):
self.can_retry = can_retry self.can_retry = can_retry
class UnredactedContentDeletedError(SynapseError):
def __init__(self, content_keep_ms: Optional[int] = None):
super().__init__(
404,
"The content for that event has already been erased from the database",
errcode=Codes.UNREDACTED_CONTENT_DELETED,
)
self.content_keep_ms = content_keep_ms
def error_dict(self) -> "JsonDict":
extra = {}
if self.content_keep_ms is not None:
extra = {"fi.mau.msc2815.content_keep_ms": self.content_keep_ms}
return cs_error(self.msg, self.errcode, **extra)
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict": def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server """Utility method for constructing an error response for client-server
interactions. interactions.
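For illustration (not part of the commit): with the new UnredactedContentDeletedError above, cs_error yields a standard Matrix error body plus the MSC2815 retention hint when content_keep_ms was supplied. The numeric value below is invented.

# Approximate JSON body a client would receive; 86400000 (24 hours) is a
# made-up example of content_keep_ms and is only present when configured.
error_body = {
    "errcode": "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED",
    "error": "The content for that event has already been erased from the database",
    "fi.mau.msc2815.content_keep_ms": 86400000,
}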


@ -89,9 +89,7 @@ ROOM_EVENT_FILTER_SCHEMA = {
"org.matrix.not_labels": {"type": "array", "items": {"type": "string"}}, "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
# MSC3440, filtering by event relations. # MSC3440, filtering by event relations.
"related_by_senders": {"type": "array", "items": {"type": "string"}}, "related_by_senders": {"type": "array", "items": {"type": "string"}},
"io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
"related_by_rel_types": {"type": "array", "items": {"type": "string"}}, "related_by_rel_types": {"type": "array", "items": {"type": "string"}},
"io.element.relation_types": {"type": "array", "items": {"type": "string"}},
}, },
} }
@ -323,16 +321,6 @@ class Filter:
self.related_by_senders = self.filter_json.get("related_by_senders", None) self.related_by_senders = self.filter_json.get("related_by_senders", None)
self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None) self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
# Fallback to the unstable prefix if the stable version is not given.
if hs.config.experimental.msc3440_enabled:
self.related_by_senders = self.related_by_senders or self.filter_json.get(
"io.element.relation_senders", None
)
self.related_by_rel_types = (
self.related_by_rel_types
or self.filter_json.get("io.element.relation_types", None)
)
def filters_all_types(self) -> bool: def filters_all_types(self) -> bool:
return "*" in self.not_types return "*" in self.not_types


@ -42,7 +42,7 @@ logger = logging.getLogger(__name__)
# user ID -> {device ID -> {algorithm -> count}} # user ID -> {device ID -> {algorithm -> count}}
TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]] TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
# Type for the `device_unused_fallback_keys` field in an appservice transaction # Type for the `device_unused_fallback_key_types` field in an appservice transaction
# user ID -> {device ID -> [algorithm]} # user ID -> {device ID -> [algorithm]}
TransactionUnusedFallbackKeys = Dict[str, Dict[str, List[str]]] TransactionUnusedFallbackKeys = Dict[str, Dict[str, List[str]]]
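To make the renamed field concrete (illustrative only; the user, device and algorithm names are invented): the type alias above describes the nested mapping that is sent to the appservice as "org.matrix.msc3202.device_unused_fallback_key_types".

from typing import Dict, List

TransactionUnusedFallbackKeys = Dict[str, Dict[str, List[str]]]

# user ID -> device ID -> list of unused fallback key algorithms
unused_fallback_key_types: TransactionUnusedFallbackKeys = {
    "@alice:example.org": {
        "ABCDEFGHIJ": ["signed_curve25519"],
    },
}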


@ -278,7 +278,7 @@ class ApplicationServiceApi(SimpleHttpClient):
] = one_time_key_counts ] = one_time_key_counts
if unused_fallback_keys: if unused_fallback_keys:
body[ body[
"org.matrix.msc3202.device_unused_fallback_keys" "org.matrix.msc3202.device_unused_fallback_key_types"
] = unused_fallback_keys ] = unused_fallback_keys
if device_list_summary: if device_list_summary:
body["org.matrix.msc3202.device_lists"] = { body["org.matrix.msc3202.device_lists"] = {


@ -26,9 +26,6 @@ class ExperimentalConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None: def read_config(self, config: JsonDict, **kwargs: Any) -> None:
experimental = config.get("experimental_features") or {} experimental = config.get("experimental_features") or {}
# MSC3440 (thread relation)
self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)
# MSC3026 (busy presence state) # MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
@ -77,7 +74,10 @@ class ExperimentalConfig(Config):
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
# The deprecated groups feature. # The deprecated groups feature.
self.groups_enabled: bool = experimental.get("groups_enabled", True) self.groups_enabled: bool = experimental.get("groups_enabled", False)
# MSC2654: Unread counts # MSC2654: Unread counts
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False) self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)


@ -680,14 +680,6 @@ class ServerConfig(Config):
config.get("use_account_validity_in_account_status") or False config.get("use_account_validity_in_account_status") or False
) )
# This is a temporary option that enables fully using the new
# `device_lists_changes_in_room` without the backwards compat code. This
# is primarily for testing. If enabled the server should *not* be
# downgraded, as it may lead to missing device list updates.
self.use_new_device_lists_changes_in_room = (
config.get("use_new_device_lists_changes_in_room") or False
)
self.rooms_to_exclude_from_sync: List[str] = ( self.rooms_to_exclude_from_sync: List[str] = (
config.get("exclude_rooms_from_sync") or [] config.get("exclude_rooms_from_sync") or []
) )


@ -39,7 +39,6 @@ from . import EventBase
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations from synapse.handlers.relations import BundledAggregations
from synapse.server import HomeServer
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
@ -396,9 +395,6 @@ class EventClientSerializer:
clients. clients.
""" """
def __init__(self, hs: "HomeServer"):
self._msc3440_enabled = hs.config.experimental.msc3440_enabled
def serialize_event( def serialize_event(
self, self,
event: Union[JsonDict, EventBase], event: Union[JsonDict, EventBase],
@ -406,6 +402,7 @@ class EventClientSerializer:
*, *,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
apply_edits: bool = True,
) -> JsonDict: ) -> JsonDict:
"""Serializes a single event. """Serializes a single event.
@ -413,10 +410,10 @@ class EventClientSerializer:
event: The event being serialized. event: The event being serialized.
time_now: The current time in milliseconds time_now: The current time in milliseconds
config: Event serialization config config: Event serialization config
bundle_aggregations: Whether to include the bundled aggregations for this bundle_aggregations: A map from event_id to the aggregations to be bundled
event. Only applies to non-state events. (State events never include into the event.
bundled aggregations.) apply_edits: Whether the content of the event should be modified to reflect
any replacement in `bundle_aggregations[<event_id>].replace`.
Returns: Returns:
The serialized event The serialized event
""" """
@ -434,8 +431,9 @@ class EventClientSerializer:
event, event,
time_now, time_now,
config, config,
bundle_aggregations[event.event_id], event_aggregations,
serialized_event, serialized_event,
apply_edits=apply_edits,
) )
return serialized_event return serialized_event
@ -474,16 +472,18 @@ class EventClientSerializer:
config: SerializeEventConfig, config: SerializeEventConfig,
aggregations: "BundledAggregations", aggregations: "BundledAggregations",
serialized_event: JsonDict, serialized_event: JsonDict,
apply_edits: bool,
) -> None: ) -> None:
"""Potentially injects bundled aggregations into the unsigned portion of the serialized event. """Potentially injects bundled aggregations into the unsigned portion of the serialized event.
Args: Args:
event: The event being serialized. event: The event being serialized.
time_now: The current time in milliseconds time_now: The current time in milliseconds
config: Event serialization config
aggregations: The bundled aggregation to serialize. aggregations: The bundled aggregation to serialize.
serialized_event: The serialized event which may be modified. serialized_event: The serialized event which may be modified.
config: Event serialization config apply_edits: Whether the content of the event should be modified to reflect
any replacement in `aggregations.replace`.
""" """
serialized_aggregations = {} serialized_aggregations = {}
@ -494,8 +494,9 @@ class EventClientSerializer:
serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references
if aggregations.replace: if aggregations.replace:
# If there is an edit, apply it to the event. # If there is an edit, optionally apply it to the event.
edit = aggregations.replace edit = aggregations.replace
if apply_edits:
self._apply_edit(event, serialized_event, edit) self._apply_edit(event, serialized_event, edit)
# Include information about it in the relations dict. # Include information about it in the relations dict.
@ -525,8 +526,6 @@ class EventClientSerializer:
"current_user_participated": thread.current_user_participated, "current_user_participated": thread.current_user_participated,
} }
serialized_aggregations[RelationTypes.THREAD] = thread_summary serialized_aggregations[RelationTypes.THREAD] = thread_summary
if self._msc3440_enabled:
serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary
# Include the bundled aggregations in the event. # Include the bundled aggregations in the event.
if serialized_aggregations: if serialized_aggregations:
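As a hedged sketch of what "applying an edit" amounts to here (simplified, not Synapse's actual _apply_edit, which also preserves some fields of the original event): an m.replace edit carries the replacement body under m.new_content, and that body is swapped into the serialized event.

# Minimal sketch, assuming the edit follows the m.replace convention where the
# replacement body lives under "m.new_content" in the edit event's content.
def apply_edit_sketch(serialized_event: dict, edit_content: dict) -> None:
    new_content = edit_content.get("m.new_content")
    if new_content is not None:
        serialized_event["content"] = new_content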


@ -515,7 +515,7 @@ class FederationServer(FederationBase):
) )
async def on_room_state_request( async def on_room_state_request(
self, origin: str, room_id: str, event_id: Optional[str] self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]: ) -> Tuple[int, JsonDict]:
origin_host, _ = parse_server_name(origin) origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id) await self.check_server_matches_acl(origin_host, room_id)
@ -530,17 +530,12 @@ class FederationServer(FederationBase):
# - but that's non-trivial to get right, and anyway somewhat defeats # - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer. # the point of the linearizer.
async with self._server_linearizer.queue((origin, room_id)): async with self._server_linearizer.queue((origin, room_id)):
resp: JsonDict = dict( resp = await self._state_resp_cache.wrap(
await self._state_resp_cache.wrap(
(room_id, event_id), (room_id, event_id),
self._on_context_state_request_compute, self._on_context_state_request_compute,
room_id, room_id,
event_id, event_id,
) )
)
room_version = await self.store.get_room_version_id(room_id)
resp["room_version"] = room_version
return 200, resp return 200, resp
@ -574,14 +569,11 @@ class FederationServer(FederationBase):
return {"pdu_ids": state_ids, "auth_chain_ids": list(auth_chain_ids)} return {"pdu_ids": state_ids, "auth_chain_ids": list(auth_chain_ids)}
async def _on_context_state_request_compute( async def _on_context_state_request_compute(
self, room_id: str, event_id: Optional[str] self, room_id: str, event_id: str
) -> Dict[str, list]: ) -> Dict[str, list]:
pdus: Collection[EventBase] pdus: Collection[EventBase]
if event_id:
event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id) event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
pdus = await self.store.get_events_as_list(event_ids) pdus = await self.store.get_events_as_list(event_ids)
else:
pdus = (await self.state.get_current_state(room_id)).values()
auth_chain = await self.store.get_auth_chain( auth_chain = await self.store.get_auth_chain(
room_id, [pdu.event_id for pdu in pdus] room_id, [pdu.event_id for pdu in pdus]
@ -687,8 +679,6 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec() time_now = self._clock.time_msec()
event_json = event.get_pdu_json(time_now) event_json = event.get_pdu_json(time_now)
resp = { resp = {
# TODO Remove the unstable prefix when servers have updated.
"org.matrix.msc3083.v2.event": event_json,
"event": event_json, "event": event_json,
"state": [p.get_pdu_json(time_now) for p in state_events], "state": [p.get_pdu_json(time_now) for p in state_events],
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events], "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],


@ -1380,16 +1380,6 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
prefix + "auth_chain.item", prefix + "auth_chain.item",
use_float=True, use_float=True,
), ),
# TODO Remove the unstable prefix when servers have updated.
#
# By re-using the same event dictionary this will cause the parsing of
# org.matrix.msc3083.v2.event and event to stomp over each other.
# Generally this should be fine.
ijson.kvitems_coro(
_event_parser(self._response.event_dict),
prefix + "org.matrix.msc3083.v2.event",
use_float=True,
),
ijson.kvitems_coro( ijson.kvitems_coro(
_event_parser(self._response.event_dict), _event_parser(self._response.event_dict),
prefix + "event", prefix + "event",


@ -16,7 +16,8 @@ import functools
import logging import logging
import re import re
import time import time
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, cast from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tuple, cast
from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX from synapse.api.urls import FEDERATION_V1_PREFIX
@ -86,15 +87,24 @@ class Authenticator:
if not auth_headers: if not auth_headers:
raise NoAuthenticationError( raise NoAuthenticationError(
401, "Missing Authorization headers", Codes.UNAUTHORIZED HTTPStatus.UNAUTHORIZED,
"Missing Authorization headers",
Codes.UNAUTHORIZED,
) )
for auth in auth_headers: for auth in auth_headers:
if auth.startswith(b"X-Matrix"): if auth.startswith(b"X-Matrix"):
(origin, key, sig) = _parse_auth_header(auth) (origin, key, sig, destination) = _parse_auth_header(auth)
json_request["origin"] = origin json_request["origin"] = origin
json_request["signatures"].setdefault(origin, {})[key] = sig json_request["signatures"].setdefault(origin, {})[key] = sig
# if the origin_server sent a destination along it needs to match our own server_name
if destination is not None and destination != self.server_name:
raise AuthenticationError(
HTTPStatus.UNAUTHORIZED,
"Destination mismatch in auth header",
Codes.UNAUTHORIZED,
)
if ( if (
self.federation_domain_whitelist is not None self.federation_domain_whitelist is not None
and origin not in self.federation_domain_whitelist and origin not in self.federation_domain_whitelist
@ -103,7 +113,9 @@ class Authenticator:
if origin is None or not json_request["signatures"]: if origin is None or not json_request["signatures"]:
raise NoAuthenticationError( raise NoAuthenticationError(
401, "Missing Authorization headers", Codes.UNAUTHORIZED HTTPStatus.UNAUTHORIZED,
"Missing Authorization headers",
Codes.UNAUTHORIZED,
) )
await self.keyring.verify_json_for_server( await self.keyring.verify_json_for_server(
@ -142,13 +154,14 @@ class Authenticator:
logger.exception("Error resetting retry timings on %s", origin) logger.exception("Error resetting retry timings on %s", origin)
def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]: def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str]]:
"""Parse an X-Matrix auth header """Parse an X-Matrix auth header
Args: Args:
header_bytes: header value header_bytes: header value
Returns: Returns:
origin, key id, signature, destination.
origin, key id, signature. origin, key id, signature.
Raises: Raises:
@ -157,7 +170,9 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
try: try:
header_str = header_bytes.decode("utf-8") header_str = header_bytes.decode("utf-8")
params = header_str.split(" ")[1].split(",") params = header_str.split(" ")[1].split(",")
param_dict = {k: v for k, v in (kv.split("=", maxsplit=1) for kv in params)} param_dict: Dict[str, str] = {
k: v for k, v in [param.split("=", maxsplit=1) for param in params]
}
def strip_quotes(value: str) -> str: def strip_quotes(value: str) -> str:
if value.startswith('"'): if value.startswith('"'):
@ -172,7 +187,15 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
key = strip_quotes(param_dict["key"]) key = strip_quotes(param_dict["key"])
sig = strip_quotes(param_dict["sig"]) sig = strip_quotes(param_dict["sig"])
return origin, key, sig
# get the destination server_name from the auth header if it exists
destination = param_dict.get("destination")
if destination is not None:
destination = strip_quotes(destination)
else:
destination = None
return origin, key, sig, destination
except Exception as e: except Exception as e:
logger.warning( logger.warning(
"Error parsing auth header '%s': %s", "Error parsing auth header '%s': %s",
@ -180,7 +203,7 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
e, e,
) )
raise AuthenticationError( raise AuthenticationError(
400, "Malformed Authorization header", Codes.UNAUTHORIZED HTTPStatus.BAD_REQUEST, "Malformed Authorization header", Codes.UNAUTHORIZED
) )
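For reference, a self-contained sketch of the parsing logic above (signature verification, error handling and the surrounding servlet machinery omitted; the example header value is invented):

from typing import Dict, Optional, Tuple


def parse_x_matrix_header(header: str) -> Tuple[str, str, str, Optional[str]]:
    # "X-Matrix origin=...,key=...,sig=...[,destination=...]"
    params = header.split(" ")[1].split(",")
    param_dict: Dict[str, str] = dict(kv.split("=", maxsplit=1) for kv in params)

    def strip_quotes(value: str) -> str:
        return value[1:-1] if value.startswith('"') else value

    origin = strip_quotes(param_dict["origin"])
    key = strip_quotes(param_dict["key"])
    sig = strip_quotes(param_dict["sig"])
    destination = param_dict.get("destination")
    if destination is not None:
        destination = strip_quotes(destination)
    return origin, key, sig, destination


print(parse_x_matrix_header(
    'X-Matrix origin=other.example.com,key="ed25519:1",sig="ABCdef",destination="this.example.com"'
))
# ('other.example.com', 'ed25519:1', 'ABCdef', 'this.example.com')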


@ -160,7 +160,7 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
return await self.handler.on_room_state_request( return await self.handler.on_room_state_request(
origin, origin,
room_id, room_id,
parse_string_from_args(query, "event_id", None, required=False), parse_string_from_args(query, "event_id", None, required=True),
) )


@ -291,12 +291,6 @@ class DeviceHandler(DeviceWorkerHandler):
# On start up check if there are any updates pending. # On start up check if there are any updates pending.
hs.get_reactor().callWhenRunning(self._handle_new_device_update_async) hs.get_reactor().callWhenRunning(self._handle_new_device_update_async)
# Used to decide if we calculate outbound pokes up front or not. By
# default we do to allow safely downgrading Synapse.
self.use_new_device_lists_changes_in_room = (
hs.config.server.use_new_device_lists_changes_in_room
)
def _check_device_name_length(self, name: Optional[str]) -> None: def _check_device_name_length(self, name: Optional[str]) -> None:
""" """
Checks whether a device name is longer than the maximum allowed length. Checks whether a device name is longer than the maximum allowed length.
@ -490,23 +484,9 @@ class DeviceHandler(DeviceWorkerHandler):
room_ids = await self.store.get_rooms_for_user(user_id) room_ids = await self.store.get_rooms_for_user(user_id)
hosts: Optional[Set[str]] = None
if not self.use_new_device_lists_changes_in_room:
hosts = set()
if self.hs.is_mine_id(user_id):
for room_id in room_ids:
joined_users = await self.store.get_users_in_room(room_id)
hosts.update(get_domain_from_id(u) for u in joined_users)
set_tag("target_hosts", hosts)
hosts.discard(self.server_name)
position = await self.store.add_device_change_to_streams( position = await self.store.add_device_change_to_streams(
user_id, user_id,
device_ids, device_ids,
hosts=hosts,
room_ids=room_ids, room_ids=room_ids,
) )
@ -528,14 +508,6 @@ class DeviceHandler(DeviceWorkerHandler):
# We may need to do some processing asynchronously. # We may need to do some processing asynchronously.
self._handle_new_device_update_async() self._handle_new_device_update_async()
if hosts:
logger.info(
"Sending device list update notif for %r to: %r", user_id, hosts
)
for host in hosts:
self.federation_sender.send_device_messages(host, immediate=False)
log_kv({"message": "sent device update to host", "host": host})
async def notify_user_signature_update( async def notify_user_signature_update(
self, from_user_id: str, user_ids: List[str] self, from_user_id: str, user_ids: List[str]
) -> None: ) -> None:
@ -677,6 +649,10 @@ class DeviceHandler(DeviceWorkerHandler):
return return
for user_id, device_id, room_id, stream_id, opentracing_context in rows: for user_id, device_id, room_id, stream_id, opentracing_context in rows:
hosts = set()
# Ignore any users that aren't ours
if self.hs.is_mine_id(user_id):
joined_user_ids = await self.store.get_users_in_room(room_id) joined_user_ids = await self.store.get_users_in_room(room_id)
hosts = {get_domain_from_id(u) for u in joined_user_ids} hosts = {get_domain_from_id(u) for u in joined_user_ids}
hosts.discard(self.server_name) hosts.discard(self.server_name)
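A simplified, standalone version of the host calculation above (not the handler itself): the servers to notify are the domains of every user joined to the room, minus our own server name, which is what get_domain_from_id gives us by taking everything after the first colon of a Matrix user ID.

from typing import Iterable, Set


def remote_hosts_for_update(joined_user_ids: Iterable[str], my_server_name: str) -> Set[str]:
    # "@alice:example.org" -> "example.org"
    hosts = {user_id.split(":", 1)[1] for user_id in joined_user_ids}
    hosts.discard(my_server_name)
    return hosts


print(remote_hosts_for_update(["@alice:example.org", "@bob:other.example"], "example.org"))
# {'other.example'}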


@ -16,11 +16,12 @@ import logging
import random import random
from typing import TYPE_CHECKING, Iterable, List, Optional from typing import TYPE_CHECKING, Iterable, List, Optional
from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
from synapse.api.errors import AuthError, SynapseError from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig from synapse.events.utils import SerializeEventConfig
from synapse.handlers.presence import format_user_presence_state from synapse.handlers.presence import format_user_presence_state
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.streams.config import PaginationConfig from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client
@ -67,7 +68,9 @@ class EventStreamHandler:
presence_handler = self.hs.get_presence_handler() presence_handler = self.hs.get_presence_handler()
context = await presence_handler.user_syncing( context = await presence_handler.user_syncing(
auth_user_id, affect_presence=affect_presence auth_user_id,
affect_presence=affect_presence,
presence_state=PresenceState.ONLINE,
) )
with context: with context:
if timeout: if timeout:
@ -139,7 +142,11 @@ class EventHandler:
self.storage = hs.get_storage() self.storage = hs.get_storage()
async def get_event( async def get_event(
self, user: UserID, room_id: Optional[str], event_id: str self,
user: UserID,
room_id: Optional[str],
event_id: str,
show_redacted: bool = False,
) -> Optional[EventBase]: ) -> Optional[EventBase]:
"""Retrieve a single specified event. """Retrieve a single specified event.
@ -148,6 +155,7 @@ class EventHandler:
room_id: The expected room id. We'll return None if the room_id: The expected room id. We'll return None if the
event's room does not match. event's room does not match.
event_id: The event ID to obtain. event_id: The event ID to obtain.
show_redacted: Should the full content of redacted events be returned?
Returns: Returns:
An event, or None if there is no event matching this ID. An event, or None if there is no event matching this ID.
Raises: Raises:
@ -155,7 +163,12 @@ class EventHandler:
AuthError if the user does not have the rights to inspect this AuthError if the user does not have the rights to inspect this
event. event.
""" """
event = await self.store.get_event(event_id, check_room_id=room_id) redact_behaviour = (
EventRedactBehaviour.AS_IS if show_redacted else EventRedactBehaviour.REDACT
)
event = await self.store.get_event(
event_id, check_room_id=room_id, redact_behaviour=redact_behaviour
)
if not event: if not event:
return None return None


@ -1,4 +1,4 @@
# Copyright 2014-2021 The Matrix.org Foundation C.I.C. # Copyright 2014-2022 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome # Copyright 2020 Sorunome
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@ -15,10 +15,14 @@
"""Contains handlers for federation events.""" """Contains handlers for federation events."""
import enum
import itertools
import logging import logging
from enum import Enum
from http import HTTPStatus from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
import attr
from signedjson.key import decode_verify_key_bytes from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64 from unpaddedbase64 import decode_base64
@ -92,6 +96,24 @@ def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]:
return sorted(joined_domains.items(), key=lambda d: d[1]) return sorted(joined_domains.items(), key=lambda d: d[1])
class _BackfillPointType(Enum):
# a regular backwards extremity (ie, an event which we don't yet have, but which
# is referred to by other events in the DAG)
BACKWARDS_EXTREMITY = enum.auto()
# an MSC2716 "insertion event"
INSERTION_PONT = enum.auto()
@attr.s(slots=True, auto_attribs=True, frozen=True)
class _BackfillPoint:
"""A potential point we might backfill from"""
event_id: str
depth: int
type: _BackfillPointType
class FederationHandler: class FederationHandler:
"""Handles general incoming federation requests """Handles general incoming federation requests
@ -157,89 +179,51 @@ class FederationHandler:
async def _maybe_backfill_inner( async def _maybe_backfill_inner(
self, room_id: str, current_depth: int, limit: int self, room_id: str, current_depth: int, limit: int
) -> bool: ) -> bool:
oldest_events_with_depth = ( backwards_extremities = [
await self.store.get_oldest_event_ids_with_depth_in_room(room_id) _BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
) for event_id, depth in await self.store.get_oldest_event_ids_with_depth_in_room(
insertion_events_to_be_backfilled: Dict[str, int] = {}
if self.hs.config.experimental.msc2716_enabled:
insertion_events_to_be_backfilled = (
await self.store.get_insertion_event_backward_extremities_in_room(
room_id room_id
) )
]
insertion_events_to_be_backfilled: List[_BackfillPoint] = []
if self.hs.config.experimental.msc2716_enabled:
insertion_events_to_be_backfilled = [
_BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT)
for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room(
room_id
) )
]
logger.debug( logger.debug(
"_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s", "_maybe_backfill_inner: backwards_extremities=%s insertion_events_to_be_backfilled=%s",
oldest_events_with_depth, backwards_extremities,
insertion_events_to_be_backfilled, insertion_events_to_be_backfilled,
) )
if not oldest_events_with_depth and not insertion_events_to_be_backfilled: if not backwards_extremities and not insertion_events_to_be_backfilled:
logger.debug("Not backfilling as no extremeties found.") logger.debug("Not backfilling as no extremeties found.")
return False return False
# We only want to paginate if we can actually see the events we'll get, # we now have a list of potential places to backpaginate from. We prefer to
# as otherwise we'll just spend a lot of resources to get redacted # start with the most recent (ie, max depth), so let's sort the list.
# events. sorted_backfill_points: List[_BackfillPoint] = sorted(
# itertools.chain(
# We do this by filtering all the backwards extremities and seeing if backwards_extremities,
# any remain. Given we don't have the extremity events themselves, we insertion_events_to_be_backfilled,
# need to actually check the events that reference them. ),
# key=lambda e: -int(e.depth),
# *Note*: the spec wants us to keep backfilling until we reach the start
# of the room in case we are allowed to see some of the history. However
# in practice that causes more issues than its worth, as a) its
# relatively rare for there to be any visible history and b) even when
# there is its often sufficiently long ago that clients would stop
# attempting to paginate before backfill reached the visible history.
#
# TODO: If we do do a backfill then we should filter the backwards
# extremities to only include those that point to visible portions of
# history.
#
# TODO: Correctly handle the case where we are allowed to see the
# forward event but not the backward extremity, e.g. in the case of
# initial join of the server where we are allowed to see the join
# event but not anything before it. This would require looking at the
# state *before* the event, ignoring the special casing certain event
# types have.
forward_event_ids = await self.store.get_successor_events(
list(oldest_events_with_depth)
) )
extremities_events = await self.store.get_events(
forward_event_ids,
redact_behaviour=EventRedactBehaviour.AS_IS,
get_prev_content=False,
)
# We set `check_history_visibility_only` as we might otherwise get false
# positives from users having been erased.
filtered_extremities = await filter_events_for_server(
self.storage,
self.server_name,
list(extremities_events.values()),
redact=False,
check_history_visibility_only=True,
)
logger.debug( logger.debug(
"_maybe_backfill_inner: filtered_extremities %s", filtered_extremities "_maybe_backfill_inner: room_id: %s: current_depth: %s, limit: %s, "
"backfill points (%d): %s",
room_id,
current_depth,
limit,
len(sorted_backfill_points),
sorted_backfill_points,
) )
if not filtered_extremities and not insertion_events_to_be_backfilled:
return False
extremities = {
**oldest_events_with_depth,
# TODO: insertion_events_to_be_backfilled is currently skipping the filtered_extremities checks
**insertion_events_to_be_backfilled,
}
# Check if we reached a point where we should start backfilling.
sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
max_depth = sorted_extremeties_tuple[0][1]
# If we're approaching an extremity we trigger a backfill, otherwise we # If we're approaching an extremity we trigger a backfill, otherwise we
# no-op. # no-op.
# #
@ -249,6 +233,11 @@ class FederationHandler:
# chose more than one times the limit in case of failure, but choosing a # chose more than one times the limit in case of failure, but choosing a
# much larger factor will result in triggering a backfill request much # much larger factor will result in triggering a backfill request much
# earlier than necessary. # earlier than necessary.
#
# XXX: shouldn't we do this *after* the filter by depth below? Again, we don't
# care about events that have happened after our current position.
#
max_depth = sorted_backfill_points[0].depth
if current_depth - 2 * limit > max_depth: if current_depth - 2 * limit > max_depth:
logger.debug( logger.debug(
"Not backfilling as we don't need to. %d < %d - 2 * %d", "Not backfilling as we don't need to. %d < %d - 2 * %d",
@ -265,31 +254,98 @@ class FederationHandler:
# 2. we have likely previously tried and failed to backfill from that # 2. we have likely previously tried and failed to backfill from that
# extremity, so to avoid getting "stuck" requesting the same # extremity, so to avoid getting "stuck" requesting the same
# backfill repeatedly we drop those extremities. # backfill repeatedly we drop those extremities.
filtered_sorted_extremeties_tuple = [ #
t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
]
logger.debug(
"room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems (%d): %s filtered_sorted_extremeties_tuple: %s",
room_id,
current_depth,
limit,
max_depth,
len(sorted_extremeties_tuple),
sorted_extremeties_tuple,
filtered_sorted_extremeties_tuple,
)
# However, we need to check that the filtered extremities are non-empty. # However, we need to check that the filtered extremities are non-empty.
# If they are empty then either we can a) bail or b) still attempt to # If they are empty then either we can a) bail or b) still attempt to
# backfill. We opt to try backfilling anyway just in case we do get # backfill. We opt to try backfilling anyway just in case we do get
# relevant events. # relevant events.
if filtered_sorted_extremeties_tuple: #
sorted_extremeties_tuple = filtered_sorted_extremeties_tuple filtered_sorted_backfill_points = [
t for t in sorted_backfill_points if t.depth <= current_depth
]
if filtered_sorted_backfill_points:
logger.debug(
"_maybe_backfill_inner: backfill points before current depth: %s",
filtered_sorted_backfill_points,
)
sorted_backfill_points = filtered_sorted_backfill_points
else:
logger.debug(
"_maybe_backfill_inner: all backfill points are *after* current depth. Backfilling anyway."
)
# We don't want to specify too many extremities as it causes the backfill # For performance's sake, we only want to paginate from a particular extremity
# request URI to be too long. # if we can actually see the events we'll get. Otherwise, we'd just spend a lot
extremities = dict(sorted_extremeties_tuple[:5]) # of resources to get redacted events. We check each extremity in turn and
# ignore those which users on our server wouldn't be able to see.
#
# Additionally, we limit ourselves to backfilling from at most 5 extremities,
# for two reasons:
#
# - The check which determines if we can see an extremity's events can be
# expensive (we load the full state for the room at each of the backfill
# points, or (worse) their successors)
# - We want to avoid the server-server API request URI becoming too long.
#
# *Note*: the spec wants us to keep backfilling until we reach the start
# of the room in case we are allowed to see some of the history. However,
# in practice that causes more issues than its worth, as (a) it's
# relatively rare for there to be any visible history and (b) even when
# there is it's often sufficiently long ago that clients would stop
# attempting to paginate before backfill reached the visible history.
extremities_to_request: List[str] = []
for bp in sorted_backfill_points:
if len(extremities_to_request) >= 5:
break
# For regular backwards extremities, we don't have the extremity events
# themselves, so we need to actually check the events that reference them -
# their "successor" events.
#
# TODO: Correctly handle the case where we are allowed to see the
# successor event but not the backward extremity, e.g. in the case of
# initial join of the server where we are allowed to see the join
# event but not anything before it. This would require looking at the
# state *before* the event, ignoring the special casing certain event
# types have.
if bp.type == _BackfillPointType.INSERTION_PONT:
event_ids_to_check = [bp.event_id]
else:
event_ids_to_check = await self.store.get_successor_events(bp.event_id)
events_to_check = await self.store.get_events_as_list(
event_ids_to_check,
redact_behaviour=EventRedactBehaviour.AS_IS,
get_prev_content=False,
)
# We set `check_history_visibility_only` as we might otherwise get false
# positives from users having been erased.
filtered_extremities = await filter_events_for_server(
self.storage,
self.server_name,
events_to_check,
redact=False,
check_history_visibility_only=True,
)
if filtered_extremities:
extremities_to_request.append(bp.event_id)
else:
logger.debug(
"_maybe_backfill_inner: skipping extremity %s as it would not be visible",
bp,
)
if not extremities_to_request:
logger.debug(
"_maybe_backfill_inner: found no extremities which would be visible"
)
return False
logger.debug(
"_maybe_backfill_inner: extremities_to_request %s", extremities_to_request
)
# Now we need to decide which hosts to hit first. # Now we need to decide which hosts to hit first.
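Condensing the new selection logic above into a standalone sketch (not the real implementation): backfill points are reduced to (event_id, depth) pairs, and the expensive history-visibility check is abstracted into a caller-supplied predicate, whereas the handler actually loads successor events and runs filter_events_for_server.

from typing import Callable, List, Optional, Tuple


def choose_backfill_extremities(
    points: List[Tuple[str, int]],
    current_depth: int,
    limit: int,
    is_visible: Callable[[str], bool],
) -> Optional[List[str]]:
    if not points:
        return None

    # Most recent (deepest) points first.
    points = sorted(points, key=lambda p: -p[1])

    # No-op if even the deepest point is more than 2 * limit behind us.
    if current_depth - 2 * limit > points[0][1]:
        return None

    # Prefer points at or before our current depth, but fall back to all of them.
    candidates = [p for p in points if p[1] <= current_depth] or points

    # Request at most five points whose events our users could actually see.
    chosen: List[str] = []
    for event_id, _depth in candidates:
        if len(chosen) >= 5:
            break
        if is_visible(event_id):
            chosen.append(event_id)
    return chosen or None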
@ -309,7 +365,7 @@ class FederationHandler:
for dom in domains: for dom in domains:
try: try:
await self._federation_event_handler.backfill( await self._federation_event_handler.backfill(
dom, room_id, limit=100, extremities=extremities dom, room_id, limit=100, extremities=extremities_to_request
) )
# If this succeeded then we probably already have the # If this succeeded then we probably already have the
# appropriate stuff. # appropriate stuff.
@ -466,6 +522,8 @@ class FederationHandler:
) )
if ret.partial_state: if ret.partial_state:
# TODO(faster_joins): roll this back if we don't manage to start the
# background resync (eg process_remote_join fails)
await self.store.store_partial_state_room(room_id, ret.servers_in_room) await self.store.store_partial_state_room(room_id, ret.servers_in_room)
max_stream_id = await self._federation_event_handler.process_remote_join( max_stream_id = await self._federation_event_handler.process_remote_join(
@ -478,6 +536,18 @@ class FederationHandler:
partial_state=ret.partial_state, partial_state=ret.partial_state,
) )
if ret.partial_state:
# Kick off the process of asynchronously fetching the state for this
# room.
#
# TODO(faster_joins): pick this up again on restart
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
destination=origin,
room_id=room_id,
)
# We wait here until this instance has seen the events come down # We wait here until this instance has seen the events come down
# replication (if we're using replication) as the below uses caches. # replication (if we're using replication) as the below uses caches.
await self._replication.wait_for_stream_position( await self._replication.wait_for_stream_position(
@ -1370,3 +1440,64 @@ class FederationHandler:
# We fell off the bottom, couldn't get the complexity from anyone. Oh # We fell off the bottom, couldn't get the complexity from anyone. Oh
# well. # well.
return None return None
async def _sync_partial_state_room(
self,
destination: str,
room_id: str,
) -> None:
"""Background process to resync the state of a partial-state room
Args:
destination: homeserver to pull the state from
room_id: room to be resynced
"""
# TODO(faster_joins): do we need to lock to avoid races? What happens if other
# worker processes kick off a resync in parallel? Perhaps we should just elect
# a single worker to do the resync.
#
# TODO(faster_joins): what happens if we leave the room during a resync? if we
# really leave, that might mean we have difficulty getting the room state over
# federation.
#
# TODO(faster_joins): try other destinations if the one we have fails
logger.info("Syncing state for room %s via %s", room_id, destination)
# we work through the queue in order of increasing stream ordering.
while True:
batch = await self.store.get_partial_state_events_batch(room_id)
if not batch:
# all the events are updated, so we can update current state and
# clear the lazy-loading flag.
logger.info("Updating current state for %s", room_id)
assert (
self.storage.persistence is not None
), "TODO(faster_joins): support for workers"
await self.storage.persistence.update_current_state(room_id)
logger.info("Clearing partial-state flag for %s", room_id)
success = await self.store.clear_partial_state_room(room_id)
if success:
logger.info("State resync complete for %s", room_id)
# TODO(faster_joins) update room stats and user directory?
return
# we raced against more events arriving with partial state. Go round
# the loop again. We've already logged a warning, so no need for more.
# TODO(faster_joins): there is still a race here, whereby incoming events which raced
# with us will fail to be persisted after the call to `clear_partial_state_room` due to
# having partial state.
continue
events = await self.store.get_events_as_list(
batch,
redact_behaviour=EventRedactBehaviour.AS_IS,
allow_rejected=True,
)
for event in events:
await self._federation_event_handler.update_state_for_partial_state_event(
destination, event
)


@ -477,6 +477,46 @@ class FederationEventHandler:
return await self.persist_events_and_notify(room_id, [(event, context)]) return await self.persist_events_and_notify(room_id, [(event, context)])
async def update_state_for_partial_state_event(
self, destination: str, event: EventBase
) -> None:
"""Recalculate the state at an event as part of a de-partial-stating process
Args:
destination: server to request full state from
event: partial-state event to be de-partial-stated
"""
logger.info("Updating state for %s", event.event_id)
with nested_logging_context(suffix=event.event_id):
# if we have all the event's prev_events, then we can work out the
# state based on their states. Otherwise, we request it from the destination
# server.
#
# This is the same operation as we do when we receive a regular event
# over federation.
state = await self._resolve_state_at_missing_prevs(destination, event)
# build a new state group for it if need be
context = await self._state_handler.compute_event_context(
event,
old_state=state,
)
if context.partial_state:
# this can happen if some or all of the event's prev_events still have
# partial state - ie, an event has an earlier stream_ordering than one
# or more of its prev_events, so we de-partial-state it before its
# prev_events.
#
# TODO(faster_joins): we probably need to be more intelligent, and
# exclude partial-state prev_events from consideration
logger.warning(
"%s still has partial state: can't de-partial-state it yet",
event.event_id,
)
return
await self._store.update_state_for_partial_state_event(event, context)
self._state_store.notify_event_un_partial_stated(event.event_id)
async def backfill( async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str] self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> None: ) -> None:


@ -175,17 +175,13 @@ class MessageHandler:
state_filter = state_filter or StateFilter.all() state_filter = state_filter or StateFilter.all()
if at_token: if at_token:
# FIXME this claims to get the state at a stream position, but last_event = await self.store.get_last_event_in_room_before_stream_ordering(
# get_recent_events_for_room operates by topo ordering. This therefore room_id,
# does not reliably give you the state at the given stream position. end_token=at_token.room_key,
# (https://github.com/matrix-org/synapse/issues/3305)
last_events, _ = await self.store.get_recent_events_for_room(
room_id, end_token=at_token.room_key, limit=1
) )
if not last_events: if not last_event:
raise NotFoundError("Can't find event for token %s" % (at_token,)) raise NotFoundError("Can't find event for token %s" % (at_token,))
last_event = last_events[0]
# check whether the user is in the room at that time to determine # check whether the user is in the room at that time to determine
# whether they should be treated as peeking. # whether they should be treated as peeking.
@ -204,7 +200,7 @@ class MessageHandler:
visible_events = await filter_events_for_client( visible_events = await filter_events_for_client(
self.storage, self.storage,
user_id, user_id,
last_events, [last_event],
filter_send_to_client=False, filter_send_to_client=False,
is_peeking=is_peeking, is_peeking=is_peeking,
) )
@ -1104,10 +1100,7 @@ class EventCreationHandler:
raise SynapseError(400, "Can't send same reaction twice") raise SynapseError(400, "Can't send same reaction twice")
# Don't attempt to start a thread if the parent event is a relation. # Don't attempt to start a thread if the parent event is a relation.
elif ( elif relation_type == RelationTypes.THREAD:
relation_type == RelationTypes.THREAD
or relation_type == RelationTypes.UNSTABLE_THREAD
):
if await self.store.event_includes_relation(relates_to): if await self.store.event_includes_relation(relates_to):
raise SynapseError( raise SynapseError(
400, "Cannot start threads from an event with a relation" 400, "Cannot start threads from an event with a relation"


@ -151,7 +151,7 @@ class BasePresenceHandler(abc.ABC):
@abc.abstractmethod @abc.abstractmethod
async def user_syncing( async def user_syncing(
self, user_id: str, affect_presence: bool self, user_id: str, affect_presence: bool, presence_state: str
) -> ContextManager[None]: ) -> ContextManager[None]:
"""Returns a context manager that should surround any stream requests """Returns a context manager that should surround any stream requests
from the user. from the user.
@ -165,6 +165,7 @@ class BasePresenceHandler(abc.ABC):
affect_presence: If false this function will be a no-op. affect_presence: If false this function will be a no-op.
Useful for streams that are not associated with an actual Useful for streams that are not associated with an actual
client that is being used by a user. client that is being used by a user.
presence_state: The presence state indicated in the sync request
""" """
@abc.abstractmethod @abc.abstractmethod
@ -228,6 +229,11 @@ class BasePresenceHandler(abc.ABC):
return states return states
async def current_state_for_user(self, user_id: str) -> UserPresenceState:
"""Get the current presence state for a user."""
res = await self.current_state_for_users([user_id])
return res[user_id]
@abc.abstractmethod @abc.abstractmethod
async def set_state( async def set_state(
self, self,
@ -461,7 +467,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
self.send_user_sync(user_id, False, last_sync_ms) self.send_user_sync(user_id, False, last_sync_ms)
async def user_syncing( async def user_syncing(
self, user_id: str, affect_presence: bool self, user_id: str, affect_presence: bool, presence_state: str
) -> ContextManager[None]: ) -> ContextManager[None]:
"""Record that a user is syncing. """Record that a user is syncing.
@ -471,6 +477,17 @@ class WorkerPresenceHandler(BasePresenceHandler):
if not affect_presence or not self._presence_enabled: if not affect_presence or not self._presence_enabled:
return _NullContextManager() return _NullContextManager()
prev_state = await self.current_state_for_user(user_id)
if prev_state != PresenceState.BUSY:
# We set state here but pass ignore_status_msg = True as we don't want to
# cause the status message to be cleared.
# Note that this causes last_active_ts to be incremented which is not
# what the spec wants: see comment in the BasePresenceHandler version
# of this function.
await self.set_state(
UserID.from_string(user_id), {"presence": presence_state}, True
)
curr_sync = self._user_to_num_current_syncs.get(user_id, 0) curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
self._user_to_num_current_syncs[user_id] = curr_sync + 1 self._user_to_num_current_syncs[user_id] = curr_sync + 1
@ -942,7 +959,10 @@ class PresenceHandler(BasePresenceHandler):
await self._update_states([prev_state.copy_and_replace(**new_fields)]) await self._update_states([prev_state.copy_and_replace(**new_fields)])
async def user_syncing( async def user_syncing(
self, user_id: str, affect_presence: bool = True self,
user_id: str,
affect_presence: bool = True,
presence_state: str = PresenceState.ONLINE,
) -> ContextManager[None]: ) -> ContextManager[None]:
"""Returns a context manager that should surround any stream requests """Returns a context manager that should surround any stream requests
from the user. from the user.
@ -956,6 +976,7 @@ class PresenceHandler(BasePresenceHandler):
affect_presence: If false this function will be a no-op. affect_presence: If false this function will be a no-op.
Useful for streams that are not associated with an actual Useful for streams that are not associated with an actual
client that is being used by a user. client that is being used by a user.
presence_state: The presence state indicated in the sync request
""" """
# Override if it should affect the user's presence, if presence is # Override if it should affect the user's presence, if presence is
# disabled. # disabled.
@ -967,9 +988,25 @@ class PresenceHandler(BasePresenceHandler):
self.user_to_num_current_syncs[user_id] = curr_sync + 1 self.user_to_num_current_syncs[user_id] = curr_sync + 1
prev_state = await self.current_state_for_user(user_id) prev_state = await self.current_state_for_user(user_id)
# If they're busy then they don't stop being busy just by syncing,
# so just update the last sync time.
if prev_state.state != PresenceState.BUSY:
# XXX: We set_state separately here and just update the last_active_ts above
# This keeps the logic as similar as possible between the worker and single
# process modes. Using set_state will actually cause last_active_ts to be
# updated always, which is not what the spec calls for, but synapse has done
# this for... forever, I think.
await self.set_state(
UserID.from_string(user_id), {"presence": presence_state}, True
)
# Retrieve the new state for the logic below. This should come from the
# in-memory cache.
prev_state = await self.current_state_for_user(user_id)
# To keep the single process behaviour consistent with worker mode, run the
# same logic as `update_external_syncs_row`, even though it looks weird.
if prev_state.state == PresenceState.OFFLINE: if prev_state.state == PresenceState.OFFLINE:
# If they're currently offline then bring them online, otherwise
# just update the last sync times.
await self._update_states( await self._update_states(
[ [
prev_state.copy_and_replace( prev_state.copy_and_replace(
@ -979,6 +1016,10 @@ class PresenceHandler(BasePresenceHandler):
) )
] ]
) )
# otherwise, set the new presence state & update the last sync time,
# but don't update last_active_ts as this isn't an indication that
# they've been active (even though it's probably been updated by
# set_state above)
else: else:
await self._update_states( await self._update_states(
[ [
@ -1086,11 +1127,6 @@ class PresenceHandler(BasePresenceHandler):
) )
self.external_process_last_updated_ms.pop(process_id, None) self.external_process_last_updated_ms.pop(process_id, None)
async def current_state_for_user(self, user_id: str) -> UserPresenceState:
"""Get the current presence state for a user."""
res = await self.current_state_for_users([user_id])
return res[user_id]
async def _persist_and_notify(self, states: List[UserPresenceState]) -> None: async def _persist_and_notify(self, states: List[UserPresenceState]) -> None:
"""Persist states in the database, poke the notifier and send to """Persist states in the database, poke the notifier and send to
interested remote servers interested remote servers


@ -256,64 +256,6 @@ class RelationsHandler:
return filtered_results return filtered_results
async def _get_bundled_aggregation_for_event(
self, event: EventBase, ignored_users: FrozenSet[str]
) -> Optional[BundledAggregations]:
"""Generate bundled aggregations for an event.
Note that this does not use a cache, but depends on cached methods.
Args:
event: The event to calculate bundled aggregations for.
ignored_users: The users ignored by the requesting user.
Returns:
The bundled aggregations for an event, if bundled aggregations are
enabled and the event can have bundled aggregations.
"""
# Do not bundle aggregations for an event which represents an edit or an
# annotation. It does not make sense for them to have related events.
relates_to = event.content.get("m.relates_to")
if isinstance(relates_to, (dict, frozendict)):
relation_type = relates_to.get("rel_type")
if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
return None
event_id = event.event_id
room_id = event.room_id
# The bundled aggregations to include, a mapping of relation type to a
# type-specific value. Some types include the direct return type here
# while others need more processing during serialization.
aggregations = BundledAggregations()
annotations = await self.get_annotations_for_event(
event_id, room_id, ignored_users=ignored_users
)
if annotations:
aggregations.annotations = {"chunk": annotations}
references, next_token = await self.get_relations_for_event(
event_id,
event,
room_id,
RelationTypes.REFERENCE,
ignored_users=ignored_users,
)
if references:
aggregations.references = {
"chunk": [{"event_id": event.event_id} for event in references]
}
if next_token:
aggregations.references["next_batch"] = await next_token.to_string(
self._main_store
)
# Store the bundled aggregations in the event metadata for later use.
return aggregations
async def get_threads_for_events( async def get_threads_for_events(
self, event_ids: Collection[str], user_id: str, ignored_users: FrozenSet[str] self, event_ids: Collection[str], user_id: str, ignored_users: FrozenSet[str]
) -> Dict[str, _ThreadAggregation]: ) -> Dict[str, _ThreadAggregation]:
@ -435,11 +377,39 @@ class RelationsHandler:
# Fetch other relations per event. # Fetch other relations per event.
for event in events_by_id.values(): for event in events_by_id.values():
event_result = await self._get_bundled_aggregation_for_event( # Do not bundle aggregations for an event which represents an edit or an
event, ignored_users # annotation. It does not make sense for them to have related events.
relates_to = event.content.get("m.relates_to")
if isinstance(relates_to, (dict, frozendict)):
relation_type = relates_to.get("rel_type")
if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
continue
annotations = await self.get_annotations_for_event(
event.event_id, event.room_id, ignored_users=ignored_users
)
if annotations:
results.setdefault(
event.event_id, BundledAggregations()
).annotations = {"chunk": annotations}
references, next_token = await self.get_relations_for_event(
event.event_id,
event,
event.room_id,
RelationTypes.REFERENCE,
ignored_users=ignored_users,
)
if references:
aggregations = results.setdefault(event.event_id, BundledAggregations())
aggregations.references = {
"chunk": [{"event_id": ev.event_id} for ev in references]
}
if next_token:
aggregations.references["next_batch"] = await next_token.to_string(
self._main_store
) )
if event_result:
results[event.event_id] = event_result
# Fetch any edits (but not for redacted events). # Fetch any edits (but not for redacted events).
# #
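For orientation, the removed per-event helper is now folded directly into the loop over `events_by_id`. A condensed, self-contained sketch of the resulting control flow follows; `get_annotations` and `get_references` are hypothetical stand-ins for the store calls used in the real code, and the relation-type strings are spelled out literally.

from dataclasses import dataclass
from typing import Dict, List, Optional

@dataclass
class BundledAggregations:
    annotations: Optional[dict] = None
    references: Optional[dict] = None

def bundle_aggregations(events: List[dict],
                        get_annotations,
                        get_references) -> Dict[str, BundledAggregations]:
    """Sketch of the inlined per-event aggregation loop."""
    results: Dict[str, BundledAggregations] = {}
    for event in events:
        # Edits and annotations never get bundled aggregations of their own.
        relates_to = event.get("content", {}).get("m.relates_to", {})
        if relates_to.get("rel_type") in ("m.annotation", "m.replace"):
            continue

        annotations = get_annotations(event["event_id"])
        if annotations:
            results.setdefault(
                event["event_id"], BundledAggregations()
            ).annotations = {"chunk": annotations}

        references, next_batch = get_references(event["event_id"])
        if references:
            aggs = results.setdefault(event["event_id"], BundledAggregations())
            aggs.references = {"chunk": [{"event_id": ev} for ev in references]}
            if next_batch:
                aggs.references["next_batch"] = next_batch
    return results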

View file

@ -55,7 +55,7 @@ class RoomBatchHandler:
# it has a larger `depth` but before the successor event because the `stream_ordering` # it has a larger `depth` but before the successor event because the `stream_ordering`
# is negative before the successor event. # is negative before the successor event.
successor_event_ids = await self.store.get_successor_events( successor_event_ids = await self.store.get_successor_events(
[most_recent_prev_event_id] most_recent_prev_event_id
) )
# If we can't find any successor events, then it's a forward extremity of # If we can't find any successor events, then it's a forward extremity of

View file

@ -661,16 +661,15 @@ class SyncHandler:
stream_position: point at which to get state stream_position: point at which to get state
state_filter: The state filter used to fetch state from the database. state_filter: The state filter used to fetch state from the database.
""" """
# FIXME this claims to get the state at a stream position, but # FIXME: This gets the state at the latest event before the stream ordering,
# get_recent_events_for_room operates by topo ordering. This therefore # which might not be the same as the "current state" of the room at the time
# does not reliably give you the state at the given stream position. # of the stream token if there were multiple forward extremities at the time.
# (https://github.com/matrix-org/synapse/issues/3305) last_event = await self.store.get_last_event_in_room_before_stream_ordering(
last_events, _ = await self.store.get_recent_events_for_room( room_id,
room_id, end_token=stream_position.room_key, limit=1 end_token=stream_position.room_key,
) )
if last_events: if last_event:
last_event = last_events[-1]
state = await self.get_state_after_event( state = await self.get_state_after_event(
last_event, state_filter=state_filter or StateFilter.all() last_event, state_filter=state_filter or StateFilter.all()
) )
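Roughly, the new approach asks the store for the single most recent event at or before the stream token and loads the state after it, instead of paging through recent events in topological order. A minimal sketch of that flow, with `store` and `state_handler` as hypothetical stand-ins for the objects used above:

async def state_at_stream_position(store, state_handler, room_id: str, stream_token):
    """Sketch: resolve room state at a stream position via the last event.

    `store.get_last_event_in_room_before_stream_ordering` and
    `state_handler.get_state_after_event` stand in for the methods used above.
    """
    last_event = await store.get_last_event_in_room_before_stream_ordering(
        room_id, end_token=stream_token
    )
    if last_event is not None:
        # State *after* the last event approximates the state at the token;
        # with multiple forward extremities this may differ from "current state".
        return await state_handler.get_state_after_event(last_event)
    # No events before the token: nothing to report.
    return {}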

View file

@ -704,6 +704,9 @@ class MatrixFederationHttpClient:
Returns: Returns:
A list of headers to be added as "Authorization:" headers A list of headers to be added as "Authorization:" headers
""" """
if destination is None and destination_is is None:
raise ValueError("destination and destination_is cannot both be None!")
request: JsonDict = { request: JsonDict = {
"method": method.decode("ascii"), "method": method.decode("ascii"),
"uri": url_bytes.decode("ascii"), "uri": url_bytes.decode("ascii"),
@ -726,8 +729,13 @@ class MatrixFederationHttpClient:
for key, sig in request["signatures"][self.server_name].items(): for key, sig in request["signatures"][self.server_name].items():
auth_headers.append( auth_headers.append(
( (
'X-Matrix origin=%s,key="%s",sig="%s"' 'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"'
% (self.server_name, key, sig) % (
self.server_name,
key,
sig,
request.get("destination") or request["destination_is"],
)
).encode("ascii") ).encode("ascii")
) )
return auth_headers return auth_headers
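The Authorization header now carries the destination alongside the origin, key id and signature. A standalone sketch of the header construction (the example values are illustrative only; real headers are produced from the signed request):

def build_x_matrix_header(origin: str, key_id: str, sig: str, destination: str) -> bytes:
    """Sketch of the X-Matrix header format with the new destination field."""
    return (
        'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"'
        % (origin, key_id, sig, destination)
    ).encode("ascii")

# Illustrative values only:
# build_x_matrix_header("example.org", "ed25519:a_abc", "base64sig", "matrix.org")
# -> b'X-Matrix origin=example.org,key="ed25519:a_abc",sig="base64sig",destination="matrix.org"'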

View file

@ -1,151 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from typing import Set
logger = logging.getLogger(__name__)
# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
# installed. It is passed to setup() as install_requires in setup.py.
#
# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
# of lists. The dict key is the optional dependency name and can be passed to
# pip when installing. The list is a series of requirement specifiers[1] to be
# installed when that optional dependency requirement is specified. It is passed
# to setup() as extras_require in setup.py
#
# Note that these both represent runtime dependencies (and the versions
# installed are checked at runtime).
#
# Also note that we replicate these constraints in the Synapse Dockerfile while
# pre-installing dependencies. If these constraints are updated here, the same
# change should be made in the Dockerfile.
#
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
REQUIREMENTS = [
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
"jsonschema>=3.0.0",
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
"frozendict>=1,!=2.1.2",
"unpaddedbase64>=1.1.0",
"canonicaljson>=1.4.0",
# we use the type definitions added in signedjson 1.1.
"signedjson>=1.1.0",
"pynacl>=1.2.1",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
"Twisted[tls]>=18.9.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
"pyyaml>=3.11",
"pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7",
"bcrypt>=3.1.0",
"pillow>=5.4.0",
"sortedcontainers>=1.4.4",
"pymacaroons>=0.13.0",
"msgpack>=0.5.2",
"phonenumbers>=8.2.0",
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
"prometheus_client>=0.4.0",
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see #9936
"attrs>=19.2.0,!=21.1.0",
"netaddr>=0.7.18",
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
"Jinja2>=3.0",
"bleach>=1.4.3",
# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
"typing-extensions>=3.10.0",
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
"cryptography>=3.4.7",
# ijson 3.1.4 fixes a bug with "." in property names
"ijson>=3.1.4",
"matrix-common~=1.1.0",
# We need packaging.requirements.Requirement, added in 16.1.
"packaging>=16.1",
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
"importlib_metadata>=1.4 ; python_version < '3.8'",
]
CONDITIONAL_REQUIREMENTS = {
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
"postgres": [
# we use execute_values with the fetch param, which arrived in psycopg 2.8.
"psycopg2>=2.8 ; platform_python_implementation != 'PyPy'",
"psycopg2cffi>=2.8 ; platform_python_implementation == 'PyPy'",
"psycopg2cffi-compat==1.1 ; platform_python_implementation == 'PyPy'",
],
"saml2": [
"pysaml2>=4.5.0",
],
"oidc": ["authlib>=0.14.0"],
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
"systemd": ["systemd-python>=231"],
"url_preview": ["lxml>=4.2.0"],
"sentry": ["sentry-sdk>=0.7.2"],
"opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
"jwt": ["pyjwt>=1.6.4"],
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
"redis": ["txredisapi>=1.4.7", "hiredis"],
# Required to use experimental `caches.track_memory_usage` config option.
"cache_memory": ["pympler"],
}
ALL_OPTIONAL_REQUIREMENTS: Set[str] = set()
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
# Exclude lint as it's a dev-based requirement.
if name not in ["systemd"]:
ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
# ensure there are no double-quote characters in any of the deps (otherwise the
# 'pip install' incantation in DependencyException will break)
for dep in itertools.chain(
REQUIREMENTS,
*CONDITIONAL_REQUIREMENTS.values(),
):
if '"' in dep:
raise Exception(
"Dependency `%s` contains double-quote; use single-quotes instead" % (dep,)
)
def list_requirements():
return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
if __name__ == "__main__":
import sys
sys.stdout.writelines(req + "\n" for req in list_requirements())

View file

@ -30,7 +30,7 @@
{%- elif message.msgtype == "m.notice" %} {%- elif message.msgtype == "m.notice" %}
{{ message.body_text_html }} {{ message.body_text_html }}
{%- elif message.msgtype == "m.image" and message.image_url %} {%- elif message.msgtype == "m.image" and message.image_url %}
<img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" /> <img src="{{ message.image_url|mxc_to_http(640, 480, 'scale') }}" />
{%- elif message.msgtype == "m.file" %} {%- elif message.msgtype == "m.file" %}
<span class="filename">{{ message.body_text_plain }}</span> <span class="filename">{{ message.body_text_plain }}</span>
{%- else %} {%- else %}

View file

@ -342,6 +342,15 @@ class LoginRestServlet(RestServlet):
user_id = canonical_uid user_id = canonical_uid
device_id = login_submission.get("device_id") device_id = login_submission.get("device_id")
# If device_id is present, check that it is not longer than a reasonable 512 characters
if device_id and len(device_id) > 512:
raise LoginError(
400,
"device_id cannot be longer than 512 characters.",
errcode=Codes.INVALID_PARAM,
)
initial_display_name = login_submission.get("initial_device_display_name") initial_display_name = login_submission.get("initial_device_display_name")
( (
device_id, device_id,
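The validation added above reduces to a simple length guard. A minimal standalone sketch, with the 512-character limit taken from the change above and a plain ValueError standing in for the servlet's LoginError:

from typing import Optional

MAX_DEVICE_ID_LENGTH = 512  # limit introduced by the check above

def validate_device_id(device_id: Optional[str]) -> None:
    """Sketch: reject client-supplied device IDs longer than 512 characters."""
    if device_id and len(device_id) > MAX_DEVICE_ID_LENGTH:
        # The servlet raises a LoginError with errcode M_INVALID_PARAM;
        # a plain ValueError stands in for it here.
        raise ValueError("device_id cannot be longer than 512 characters.")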

View file

@ -21,6 +21,7 @@ from urllib import parse as urlparse
from twisted.web.server import Request from twisted.web.server import Request
from synapse import event_auth
from synapse.api.constants import EventTypes, Membership from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import ( from synapse.api.errors import (
AuthError, AuthError,
@ -29,6 +30,7 @@ from synapse.api.errors import (
MissingClientTokenError, MissingClientTokenError,
ShadowBanError, ShadowBanError,
SynapseError, SynapseError,
UnredactedContentDeletedError,
) )
from synapse.api.filtering import Filter from synapse.api.filtering import Filter
from synapse.events.utils import format_event_for_client_v2 from synapse.events.utils import format_event_for_client_v2
@ -647,18 +649,55 @@ class RoomEventServlet(RestServlet):
super().__init__() super().__init__()
self.clock = hs.get_clock() self.clock = hs.get_clock()
self._store = hs.get_datastores().main self._store = hs.get_datastores().main
self._state = hs.get_state_handler()
self.event_handler = hs.get_event_handler() self.event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer() self._event_serializer = hs.get_event_client_serializer()
self._relations_handler = hs.get_relations_handler() self._relations_handler = hs.get_relations_handler()
self.auth = hs.get_auth() self.auth = hs.get_auth()
self.content_keep_ms = hs.config.server.redaction_retention_period
self.msc2815_enabled = hs.config.experimental.msc2815_enabled
async def on_GET( async def on_GET(
self, request: SynapseRequest, room_id: str, event_id: str self, request: SynapseRequest, room_id: str, event_id: str
) -> Tuple[int, JsonDict]: ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True) requester = await self.auth.get_user_by_req(request, allow_guest=True)
include_unredacted_content = self.msc2815_enabled and (
parse_string(
request,
"fi.mau.msc2815.include_unredacted_content",
allowed_values=("true", "false"),
)
== "true"
)
if include_unredacted_content and not await self.auth.is_server_admin(
requester.user
):
power_level_event = await self._state.get_current_state(
room_id, EventTypes.PowerLevels, ""
)
auth_events = {}
if power_level_event:
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
redact_level = event_auth.get_named_level(auth_events, "redact", 50)
user_level = event_auth.get_user_power_level(
requester.user.to_string(), auth_events
)
if user_level < redact_level:
raise SynapseError(
403,
"You don't have permission to view redacted events in this room.",
errcode=Codes.FORBIDDEN,
)
try: try:
event = await self.event_handler.get_event( event = await self.event_handler.get_event(
requester.user, room_id, event_id requester.user,
room_id,
event_id,
show_redacted=include_unredacted_content,
) )
except AuthError: except AuthError:
# This endpoint is supposed to return a 404 when the requester does # This endpoint is supposed to return a 404 when the requester does
@ -667,14 +706,21 @@ class RoomEventServlet(RestServlet):
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
if event: if event:
if include_unredacted_content and await self._store.have_censored_event(
event_id
):
raise UnredactedContentDeletedError(self.content_keep_ms)
# Ensure there are bundled aggregations available. # Ensure there are bundled aggregations available.
aggregations = await self._relations_handler.get_bundled_aggregations( aggregations = await self._relations_handler.get_bundled_aggregations(
[event], requester.user.to_string() [event], requester.user.to_string()
) )
time_now = self.clock.time_msec() time_now = self.clock.time_msec()
# per MSC2676, /rooms/{roomId}/event/{eventId}, should return the
# *original* event, rather than the edited version
event_dict = self._event_serializer.serialize_event( event_dict = self._event_serializer.serialize_event(
event, time_now, bundle_aggregations=aggregations event, time_now, bundle_aggregations=aggregations, apply_edits=False
) )
return 200, event_dict return 200, event_dict
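The gate for fi.mau.msc2815.include_unredacted_content amounts to: server admins may always request the unredacted content, anyone else needs a power level at or above the room's redact level, and if the content has already been censored the request fails regardless. A simplified sketch of that decision, with the inputs assumed to be pre-computed from the power-levels event and the redactions table:

def may_view_redacted_content(
    is_server_admin: bool,
    user_power_level: int,
    redact_level: int,
    content_already_censored: bool,
) -> bool:
    """Sketch of the MSC2815 permission check above."""
    if content_already_censored:
        # Once the redaction retention period has passed the content is gone,
        # so the endpoint errors rather than returning the original event.
        return False
    if is_server_admin:
        return True
    return user_power_level >= redact_level

# e.g. a moderator at PL 50 in a room with the default redact level of 50:
assert may_view_redacted_content(False, 50, 50, False)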

View file

@ -180,13 +180,10 @@ class SyncRestServlet(RestServlet):
affect_presence = set_presence != PresenceState.OFFLINE affect_presence = set_presence != PresenceState.OFFLINE
if affect_presence:
await self.presence_handler.set_state(
user, {"presence": set_presence}, True
)
context = await self.presence_handler.user_syncing( context = await self.presence_handler.user_syncing(
user.to_string(), affect_presence=affect_presence user.to_string(),
affect_presence=affect_presence,
presence_state=set_presence,
) )
with context: with context:
sync_result = await self.sync_handler.wait_for_sync_for_user( sync_result = await self.sync_handler.wait_for_sync_for_user(

View file

@ -86,7 +86,7 @@ class VersionsRestServlet(RestServlet):
# Implements additional endpoints as described in MSC2432 # Implements additional endpoints as described in MSC2432
"org.matrix.msc2432": True, "org.matrix.msc2432": True,
# Implements additional endpoints as described in MSC2666 # Implements additional endpoints as described in MSC2666
"uk.half-shot.msc2666": True, "uk.half-shot.msc2666.mutual_rooms": True,
# Whether new rooms will be set to encrypted or not (based on presets). # Whether new rooms will be set to encrypted or not (based on presets).
"io.element.e2ee_forced.public": self.e2ee_forced_public, "io.element.e2ee_forced.public": self.e2ee_forced_public,
"io.element.e2ee_forced.private": self.e2ee_forced_private, "io.element.e2ee_forced.private": self.e2ee_forced_private,
@ -100,8 +100,9 @@ class VersionsRestServlet(RestServlet):
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
"org.matrix.msc3030": self.config.experimental.msc3030_enabled, "org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440. # Adds support for thread relations, per MSC3440.
"org.matrix.msc3440": self.config.experimental.msc3440_enabled,
"org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
# Allows moderators to fetch redacted event content as described in MSC2815
"fi.mau.msc2815": self.config.experimental.msc2815_enabled,
}, },
}, },
) )

View file

@ -758,7 +758,7 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self @cache_in_self
def get_event_client_serializer(self) -> EventClientSerializer: def get_event_client_serializer(self) -> EventClientSerializer:
return EventClientSerializer(self) return EventClientSerializer()
@cache_in_self @cache_in_self
def get_password_policy_handler(self) -> PasswordPolicyHandler: def get_password_policy_handler(self) -> PasswordPolicyHandler:

View file

@ -41,7 +41,6 @@ from prometheus_client import Histogram
from typing_extensions import Literal from typing_extensions import Literal
from twisted.enterprise import adbapi from twisted.enterprise import adbapi
from twisted.internet import defer
from synapse.api.errors import StoreError from synapse.api.errors import StoreError
from synapse.config.database import DatabaseConnectionConfig from synapse.config.database import DatabaseConnectionConfig
@ -794,7 +793,7 @@ class DatabasePool:
# We also wait until everything above is done before releasing the # We also wait until everything above is done before releasing the
# `CancelledError`, so that logging contexts won't get used after they have been # `CancelledError`, so that logging contexts won't get used after they have been
# finished. # finished.
return await delay_cancellation(defer.ensureDeferred(_runInteraction())) return await delay_cancellation(_runInteraction())
async def runWithConnection( async def runWithConnection(
self, self,

View file

@ -1582,7 +1582,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
self, self,
user_id: str, user_id: str,
device_ids: Collection[str], device_ids: Collection[str],
hosts: Optional[Collection[str]],
room_ids: Collection[str], room_ids: Collection[str],
) -> Optional[int]: ) -> Optional[int]:
"""Persist that a user's devices have been updated, and which hosts """Persist that a user's devices have been updated, and which hosts
@ -1592,9 +1591,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id: The ID of the user whose device changed. user_id: The ID of the user whose device changed.
device_ids: The IDs of any changed devices. If empty, this function will device_ids: The IDs of any changed devices. If empty, this function will
return None. return None.
hosts: The remote destinations that should be notified of the change. If
None then the set of hosts have *not* been calculated, and will be
calculated later by a background task.
room_ids: The rooms that the user is in room_ids: The rooms that the user is in
Returns: Returns:
@ -1606,14 +1602,12 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
context = get_active_span_text_map() context = get_active_span_text_map()
def add_device_changes_txn( def add_device_changes_txn(txn, stream_ids):
txn, stream_ids_for_device_change, stream_ids_for_outbound_pokes
):
self._add_device_change_to_stream_txn( self._add_device_change_to_stream_txn(
txn, txn,
user_id, user_id,
device_ids, device_ids,
stream_ids_for_device_change, stream_ids,
) )
self._add_device_outbound_room_poke_txn( self._add_device_outbound_room_poke_txn(
@ -1621,43 +1615,17 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id, user_id,
device_ids, device_ids,
room_ids, room_ids,
stream_ids_for_device_change, stream_ids,
context,
hosts_have_been_calculated=hosts is not None,
)
# If the set of hosts to send to has not been calculated yet (and so
# `hosts` is None) or there are no `hosts` to send to, then skip
# trying to persist them to the DB.
if not hosts:
return
self._add_device_outbound_poke_to_stream_txn(
txn,
user_id,
device_ids,
hosts,
stream_ids_for_outbound_pokes,
context, context,
) )
# `device_lists_stream` wants a stream ID per device update. async with self._device_list_id_gen.get_next_mult(
num_stream_ids = len(device_ids) len(device_ids)
) as stream_ids:
if hosts:
# `device_lists_outbound_pokes` wants a different stream ID for
# each row, which is a row per host per device update.
num_stream_ids += len(hosts) * len(device_ids)
async with self._device_list_id_gen.get_next_mult(num_stream_ids) as stream_ids:
stream_ids_for_device_change = stream_ids[: len(device_ids)]
stream_ids_for_outbound_pokes = stream_ids[len(device_ids) :]
await self.db_pool.runInteraction( await self.db_pool.runInteraction(
"add_device_change_to_stream", "add_device_change_to_stream",
add_device_changes_txn, add_device_changes_txn,
stream_ids_for_device_change, stream_ids,
stream_ids_for_outbound_pokes,
) )
return stream_ids[-1] return stream_ids[-1]
@ -1735,7 +1703,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
next(stream_id_iterator), next(stream_id_iterator),
user_id, user_id,
device_id, device_id,
False, not self.hs.is_mine_id(
user_id
), # We only need to send out update for *our* users
now, now,
encoded_context if whitelisted_homeserver(destination) else "{}", encoded_context if whitelisted_homeserver(destination) else "{}",
) )
@ -1752,19 +1722,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
room_ids: Collection[str], room_ids: Collection[str],
stream_ids: List[str], stream_ids: List[str],
context: Dict[str, str], context: Dict[str, str],
hosts_have_been_calculated: bool,
) -> None: ) -> None:
"""Record the user in the room has updated their device. """Record the user in the room has updated their device."""
Args:
hosts_have_been_calculated: True if `device_lists_outbound_pokes`
has been updated already with the updates.
"""
# We only need to convert to outbound pokes if they are our user.
converted_to_destinations = (
hosts_have_been_calculated or not self.hs.is_mine_id(user_id)
)
encoded_context = json_encoder.encode(context) encoded_context = json_encoder.encode(context)
@ -1789,7 +1748,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
device_id, device_id,
room_id, room_id,
stream_id, stream_id,
converted_to_destinations, False,
encoded_context, encoded_context,
) )
for room_id in room_ids for room_id in room_ids

View file

@ -695,7 +695,9 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
# Return all events where not all sets can reach them. # Return all events where not all sets can reach them.
return {eid for eid, n in event_to_missing_sets.items() if n} return {eid for eid, n in event_to_missing_sets.items() if n}
async def get_oldest_event_ids_with_depth_in_room(self, room_id) -> Dict[str, int]: async def get_oldest_event_ids_with_depth_in_room(
self, room_id
) -> List[Tuple[str, int]]:
"""Gets the oldest events(backwards extremities) in the room along with the """Gets the oldest events(backwards extremities) in the room along with the
aproximate depth. aproximate depth.
@ -708,7 +710,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
room_id: Room where we want to find the oldest events room_id: Room where we want to find the oldest events
Returns: Returns:
Map from event_id to depth List of (event_id, depth) tuples
""" """
def get_oldest_event_ids_with_depth_in_room_txn(txn, room_id): def get_oldest_event_ids_with_depth_in_room_txn(txn, room_id):
@ -741,7 +743,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
txn.execute(sql, (room_id, False)) txn.execute(sql, (room_id, False))
return dict(txn) return txn.fetchall()
return await self.db_pool.runInteraction( return await self.db_pool.runInteraction(
"get_oldest_event_ids_with_depth_in_room", "get_oldest_event_ids_with_depth_in_room",
@ -751,7 +753,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
async def get_insertion_event_backward_extremities_in_room( async def get_insertion_event_backward_extremities_in_room(
self, room_id self, room_id
) -> Dict[str, int]: ) -> List[Tuple[str, int]]:
"""Get the insertion events we know about that we haven't backfilled yet. """Get the insertion events we know about that we haven't backfilled yet.
We use this function so that we can compare and see if someone's current We use this function so that we can compare and see if someone's current
@ -763,7 +765,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
room_id: Room where we want to find the oldest events room_id: Room where we want to find the oldest events
Returns: Returns:
Map from event_id to depth List of (event_id, depth) tuples
""" """
def get_insertion_event_backward_extremities_in_room_txn(txn, room_id): def get_insertion_event_backward_extremities_in_room_txn(txn, room_id):
@ -778,8 +780,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
""" """
txn.execute(sql, (room_id,)) txn.execute(sql, (room_id,))
return txn.fetchall()
return dict(txn)
return await self.db_pool.runInteraction( return await self.db_pool.runInteraction(
"get_insertion_event_backward_extremities_in_room", "get_insertion_event_backward_extremities_in_room",
@ -1295,22 +1296,19 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
event_results.reverse() event_results.reverse()
return event_results return event_results
async def get_successor_events(self, event_ids: Iterable[str]) -> List[str]: async def get_successor_events(self, event_id: str) -> List[str]:
"""Fetch all events that have the given events as a prev event """Fetch all events that have the given event as a prev event
Args: Args:
event_ids: The events to use as the previous events. event_id: The event to search for as a prev_event.
""" """
rows = await self.db_pool.simple_select_many_batch( return await self.db_pool.simple_select_onecol(
table="event_edges", table="event_edges",
column="prev_event_id", keyvalues={"prev_event_id": event_id},
iterable=event_ids, retcol="event_id",
retcols=("event_id",),
desc="get_successor_events", desc="get_successor_events",
) )
return [row["event_id"] for row in rows]
@wrap_as_background_process("delete_old_forward_extrem_cache") @wrap_as_background_process("delete_old_forward_extrem_cache")
async def _delete_old_forward_extrem_cache(self) -> None: async def _delete_old_forward_extrem_cache(self) -> None:
def _delete_old_forward_extrem_cache_txn(txn): def _delete_old_forward_extrem_cache_txn(txn):
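The simplified get_successor_events maps to a single-column select on event_edges. A self-contained sqlite3 sketch of the equivalent query, with the table reduced to the two columns involved:

import sqlite3

def get_successor_events(conn: sqlite3.Connection, event_id: str) -> list:
    """Sketch: fetch all events that list `event_id` as a prev_event."""
    rows = conn.execute(
        "SELECT event_id FROM event_edges WHERE prev_event_id = ?",
        (event_id,),
    ).fetchall()
    return [row[0] for row in rows]

# Minimal demo with an in-memory table:
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE event_edges (event_id TEXT, prev_event_id TEXT)")
conn.execute("INSERT INTO event_edges VALUES ('$B', '$A'), ('$C', '$A')")
assert sorted(get_successor_events(conn, "$A")) == ["$B", "$C"]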

View file

@ -963,6 +963,21 @@ class PersistEventsStore:
values=to_insert, values=to_insert,
) )
async def update_current_state(
self,
room_id: str,
state_delta: DeltaState,
stream_id: int,
) -> None:
"""Update the current state stored in the datatabase for the given room"""
await self.db_pool.runInteraction(
"update_current_state",
self._update_current_state_txn,
state_delta_by_room={room_id: state_delta},
stream_id=stream_id,
)
def _update_current_state_txn( def _update_current_state_txn(
self, self,
txn: LoggingTransaction, txn: LoggingTransaction,
@ -1819,10 +1834,7 @@ class PersistEventsStore:
if rel_type == RelationTypes.REPLACE: if rel_type == RelationTypes.REPLACE:
txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,)) txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))
if ( if rel_type == RelationTypes.THREAD:
rel_type == RelationTypes.THREAD
or rel_type == RelationTypes.UNSTABLE_THREAD
):
txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,)) txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,))
# It should be safe to only invalidate the cache if the user has not # It should be safe to only invalidate the cache if the user has not
# previously participated in the thread, but that's difficult (and # previously participated in the thread, but that's difficult (and

View file

@ -66,13 +66,15 @@ class EventForwardExtremitiesStore(
""" """
txn.execute(sql, (event_id, room_id)) txn.execute(sql, (event_id, room_id))
deleted_count = txn.rowcount
logger.info( logger.info(
"Deleted %s extra forward extremities for room %s", "Deleted %s extra forward extremities for room %s",
txn.rowcount, deleted_count,
room_id, room_id,
) )
if txn.rowcount > 0: if deleted_count > 0:
# Invalidate the cache # Invalidate the cache
self._invalidate_cache_and_stream( self._invalidate_cache_and_stream(
txn, txn,
@ -80,7 +82,7 @@ class EventForwardExtremitiesStore(
(room_id,), (room_id,),
) )
return txn.rowcount return deleted_count
return await self.db_pool.runInteraction( return await self.db_pool.runInteraction(
"delete_forward_extremities_for_room", "delete_forward_extremities_for_room",

View file

@ -75,7 +75,7 @@ from synapse.storage.util.id_generators import (
from synapse.storage.util.sequence import build_sequence_generator from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter from synapse.util.iterutils import batch_iter
@ -303,6 +303,24 @@ class EventsWorkerStore(SQLBaseStore):
desc="get_received_ts", desc="get_received_ts",
) )
async def have_censored_event(self, event_id: str) -> bool:
"""Check if an event has been censored, i.e. if the content of the event has been erased
from the database due to a redaction.
Args:
event_id: The event ID that was redacted.
Returns:
True if the event has been censored, False otherwise.
"""
censored_redactions_list = await self.db_pool.simple_select_onecol(
table="redactions",
keyvalues={"redacts": event_id},
retcol="have_censored",
desc="get_have_censored",
)
return any(censored_redactions_list)
# Inform mypy that if allow_none is False (the default) then get_event # Inform mypy that if allow_none is False (the default) then get_event
# always returns an EventBase. # always returns an EventBase.
@overload @overload
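have_censored_event above reads the have_censored flag of any redactions targeting the event and reports whether at least one has been censored. A sqlite3 sketch of the same check, with the schema reduced to the columns involved:

import sqlite3

def have_censored_event(conn: sqlite3.Connection, event_id: str) -> bool:
    """Sketch: has the content of `event_id` been erased due to a redaction?"""
    rows = conn.execute(
        "SELECT have_censored FROM redactions WHERE redacts = ?",
        (event_id,),
    ).fetchall()
    return any(bool(row[0]) for row in rows)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE redactions (redacts TEXT, have_censored BOOLEAN)")
conn.execute("INSERT INTO redactions VALUES ('$evt', 1)")
assert have_censored_event(conn, "$evt") is True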
@ -622,6 +640,13 @@ class EventsWorkerStore(SQLBaseStore):
missing_events_ids.difference_update(already_fetching_ids) missing_events_ids.difference_update(already_fetching_ids)
if missing_events_ids: if missing_events_ids:
async def get_missing_events_from_db() -> Dict[str, EventCacheEntry]:
"""Fetches the events in `missing_event_ids` from the database.
Also creates entries in `self._current_event_fetches` to allow
concurrent `_get_events_from_cache_or_db` calls to reuse the same fetch.
"""
log_ctx = current_context() log_ctx = current_context()
log_ctx.record_event_fetch(len(missing_events_ids)) log_ctx.record_event_fetch(len(missing_events_ids))
@ -638,15 +663,13 @@ class EventsWorkerStore(SQLBaseStore):
# Note that _get_events_from_db is also responsible for turning db rows # Note that _get_events_from_db is also responsible for turning db rows
# into FrozenEvents (via _get_event_from_row), which involves seeing if # into FrozenEvents (via _get_event_from_row), which involves seeing if
# the events have been redacted, and if so pulling the redaction event out # the events have been redacted, and if so pulling the redaction event
# of the database to check it. # out of the database to check it.
# #
try: try:
missing_events = await self._get_events_from_db( missing_events = await self._get_events_from_db(
missing_events_ids, missing_events_ids,
) )
event_entry_map.update(missing_events)
except Exception as e: except Exception as e:
with PreserveLoggingContext(): with PreserveLoggingContext():
fetching_deferred.errback(e) fetching_deferred.errback(e)
@ -659,6 +682,16 @@ class EventsWorkerStore(SQLBaseStore):
with PreserveLoggingContext(): with PreserveLoggingContext():
fetching_deferred.callback(missing_events) fetching_deferred.callback(missing_events)
return missing_events
# We must allow the database fetch to complete in the presence of
# cancellations, since multiple `_get_events_from_cache_or_db` calls can
# reuse the same fetch.
missing_events: Dict[str, EventCacheEntry] = await delay_cancellation(
get_missing_events_from_db()
)
event_entry_map.update(missing_events)
if already_fetching_deferreds: if already_fetching_deferreds:
# Wait for the other event requests to finish and add their results # Wait for the other event requests to finish and add their results
# to ours. # to ours.
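The pattern above is: express the fetch as a coroutine, then await it through delay_cancellation so that a cancelled caller cannot tear down a database fetch that other concurrent callers are reusing. Below is a toy asyncio analogue of the same idea; asyncio.shield is not semantically identical to Synapse's Twisted helper (it fails the cancelled caller immediately rather than waiting for the fetch), but it illustrates why the shared fetch must be protected.

import asyncio

async def fetch_events_from_db(event_ids):
    """Stand-in for the real database fetch."""
    await asyncio.sleep(0.1)
    return {event_id: {"event_id": event_id} for event_id in event_ids}

async def demo() -> None:
    # One shared fetch task; several callers await it via shield() so that
    # cancelling any single caller does not cancel the underlying fetch.
    shared_fetch = asyncio.ensure_future(fetch_events_from_db(["$a", "$b"]))

    async def caller():
        return await asyncio.shield(shared_fetch)

    first = asyncio.ensure_future(caller())
    second = asyncio.ensure_future(caller())
    await asyncio.sleep(0.01)
    first.cancel()                 # one caller goes away...
    results = await second         # ...but the fetch still completes
    assert set(results) == {"$a", "$b"}

asyncio.run(demo())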
@ -1956,7 +1989,15 @@ class EventsWorkerStore(SQLBaseStore):
async def get_partial_state_events( async def get_partial_state_events(
self, event_ids: Collection[str] self, event_ids: Collection[str]
) -> Dict[str, bool]: ) -> Dict[str, bool]:
"""Checks which of the given events have partial state""" """Checks which of the given events have partial state
Args:
event_ids: the events we want to check for partial state.
Returns:
a dict mapping from event id to partial-stateness. We return True for
any of the events which are unknown (or are outliers).
"""
result = await self.db_pool.simple_select_many_batch( result = await self.db_pool.simple_select_many_batch(
table="partial_state_events", table="partial_state_events",
column="event_id", column="event_id",
@ -1979,3 +2020,27 @@ class EventsWorkerStore(SQLBaseStore):
desc="is_partial_state_event", desc="is_partial_state_event",
) )
return result is not None return result is not None
async def get_partial_state_events_batch(self, room_id: str) -> List[str]:
"""Get a list of events in the given room that have partial state"""
return await self.db_pool.runInteraction(
"get_partial_state_events_batch",
self._get_partial_state_events_batch_txn,
room_id,
)
@staticmethod
def _get_partial_state_events_batch_txn(
txn: LoggingTransaction, room_id: str
) -> List[str]:
txn.execute(
"""
SELECT event_id FROM partial_state_events AS pse
JOIN events USING (event_id)
WHERE pse.room_id = ?
ORDER BY events.stream_ordering
LIMIT 100
""",
(room_id,),
)
return [row[0] for row in txn]
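The batch query above feeds a resynchronization loop: repeatedly pull up to 100 partial-state events for the room, oldest first, recompute their state, and stop when the table is drained. A rough sketch of such a consumer, with the store method and the per-event resync step injected as callables (the exact driver loop is not shown in this diff):

from typing import Awaitable, Callable, List

async def resync_partial_state_room(
    get_batch: Callable[[str], Awaitable[List[str]]],
    un_partial_state: Callable[[str], Awaitable[None]],
    room_id: str,
) -> None:
    """Sketch of a consumer of get_partial_state_events_batch: drain the
    partial_state_events table for a room in chunks of up to 100 events."""
    while True:
        event_ids = await get_batch(room_id)
        if not event_ids:
            break  # no partial-state events left in this room
        for event_id in event_ids:
            # Recompute full state and clear the partial_state_events row,
            # as update_state_for_partial_state_event does further down.
            await un_partial_state(event_id)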

View file

@ -14,7 +14,6 @@
import logging import logging
from typing import ( from typing import (
TYPE_CHECKING,
Collection, Collection,
Dict, Dict,
FrozenSet, FrozenSet,
@ -32,20 +31,12 @@ import attr
from synapse.api.constants import RelationTypes from synapse.api.constants import RelationTypes
from synapse.events import EventBase from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore from synapse.storage._base import SQLBaseStore
from synapse.storage.database import ( from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.databases.main.stream import generate_pagination_where_clause from synapse.storage.databases.main.stream import generate_pagination_where_clause
from synapse.storage.engines import PostgresEngine from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, RoomStreamToken, StreamToken from synapse.types import JsonDict, RoomStreamToken, StreamToken
from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.descriptors import cached, cachedList
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -63,16 +54,6 @@ class _RelatedEvent:
class RelationsWorkerStore(SQLBaseStore): class RelationsWorkerStore(SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self._msc3440_enabled = hs.config.experimental.msc3440_enabled
@cached(uncached_args=("event",), tree=True) @cached(uncached_args=("event",), tree=True)
async def get_relations_for_event( async def get_relations_for_event(
self, self,
@ -497,7 +478,7 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id AND parent.room_id = child.room_id
WHERE WHERE
%s %s
AND %s AND relation_type = ?
ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC
""" """
else: else:
@ -512,22 +493,16 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id AND parent.room_id = child.room_id
WHERE WHERE
%s %s
AND %s AND relation_type = ?
ORDER BY child.topological_ordering DESC, child.stream_ordering DESC ORDER BY child.topological_ordering DESC, child.stream_ordering DESC
""" """
clause, args = make_in_list_sql_clause( clause, args = make_in_list_sql_clause(
txn.database_engine, "relates_to_id", event_ids txn.database_engine, "relates_to_id", event_ids
) )
if self._msc3440_enabled:
relations_clause = "(relation_type = ? OR relation_type = ?)"
args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
else:
relations_clause = "relation_type = ?"
args.append(RelationTypes.THREAD) args.append(RelationTypes.THREAD)
txn.execute(sql % (clause, relations_clause), args) txn.execute(sql % (clause,), args)
latest_event_ids = {} latest_event_ids = {}
for parent_event_id, child_event_id in txn: for parent_event_id, child_event_id in txn:
# Only consider the latest threaded reply (by topological ordering). # Only consider the latest threaded reply (by topological ordering).
@ -547,7 +522,7 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id AND parent.room_id = child.room_id
WHERE WHERE
%s %s
AND %s AND relation_type = ?
GROUP BY parent.event_id GROUP BY parent.event_id
""" """
@ -556,15 +531,9 @@ class RelationsWorkerStore(SQLBaseStore):
clause, args = make_in_list_sql_clause( clause, args = make_in_list_sql_clause(
txn.database_engine, "relates_to_id", latest_event_ids.keys() txn.database_engine, "relates_to_id", latest_event_ids.keys()
) )
if self._msc3440_enabled:
relations_clause = "(relation_type = ? OR relation_type = ?)"
args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
else:
relations_clause = "relation_type = ?"
args.append(RelationTypes.THREAD) args.append(RelationTypes.THREAD)
txn.execute(sql % (clause, relations_clause), args) txn.execute(sql % (clause,), args)
counts = dict(cast(List[Tuple[str, int]], txn.fetchall())) counts = dict(cast(List[Tuple[str, int]], txn.fetchall()))
return counts, latest_event_ids return counts, latest_event_ids
@ -622,7 +591,7 @@ class RelationsWorkerStore(SQLBaseStore):
parent.event_id = relates_to_id parent.event_id = relates_to_id
AND parent.room_id = child.room_id AND parent.room_id = child.room_id
WHERE WHERE
%s relation_type = ?
AND %s AND %s
AND %s AND %s
GROUP BY parent.event_id, child.sender GROUP BY parent.event_id, child.sender
@ -638,16 +607,9 @@ class RelationsWorkerStore(SQLBaseStore):
txn.database_engine, "relates_to_id", event_ids txn.database_engine, "relates_to_id", event_ids
) )
if self._msc3440_enabled:
relations_clause = "(relation_type = ? OR relation_type = ?)"
relations_args = [RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD]
else:
relations_clause = "relation_type = ?"
relations_args = [RelationTypes.THREAD]
txn.execute( txn.execute(
sql % (users_sql, events_clause, relations_clause), sql % (users_sql, events_clause),
users_args + events_args + relations_args, [RelationTypes.THREAD] + users_args + events_args,
) )
return {(row[0], row[1]): row[2] for row in txn} return {(row[0], row[1]): row[2] for row in txn}
@ -677,7 +639,7 @@ class RelationsWorkerStore(SQLBaseStore):
user participated in that event's thread, otherwise false. user participated in that event's thread, otherwise false.
""" """
def _get_thread_summary_txn(txn: LoggingTransaction) -> Set[str]: def _get_threads_participated_txn(txn: LoggingTransaction) -> Set[str]:
# Fetch whether the requester has participated or not. # Fetch whether the requester has participated or not.
sql = """ sql = """
SELECT DISTINCT relates_to_id SELECT DISTINCT relates_to_id
@ -688,28 +650,20 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id AND parent.room_id = child.room_id
WHERE WHERE
%s %s
AND %s AND relation_type = ?
AND child.sender = ? AND child.sender = ?
""" """
clause, args = make_in_list_sql_clause( clause, args = make_in_list_sql_clause(
txn.database_engine, "relates_to_id", event_ids txn.database_engine, "relates_to_id", event_ids
) )
args.extend([RelationTypes.THREAD, user_id])
if self._msc3440_enabled: txn.execute(sql % (clause,), args)
relations_clause = "(relation_type = ? OR relation_type = ?)"
args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
else:
relations_clause = "relation_type = ?"
args.append(RelationTypes.THREAD)
args.append(user_id)
txn.execute(sql % (clause, relations_clause), args)
return {row[0] for row in txn.fetchall()} return {row[0] for row in txn.fetchall()}
participated_threads = await self.db_pool.runInteraction( participated_threads = await self.db_pool.runInteraction(
"get_thread_summary", _get_thread_summary_txn "get_threads_participated", _get_threads_participated_txn
) )
return {event_id: event_id in participated_threads for event_id in event_ids} return {event_id: event_id in participated_threads for event_id in event_ids}

View file

@ -1077,6 +1077,37 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
get_rooms_for_retention_period_in_range_txn, get_rooms_for_retention_period_in_range_txn,
) )
async def clear_partial_state_room(self, room_id: str) -> bool:
# this can race with incoming events, so we watch out for FK errors.
# TODO(faster_joins): this still doesn't completely fix the race, since the persist process
# is not atomic. I fear we need an application-level lock.
try:
await self.db_pool.runInteraction(
"clear_partial_state_room", self._clear_partial_state_room_txn, room_id
)
return True
except self.db_pool.engine.module.DatabaseError as e:
# TODO(faster_joins): how do we distinguish between FK errors and other errors?
logger.warning(
"Exception while clearing lazy partial-state-room %s, retrying: %s",
room_id,
e,
)
return False
@staticmethod
def _clear_partial_state_room_txn(txn: LoggingTransaction, room_id: str) -> None:
DatabasePool.simple_delete_txn(
txn,
table="partial_state_rooms_servers",
keyvalues={"room_id": room_id},
)
DatabasePool.simple_delete_one_txn(
txn,
table="partial_state_rooms",
keyvalues={"room_id": room_id},
)
class _BackgroundUpdates: class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
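Because event persistence can insert new partial_state_events rows referencing partial_state_rooms while the delete runs, clear_partial_state_room is allowed to fail on a foreign-key error and returns False so the caller can retry later. A generic retry wrapper in the same spirit (hypothetical; the real retry cadence lives in the calling resync logic):

import asyncio
from typing import Awaitable, Callable

async def retry_until_cleared(
    clear_partial_state_room: Callable[[str], Awaitable[bool]],
    room_id: str,
    delay_seconds: float = 1.0,
    max_attempts: int = 10,
) -> bool:
    """Sketch: keep retrying a best-effort delete that may lose a race
    against event persistence (signalled by a False return value)."""
    for _ in range(max_attempts):
        if await clear_partial_state_room(room_id):
            return True
        # The delete hit a foreign-key error; wait and try again.
        await asyncio.sleep(delay_seconds)
    return False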

View file

@ -21,6 +21,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage._base import SQLBaseStore from synapse.storage._base import SQLBaseStore
from synapse.storage.database import ( from synapse.storage.database import (
DatabasePool, DatabasePool,
@ -129,7 +130,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
) )
if room_version is None: if room_version is None:
raise NotFoundError("Could not room_version for %s" % (room_id,)) raise NotFoundError("Could not find room_version for %s" % (room_id,))
return room_version return room_version
@ -354,6 +355,54 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
return {row["state_group"] for row in rows} return {row["state_group"] for row in rows}
async def update_state_for_partial_state_event(
self,
event: EventBase,
context: EventContext,
) -> None:
"""Update the state group for a partial state event"""
await self.db_pool.runInteraction(
"update_state_for_partial_state_event",
self._update_state_for_partial_state_event_txn,
event,
context,
)
def _update_state_for_partial_state_event_txn(
self,
txn,
event: EventBase,
context: EventContext,
):
# we shouldn't have any outliers here
assert not event.internal_metadata.is_outlier()
# anything that was rejected should have the same state as its
# predecessor.
if context.rejected:
assert context.state_group == context.state_group_before_event
self.db_pool.simple_update_txn(
txn,
table="event_to_state_groups",
keyvalues={"event_id": event.event_id},
updatevalues={"state_group": context.state_group},
)
self.db_pool.simple_delete_one_txn(
txn,
table="partial_state_events",
keyvalues={"event_id": event.event_id},
)
# TODO(faster_joins): need to do something about workers here
txn.call_after(self.is_partial_state_event.invalidate, (event.event_id,))
txn.call_after(
self._get_state_group_for_event.prefill,
(event.event_id,),
context.state_group,
)
class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
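The transaction in update_state_for_partial_state_event performs two writes: point the event at its full state group in event_to_state_groups, then drop its marker row from partial_state_events. A self-contained sqlite3 sketch of those statements:

import sqlite3

def un_partial_state_event_txn(conn: sqlite3.Connection, event_id: str, state_group: int) -> None:
    """Sketch of the two writes above: record the event's full state group,
    then remove its partial_state_events marker row."""
    conn.execute(
        "UPDATE event_to_state_groups SET state_group = ? WHERE event_id = ?",
        (state_group, event_id),
    )
    conn.execute(
        "DELETE FROM partial_state_events WHERE event_id = ?",
        (event_id,),
    )

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE event_to_state_groups (event_id TEXT, state_group INTEGER)")
conn.execute("CREATE TABLE partial_state_events (event_id TEXT)")
conn.execute("INSERT INTO event_to_state_groups VALUES ('$e', 1)")
conn.execute("INSERT INTO partial_state_events VALUES ('$e')")
un_partial_state_event_txn(conn, "$e", 42)
assert conn.execute("SELECT state_group FROM event_to_state_groups").fetchone() == (42,)
assert conn.execute("SELECT * FROM partial_state_events").fetchall() == []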

View file

@ -758,6 +758,32 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"get_room_event_before_stream_ordering", _f "get_room_event_before_stream_ordering", _f
) )
async def get_last_event_in_room_before_stream_ordering(
self,
room_id: str,
end_token: RoomStreamToken,
) -> Optional[EventBase]:
"""Returns the last event in a room at or before a stream ordering
Args:
room_id
end_token: The token used to stream from
Returns:
The most recent event.
"""
last_row = await self.get_room_event_before_stream_ordering(
room_id=room_id,
stream_ordering=end_token.stream,
)
if last_row:
_, _, event_id = last_row
event = await self.get_event(event_id, get_prev_content=True)
return event
return None
async def get_current_room_stream_token_for_room_id( async def get_current_room_stream_token_for_room_id(
self, room_id: Optional[str] = None self, room_id: Optional[str] = None
) -> RoomStreamToken: ) -> RoomStreamToken:

View file

@ -376,6 +376,62 @@ class EventsPersistenceStorage:
pos = PersistedEventPosition(self._instance_name, event_stream_id) pos = PersistedEventPosition(self._instance_name, event_stream_id)
return event, pos, self.main_store.get_room_max_token() return event, pos, self.main_store.get_room_max_token()
async def update_current_state(self, room_id: str) -> None:
"""Recalculate the current state for a room, and persist it"""
state = await self._calculate_current_state(room_id)
delta = await self._calculate_state_delta(room_id, state)
# TODO(faster_joins): get a real stream ordering, to make this work correctly
# across workers.
#
# TODO(faster_joins): this can race against event persistence, in which case we
# will end up with incorrect state. Perhaps we should make this a job we
# farm out to the event persister, somehow.
stream_id = self.main_store.get_room_max_stream_ordering()
await self.persist_events_store.update_current_state(room_id, delta, stream_id)
async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
"""Calculate the current state of a room, based on the forward extremities
Args:
room_id: room for which to calculate current state
Returns:
map from (type, state_key) to event id for the current state in the room
"""
latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id)
state_groups = set(
(
await self.main_store._get_state_group_for_events(latest_event_ids)
).values()
)
state_maps_by_state_group = await self.state_store._get_state_for_groups(
state_groups
)
if len(state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
return state_maps_by_state_group[state_groups.pop()]
# Ok, we need to defer to the state handler to resolve our state sets.
logger.debug("calling resolve_state_groups from preserve_events")
# Avoid a circular import.
from synapse.state import StateResolutionStore
room_version = await self.main_store.get_room_version_id(room_id)
res = await self._state_resolution_handler.resolve_state_groups(
room_id,
room_version,
state_maps_by_state_group,
event_map=None,
state_res_store=StateResolutionStore(self.main_store),
)
return res.state
async def _persist_event_batch( async def _persist_event_batch(
self, self,
events_and_contexts: List[Tuple[EventBase, EventContext]], events_and_contexts: List[Tuple[EventBase, EventContext]],
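The shape of _calculate_current_state is: look up the state group of each forward extremity; if they all share a single state group, that group's state map is the current state; otherwise the state maps are handed to state resolution. A schematic version of that branch, with the storage layer abstracted into injected callables (names here are illustrative):

from typing import Awaitable, Callable, Dict, List, Mapping, Tuple

StateMap = Mapping[Tuple[str, str], str]  # (event type, state key) -> event id

async def calculate_current_state(
    get_forward_extremities: Callable[[str], Awaitable[List[str]]],
    get_state_groups: Callable[[List[str]], Awaitable[Dict[str, int]]],
    get_state_for_groups: Callable[[set], Awaitable[Dict[int, StateMap]]],
    resolve_state: Callable[[List[StateMap]], Awaitable[StateMap]],
    room_id: str,
) -> StateMap:
    """Sketch of _calculate_current_state with the storage layer injected."""
    extremities = await get_forward_extremities(room_id)
    state_groups = set((await get_state_groups(extremities)).values())
    state_by_group = await get_state_for_groups(state_groups)

    if len(state_groups) == 1:
        # A single state group means there is nothing to resolve.
        return state_by_group[state_groups.pop()]

    # Multiple forward extremities: defer to state resolution.
    return await resolve_state(list(state_by_group.values()))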

View file

@ -66,9 +66,9 @@ Changes in SCHEMA_VERSION = 69:
SCHEMA_COMPAT_VERSION = ( SCHEMA_COMPAT_VERSION = (
# we now have `state_key` columns in both `events` and `state_events`, so # We now assume that `device_lists_changes_in_room` has been filled out for
# now incompatible with synapses with SCHEMA_VERSION < 66. # recent device_list_updates.
66 69
) )
"""Limit on how far the synapse codebase can be rolled back without breaking db compat """Limit on how far the synapse codebase can be rolled back without breaking db compat

View file

@ -31,6 +31,7 @@ from frozendict import frozendict
from synapse.api.constants import EventTypes from synapse.api.constants import EventTypes
from synapse.events import EventBase from synapse.events import EventBase
from synapse.storage.util.partial_state_events_tracker import PartialStateEventsTracker
from synapse.types import MutableStateMap, StateKey, StateMap from synapse.types import MutableStateMap, StateKey, StateMap
if TYPE_CHECKING: if TYPE_CHECKING:
@ -542,6 +543,10 @@ class StateGroupStorage:
def __init__(self, hs: "HomeServer", stores: "Databases"): def __init__(self, hs: "HomeServer", stores: "Databases"):
self.stores = stores self.stores = stores
self._partial_state_events_tracker = PartialStateEventsTracker(stores.main)
def notify_event_un_partial_stated(self, event_id: str) -> None:
self._partial_state_events_tracker.notify_un_partial_stated(event_id)
async def get_state_group_delta( async def get_state_group_delta(
self, state_group: int self, state_group: int
@ -579,7 +584,7 @@ class StateGroupStorage:
if not event_ids: if not event_ids:
return {} return {}
event_to_groups = await self.stores.main._get_state_group_for_events(event_ids) event_to_groups = await self._get_state_group_for_events(event_ids)
groups = set(event_to_groups.values()) groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups(groups) group_to_state = await self.stores.state._get_state_for_groups(groups)
@ -668,7 +673,7 @@ class StateGroupStorage:
RuntimeError if we don't have a state group for one or more of the events RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown) (ie they are outliers or unknown)
""" """
event_to_groups = await self.stores.main._get_state_group_for_events(event_ids) event_to_groups = await self._get_state_group_for_events(event_ids)
groups = set(event_to_groups.values()) groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups( group_to_state = await self.stores.state._get_state_for_groups(
@ -709,7 +714,7 @@ class StateGroupStorage:
RuntimeError if we don't have a state group for one or more of the events RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown) (ie they are outliers or unknown)
""" """
event_to_groups = await self.stores.main._get_state_group_for_events(event_ids) event_to_groups = await self._get_state_group_for_events(event_ids)
groups = set(event_to_groups.values()) groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups( group_to_state = await self.stores.state._get_state_for_groups(
@ -785,6 +790,23 @@ class StateGroupStorage:
groups, state_filter or StateFilter.all() groups, state_filter or StateFilter.all()
) )
async def _get_state_group_for_events(
self,
event_ids: Collection[str],
await_full_state: bool = True,
) -> Mapping[str, int]:
"""Returns mapping event_id -> state_group
Args:
event_ids: events to get state groups for
await_full_state: if true, will block if we do not yet have complete
state at this event.
"""
if await_full_state:
await self._partial_state_events_tracker.await_full_state(event_ids)
return await self.stores.main._get_state_group_for_events(event_ids)
async def store_state_group( async def store_state_group(
self, self,
event_id: str, event_id: str,

View file

@ -0,0 +1,120 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from typing import Collection, Dict, Set
from twisted.internet import defer
from twisted.internet.defer import Deferred
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.util import unwrapFirstError
logger = logging.getLogger(__name__)
class PartialStateEventsTracker:
"""Keeps track of which events have partial state, after a partial-state join"""
def __init__(self, store: EventsWorkerStore):
self._store = store
# a map from event id to a set of Deferreds which are waiting for that event to be
# un-partial-stated.
self._observers: Dict[str, Set[Deferred[None]]] = defaultdict(set)
def notify_un_partial_stated(self, event_id: str) -> None:
"""Notify that we now have full state for a given event
Called by the state-resynchronization loop whenever we resynchronize the state
for a particular event. Unblocks any callers to await_full_state() for that
event.
Args:
event_id: the event that now has full state.
"""
observers = self._observers.pop(event_id, None)
if not observers:
return
logger.info(
"Notifying %i things waiting for un-partial-stating of event %s",
len(observers),
event_id,
)
with PreserveLoggingContext():
for o in observers:
o.callback(None)
async def await_full_state(self, event_ids: Collection[str]) -> None:
"""Wait for all the given events to have full state.
Args:
event_ids: the list of event ids that we want full state for
"""
# first try the happy path: if there are no partial-state events, we can return
# quickly
partial_state_event_ids = [
ev
for ev, p in (await self._store.get_partial_state_events(event_ids)).items()
if p
]
if not partial_state_event_ids:
return
logger.info(
"Awaiting un-partial-stating of events %s",
partial_state_event_ids,
stack_info=True,
)
# create an observer for each lazy-joined event
observers: Dict[str, Deferred[None]] = {
event_id: Deferred() for event_id in partial_state_event_ids
}
for event_id, observer in observers.items():
self._observers[event_id].add(observer)
try:
# some of them may have been un-lazy-joined between us checking the db and
# registering the observer, in which case we'd wait forever for the
# notification. Call back the observers now.
for event_id, partial in (
await self._store.get_partial_state_events(observers.keys())
).items():
# there may have been a call to notify_un_partial_stated during the
# db query, so the observers may already have been called.
if not partial and not observers[event_id].called:
observers[event_id].callback(None)
await make_deferred_yieldable(
defer.gatherResults(
observers.values(),
consumeErrors=True,
)
).addErrback(unwrapFirstError)
logger.info("Events %s all un-partial-stated", observers.keys())
finally:
# remove any observers we created. This should happen when the notification
# is received, but that might not happen for two reasons:
# (a) we're bailing out early on an exception (including us being
# cancelled during the await)
# (b) the event got de-lazy-joined before we set up the observer.
for event_id, observer in observers.items():
observer_set = self._observers.get(event_id)
if observer_set:
observer_set.discard(observer)
if not observer_set:
del self._observers[event_id]
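
For orientation, here is a minimal sketch of how the new tracker is meant to be driven. The mocked store, the event id, and the helper names are assumptions made for illustration; they are not part of this change, and the coroutines are expected to be run as Deferreds under Twisted (e.g. via defer.ensureDeferred).

from unittest.mock import AsyncMock

# Hypothetical sketch: a store that still reports "$event_a" as having partial state.
store = AsyncMock()
store.get_partial_state_events.return_value = {"$event_a": True}

tracker = PartialStateEventsTracker(store)  # the class defined above


async def read_state_when_ready() -> None:
    # Blocks until notify_un_partial_stated("$event_a") is called, e.g. by the
    # state-resynchronization loop once it has persisted full state.
    await tracker.await_full_state(["$event_a"])


def on_resync_complete() -> None:
    # Wakes up anything blocked in read_state_when_ready().
    tracker.notify_un_partial_stated("$event_a")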

View file

@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import abc
+import asyncio
 import collections
 import inspect
 import itertools
@@ -25,6 +26,7 @@ from typing import (
     Awaitable,
     Callable,
     Collection,
+    Coroutine,
     Dict,
     Generic,
     Hashable,
@@ -701,27 +703,57 @@ def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
     return new_deferred
 
 
-def delay_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
-    """Delay cancellation of a `Deferred` until it resolves.
+@overload
+def delay_cancellation(awaitable: "defer.Deferred[T]") -> "defer.Deferred[T]":
+    ...
+
+
+@overload
+def delay_cancellation(awaitable: Coroutine[Any, Any, T]) -> "defer.Deferred[T]":
+    ...
+
+
+@overload
+def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:
+    ...
+
+
+def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:
+    """Delay cancellation of a coroutine or `Deferred` awaitable until it resolves.
 
     Has the same effect as `stop_cancellation`, but the returned `Deferred` will not
-    resolve with a `CancelledError` until the original `Deferred` resolves.
+    resolve with a `CancelledError` until the original awaitable resolves.
 
     Args:
-        deferred: The `Deferred` to protect against cancellation. May optionally follow
-            the Synapse logcontext rules.
+        deferred: The coroutine or `Deferred` to protect against cancellation. May
+            optionally follow the Synapse logcontext rules.
 
     Returns:
-        A new `Deferred`, which will contain the result of the original `Deferred`.
-        The new `Deferred` will not propagate cancellation through to the original.
-        When cancelled, the new `Deferred` will wait until the original `Deferred`
-        resolves before failing with a `CancelledError`.
+        A new `Deferred`, which will contain the result of the original coroutine or
+        `Deferred`. The new `Deferred` will not propagate cancellation through to the
+        original coroutine or `Deferred`.
 
-        The new `Deferred` will follow the Synapse logcontext rules if `deferred`
+        When cancelled, the new `Deferred` will wait until the original coroutine or
+        `Deferred` resolves before failing with a `CancelledError`.
+
+        The new `Deferred` will follow the Synapse logcontext rules if `awaitable`
         follows the Synapse logcontext rules. Otherwise the new `Deferred` should be
         wrapped with `make_deferred_yieldable`.
     """
 
+    # First, convert the awaitable into a `Deferred`.
+    if isinstance(awaitable, defer.Deferred):
+        deferred = awaitable
+    elif asyncio.iscoroutine(awaitable):
+        # Ideally we'd use `Deferred.fromCoroutine()` here, to save on redundant
+        # type-checking, but we'd need Twisted >= 21.2.
+        deferred = defer.ensureDeferred(awaitable)
+    else:
+        # We have no idea what to do with this awaitable.
+        # We assume it's already resolved, such as `DoneAwaitable`s or `Future`s from
+        # `make_awaitable`, and let the caller `await` it normally.
+        return awaitable
+
     def handle_cancel(new_deferred: "defer.Deferred[T]") -> None:
         # before the new deferred is cancelled, we `pause` it to stop the cancellation
         # propagating. we then `unpause` it once the wrapped deferred completes, to
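
A rough usage sketch of the widened signature follows; the work() coroutine is invented for illustration and is not taken from this change.

# Hypothetical sketch: delay_cancellation (shown above) now accepts a coroutine
# directly and wraps it in a Deferred.
async def work() -> str:
    ...  # some awaitable operation, details assumed
    return "done"


protected = delay_cancellation(work())

# If `protected` is cancelled while work() is still running, the cancellation is
# not propagated into work(); the caller's CancelledError is only delivered once
# work() itself has resolved.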

View file

@@ -427,6 +427,13 @@ async def _event_to_memberships(
         return {}
 
     # for each event, get the event_ids of the membership state at those events.
+    #
+    # TODO: this means that we request the entire membership list. If there are only
+    # one or two users on this server, and the room is huge, this is very wasteful
+    # (it means more db work, and churns the *stateGroupMembersCache*).
+    # It might be that we could extend StateFilter to specify "give me keys matching
+    # *:<server_name>", to avoid this.
     event_to_state_ids = await storage.state.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
         state_filter=StateFilter.from_types(types=((EventTypes.Member, None),)),
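
For context on the filter built here: StateFilter.from_types takes (event type, state key) pairs, and a None state key matches every state key of that type. A brief sketch, assuming the import paths in this tree and an invented user id:

from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter

# Matches every m.room.member event in the state; this is what the code above
# requests, and what the new TODO flags as potentially wasteful in large rooms.
all_memberships = StateFilter.from_types([(EventTypes.Member, None)])

# An explicit state key narrows the query to a single membership.
one_membership = StateFilter.from_types([(EventTypes.Member, "@alice:example.org")])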

View file

@@ -21,10 +21,6 @@ Newly created users see their own presence in /initialSync (SYT-34)
 # Blacklisted due to https://github.com/matrix-org/synapse/issues/1396
 Should reject keys claiming to belong to a different user
 
-# Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing
-# this requirement from the spec
-Inbound federation of state requires event_id as a mandatory paramater
-
 # Blacklisted until MSC2753 is implemented
 Local users can peek into world_readable rooms by room ID
 We can't peek into rooms with shared history_visibility

View file

@@ -481,9 +481,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
         # events). This is a bit cheeky, but tests the logic of _check_event_relations.
 
         # Filter for a particular sender.
-        definition = {
-            "io.element.relation_senders": ["@foo:bar"],
-        }
+        definition = {"related_by_senders": ["@foo:bar"]}
 
         async def events_have_relations(*args, **kwargs):
             return ["$with_relation"]

View file

@@ -14,7 +14,6 @@
 from typing import Optional
 from unittest.mock import Mock
 
-from parameterized import parameterized_class
 from signedjson import key, sign
 from signedjson.types import BaseKey, SigningKey
@@ -155,12 +154,6 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
         )
 
 
-@parameterized_class(
-    [
-        {"enable_room_poke_code_path": False},
-        {"enable_room_poke_code_path": True},
-    ]
-)
 class FederationSenderDevicesTestCases(HomeserverTestCase):
     servlets = [
         admin.register_servlets,
@@ -169,13 +162,14 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
 
     def make_homeserver(self, reactor, clock):
         return self.setup_test_homeserver(
-            federation_transport_client=Mock(spec=["send_transaction"]),
+            federation_transport_client=Mock(
+                spec=["send_transaction", "query_user_devices"]
+            ),
         )
 
     def default_config(self):
         c = super().default_config()
         c["send_federation"] = True
-        c["use_new_device_lists_changes_in_room"] = self.enable_room_poke_code_path
         return c
 
     def prepare(self, reactor, clock, hs):
@@ -226,6 +220,45 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         self.assertEqual(len(self.edus), 1)
         self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)
 
+    def test_dont_send_device_updates_for_remote_users(self):
+        """Check that we don't send device updates for remote users"""
+
+        # Send the server a device list EDU for the other user, this will cause
+        # it to try and resync the device lists.
+        self.hs.get_federation_transport_client().query_user_devices.return_value = (
+            defer.succeed(
+                {
+                    "stream_id": "1",
+                    "user_id": "@user2:host2",
+                    "devices": [{"device_id": "D1"}],
+                }
+            )
+        )
+
+        self.get_success(
+            self.hs.get_device_handler().device_list_updater.incoming_device_list_update(
+                "host2",
+                {
+                    "user_id": "@user2:host2",
+                    "device_id": "D1",
+                    "stream_id": "1",
+                    "prev_ids": [],
+                },
+            )
+        )
+        self.reactor.advance(1)
+
+        # We shouldn't see an EDU for that update
+        self.assertEqual(self.edus, [])
+
+        # Check that we did successfully process the inbound EDU (otherwise this
+        # test would pass if we failed to process the EDU)
+        devices = self.get_success(
+            self.hs.get_datastores().main.get_cached_devices_for_user("@user2:host2")
+        )
+        self.assertIn("D1", devices)
+
     def test_upload_signatures(self):
         """Uploading signatures on some devices should produce updates for that user"""

Some files were not shown because too many files have changed in this diff.