Merge remote-tracking branch 'upstream/release-v1.57'

Tulir Asokan 2022-04-21 13:53:47 +03:00
commit b2fa6ec9f6
248 changed files with 14616 additions and 8934 deletions


@ -2,29 +2,24 @@
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
@ -37,14 +32,14 @@ else
fi
# Create the PostgreSQL database.
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory


@ -1,6 +1,9 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it installs the
# minimal requirements for tox and hands over to the py3-old tox environment.
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
@ -9,12 +12,70 @@ set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
exec tox -e py3-old
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Pin pyOpenSSL to 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
# runs on 3.8 (#12343).
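# (A possible refinement, sketched here untested: exempt the python line from
# the caret replacement, e.g.
#   sed -i -e '/^python = /!s/\^/==/g' pyproject.toml
# which would leave `python = "^3.7"` untouched.)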
sed -i \
-e "s/[~>]=/==/g" \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests


@ -1,43 +1,37 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - sets up synapse and deps
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2 coverage coverage-enable-subprocess
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
@ -48,12 +42,12 @@ echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
.ci/scripts/postgres_exec.py \
poetry run .ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml


@ -4,8 +4,12 @@
# things to include
!docker
!synapse
!MANIFEST.in
!README.rst
!pyproject.toml
!poetry.lock
# TODO: remove these once we have moved over to using poetry-core in pyproject.toml
!MANIFEST.in
!setup.py
**/__pycache__


@ -22,7 +22,7 @@ jobs:
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.9'
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.


@ -27,7 +27,6 @@ jobs:
- "check_codestyle"
- "check_isort"
- "mypy"
- "packaging"
steps:
- uses: actions/checkout@v2
@ -129,6 +128,7 @@ jobs:
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
@ -136,11 +136,11 @@ jobs:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using Python 3.8, but the oldest version we support is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
with:
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@ -271,9 +271,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: actions/setup-python@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.9"
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
portdb:
@ -308,9 +309,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: actions/setup-python@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
complement:
@ -362,27 +364,11 @@ jobs:
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
# Build initial Synapse image
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
working-directory: synapse
env:
DOCKER_BUILDKIT: 1
# Build a ready-to-run Synapse image based on the initial image above.
# This new image includes a config file, keys for signing and TLS, and
# other settings to make it suitable for testing under Complement.
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
working-directory: complement/dockerfiles
# Run Complement
- run: |
set -o pipefail
go test -v -json -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
env:
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
working-directory: complement
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:

.gitignore (7 lines changed)

@ -15,6 +15,10 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry lockfile. TODO: is there a good reason for ignoring
# '*.lock' above? If not, let's nuke it.
!poetry.lock
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
@ -30,6 +34,9 @@ __pycache__/
/media_store/
/uploads
# For direnv users
/.envrc
# IDEs
/.idea/
/.ropeproject/

CHANGES.md (5882 lines changed): diff suppressed because it is too large.

debian/changelog (18 lines changed)

@ -1,3 +1,21 @@
matrix-synapse-py3 (1.57.1) stable; urgency=medium
* New synapse release 1.57.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Apr 2022 15:27:21 +0100
matrix-synapse-py3 (1.57.0) stable; urgency=medium
* New synapse release 1.57.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Apr 2022 10:58:42 +0100
matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium
* New synapse release 1.57.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Apr 2022 13:36:25 +0100
matrix-synapse-py3 (1.56.0) stable; urgency=medium
* New synapse release 1.56.0.


@ -14,20 +14,61 @@
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 .
#
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
# `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
ARG PYTHON_VERSION=3.9
###
### Stage 0: builder
### Stage 0: generate requirements.txt
###
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock README.rst /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
###
### Stage 1: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
#
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt, to improve rebuild speeds on
# slow connections.
#
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@ -45,30 +86,27 @@ RUN \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Copy just what we need to pip install
COPY MANIFEST.in README.rst setup.py /synapse/
COPY synapse/__init__.py /synapse/synapse/__init__.py
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project so that we this layer in the Docker cache can be
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
#
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
# This is aiming at installing the `[tool.poetry.dependencies]` from pyproject.toml.
COPY --from=requirements /synapse/requirements.txt /synapse/
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --prefix="/install" --no-warn-script-location \
/synapse[all]
pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt
# Copy over the rest of the project
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
# pyproject.toml here, ditching setup.py and MANIFEST.in.
COPY setup.py MANIFEST.in README.rst /synapse/
# Install the synapse package itself and all of its children packages.
#
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`.
# Install the synapse package itself.
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
###
### Stage 1: runtime
### Stage 2: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim


@ -1,30 +0,0 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:focal
# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox
# Initialise the db
RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
# Add a user with our UID and GID so that files get created on the host owned
# by us, not root.
ARG UID
ARG GID
RUN groupadd --gid $GID user
RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
# Ensure we can start postgres by sudo-ing as the postgres user.
RUN apt-get update && apt-get -qq install -y sudo
RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
ADD run_pg_tests.sh /run_pg_tests.sh
# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
# so that we can `docker run` this container and pass arguments to pg_tests.sh
ENTRYPOINT ["/run_pg_tests.sh"]
USER user


@ -14,9 +14,6 @@ COPY ./docker/conf-workers/* /conf/
# Expose nginx listener port
EXPOSE 8080/tcp
# Volume for user-editable config files, logs etc.
VOLUME ["/data"]
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py


@ -10,10 +10,10 @@ Note that running Synapse's unit tests from within the docker image is not suppo
## Testing with SQLite and single-process Synapse
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process of Synapse against Complement.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
@ -26,23 +26,22 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement. You will need a local checkout
of Complement. Change to the root of your Complement checkout and run:
Next, build the Synapse image for Complement.
```sh
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
```
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
@ -55,7 +54,7 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
@ -64,21 +63,20 @@ docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for testing against locally. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
the root of your Complement checkout and run:
`matrixdotorg/synapse-workers`.
```sh
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
```
This will build an image with the tag `complement-synapse`, which can be handed to
This will build an image with the tag `complement-synapse-workers`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
@ -91,10 +89,10 @@ bundling all necessary components together for a workerised homeserver instance.
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.
Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:
@ -112,7 +110,7 @@ docker run -d --name synapse \
matrixdotorg/synapse-workers
```
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
@ -130,11 +128,11 @@ Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no
(leaving only the main process). The container is configured to use redis-based worker
mode.
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.


@ -0,0 +1,22 @@
# A dockerfile which builds an image suitable for testing Synapse under
# complement.
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
ENV SERVER_NAME=localhost
COPY conf/* /conf/
# generate a signing key
RUN generate_signing_key -o /conf/server.signing.key
WORKDIR /data
EXPOSE 8008 8448
ENTRYPOINT ["/conf/start.sh"]
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD curl -fSs http://localhost:8008/health || exit 1


@ -0,0 +1 @@
Stuff for building the docker image used for testing under complement.


@ -0,0 +1,73 @@
# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on are detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers
# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
# custom CA.
# We include this near the top of the file in order to cache the result.
RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
# Install postgresql
RUN apt-get update
RUN apt-get install -y postgresql
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
# Modify the shared homeserver config with postgres support, certificate setup
# and the disabling of rate-limiting
COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
WORKDIR /data
# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json
# Expose caddy's listener ports
EXPOSE 8008 8448
ENTRYPOINT \
# Replace the server name in the caddy config
sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
# Start postgres
pg_ctlcluster 13 main start 2>&1 && \
# Start caddy
/root/caddy start --config /root/caddy.json 2>&1 && \
# Set the server name of the homeserver
SYNAPSE_SERVER_NAME=${SERVER_NAME} \
# No need to report stats here
SYNAPSE_REPORT_STATS=no \
# Set postgres authentication details which will be placed in the homeserver config file
POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
# Specify the workers to test with
SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher" \
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
/configure_workers_and_start.py
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh


@ -0,0 +1,72 @@
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8448"
],
"routes": [
{
"match": [
{
"host": [
"{{ server_name }}"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "localhost:8008"
}
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"{{ server_name }}"
],
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
},
"pki": {
"certificate_authorities": {
"local": {
"name": "Complement CA",
"root": {
"certificate": "/complement/ca/ca.crt",
"private_key": "/complement/ca/ca.key"
}
}
}
}
}
}


@ -0,0 +1,72 @@
## Server ##
report_stats: False
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
## Federation ##
# trust certs signed by Complement's CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
federation_ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# Enable spaces support
spaces_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"


@ -0,0 +1,115 @@
## Server ##
server_name: SERVER_NAME
log_config: /conf/log_config.yaml
report_stats: False
signing_key_path: /conf/server.signing.key
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
## Listeners ##
tls_certificate_path: /conf/server.tls.crt
tls_private_key_path: /conf/server.tls.key
bcrypt_rounds: 4
registration_shared_secret: complement
listeners:
- port: 8448
bind_addresses: ['::']
type: http
tls: true
resources:
- names: [federation]
- port: 8008
bind_addresses: ['::']
type: http
resources:
- names: [client]
## Database ##
database:
name: "sqlite3"
args:
# We avoid /data, as it is a volume and is not transferred when the container is committed,
# and committing the container is a fundamental necessity in Complement.
database: "/conf/homeserver.db"
## Federation ##
# trust certs signed by the complement CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
## API Configuration ##
# A list of application service config files to use
#
app_service_config_files:
AS_REGISTRATION_FILES
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join
msc3706_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"


@ -0,0 +1,24 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
# log to stdout, for easier use with 'docker logs'
stream: 'ext://sys.stdout'
root:
level: INFO
handlers: [console]
disable_existing_loggers: false

docker/complement/conf/start.sh (new executable file, 30 lines)

@ -0,0 +1,30 @@
#!/bin/sh
set -e
sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
[ -f "$filename" ] || break
as_id=$(basename "$filename" .yaml)
# Insert the path to the registration file, followed by the AS_REGISTRATION_FILES marker,
# so we can add the next application service in the next iteration of this for loop
sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"


@ -29,6 +29,7 @@
import os
import subprocess
import sys
from typing import Any, Dict, Set
import jinja2
import yaml
@ -36,7 +37,7 @@ import yaml
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
WORKERS_CONFIG = {
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"pusher": {
"app": "synapse.app.pusher",
"listener_resources": [],
@ -307,7 +308,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
Args:
environ: _Environ[str]
config_path: Where to output the generated Synapse main worker config file.
config_path: The location of the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
"""
@ -320,7 +321,8 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result.
# another listener for replication. Later we'll write out the result to the shared
# config file.
listeners = [
{
"port": 9093,
@ -355,7 +357,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams = {}
nginx_upstreams: Dict[str, Set[int]] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
@ -384,7 +386,11 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter = {}
worker_type_counter: Dict[str, int] = {}
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
# For each worker type specified by the user, create config values
for worker_type in worker_types:
@ -404,12 +410,14 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": worker_port, "config_path": config_path}
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
@ -475,15 +483,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
# At the same time, prepare a list of internal endpoints to healthcheck
# starting with the main process which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += " server localhost:%d;\n" % (port,)
healthcheck_urls.append("http://localhost:%d/health" % (port,))
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(


@ -1,19 +0,0 @@
#!/usr/bin/env bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Start the database
sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=./.tox-pg-container -e py37-postgres "$@"


@ -108,7 +108,7 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
# Hopefully we already have a signing key, but generate one if not.
args = [
"python",
sys.executable,
"-m",
"synapse.app.homeserver",
"--config-path",
@ -158,7 +158,7 @@ def run_generate_config(environ, ownership):
# generate the main config file, and a signing key.
args = [
"python",
sys.executable,
"-m",
"synapse.app.homeserver",
"--server-name",
@ -175,7 +175,7 @@ def run_generate_config(environ, ownership):
"--open-private-ports",
]
# log("running %s" % (args, ))
os.execv("/usr/local/bin/python", args)
os.execv(sys.executable, args)
def main(args, environ):
@ -254,12 +254,12 @@ running with 'migrate_config'. See the README for more details.
log("Starting synapse with args " + " ".join(args))
args = ["python"] + args
args = [sys.executable] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execve("/usr/local/bin/python", args, environ)
os.execve(sys.executable, args, environ)
if __name__ == "__main__":


@ -45,6 +45,7 @@
- [Account validity callbacks](modules/account_validity_callbacks.md)
- [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
- [Background update controller callbacks](modules/background_update_controller_callbacks.md)
- [Account data callbacks](modules/account_data_callbacks.md)
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)

Three file diffs suppressed because they are too large.


@ -0,0 +1 @@
This directory contains changelogs for previous years.


@ -24,7 +24,7 @@ pip install -e ".[lint,mypy]"
functionality) with:
```sh
black . --exclude="\.tox|build|env"
black .
```
- **flake8**
@ -35,7 +35,7 @@ pip install -e ".[lint,mypy]"
Check all application and test code with:
```sh
flake8 synapse tests
flake8 .
```
- **isort**
@ -46,11 +46,9 @@ pip install -e ".[lint,mypy]"
Auto-fix imports with:
```sh
isort -rc synapse tests
isort .
```
`-rc` means to recursively search the given directories.
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient


@ -206,9 +206,10 @@ To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:
- `SYNAPSE_POSTGRES` to anything nonempty
- `SYNAPSE_POSTGRES_HOST`
- `SYNAPSE_POSTGRES_USER`
- `SYNAPSE_POSTGRES_PASSWORD`
- `SYNAPSE_POSTGRES_HOST` (optional if it's the default: UNIX socket)
- `SYNAPSE_POSTGRES_PORT` (optional if it's the default: 5432)
- `SYNAPSE_POSTGRES_USER` (optional if using a UNIX socket)
- `SYNAPSE_POSTGRES_PASSWORD` (optional if using a UNIX socket)
For example:
@ -220,26 +221,12 @@ export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
trial
```
#### Prebuilt container
You don't need to specify the host, user, port or password if your Postgres
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).
Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
Docker container to set up PostgreSQL and run our tests for us. To do so, run
Your Postgres account needs to be able to create databases.
```shell
scripts-dev/test_postgresql.sh
```
Any extra arguments to the script will be passed to `tox` and then to `trial`,
so we can run a specific test in this container with e.g.
```shell
scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
```
The container creates a folder in your Synapse checkout called
`.tox-pg-container` and uses this as a tox environment. The output of any
`trial` runs goes into `_trial_temp` in your synapse source directory — the same
as running `trial` directly on your host machine.
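As a quick sanity check (an illustrative sketch, not from the Synapse docs; the
database name below is arbitrary), you can confirm socket authentication and
database-creation rights with:

```shell
# Socket auth works if this succeeds without -h/-U/-W flags
psql -c 'SELECT current_user;'
# ...and your account can create (and drop) databases
createdb synapse_trial_check && dropdb synapse_trial_check
```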
## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
@ -254,8 +241,14 @@ configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
This configuration should generally cover your needs.
- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).


@ -39,7 +39,8 @@ yet correlated to the DAG.
Outliers typically arise when we fetch the auth chain or state for a given
event. When that happens, we just grab the events in the state/auth chain,
without calculating the state at those events, or backfilling their
`prev_events`.
`prev_events`. Since we don't have the state at any events fetched in that
way, we mark them as outliers.
So, typically, we won't have the `prev_events` of an `outlier` in the database,
(though it's entirely possible that we *might* have them for some other


@ -0,0 +1,106 @@
# Account data callbacks
Account data callbacks allow module developers to react to changes of the account data
of local users. Account data callbacks can be registered using the module API's
`register_account_data_callbacks` method.
## Callbacks
The available account data callbacks are:
### `on_account_data_updated`
_First introduced in Synapse v1.57.0_
```python
async def on_account_data_updated(
user_id: str,
room_id: Optional[str],
account_data_type: str,
content: "synapse.module_api.JsonDict",
) -> None:
```
Called after a user's account data has been updated. The module is given the
Matrix ID of the user whose account data is changing, the room ID the data is associated
with, the type associated with the change, as well as the new content. If the account
data is not associated with a specific room, then the room ID is `None`.
This callback is triggered when new account data is added or when the data associated with
a given type (and optionally room) changes. This includes deletion, since in Matrix,
deleting account data consists of replacing the data associated with a given type
(and optionally room) with an empty dictionary (`{}`).
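For instance (an illustrative invocation, not actual Synapse internals), deleting the
`m.fully_read` marker for a room would surface to the callback as:

```python
await on_account_data_updated(
    user_id="@alice:example.com",
    room_id="!somewhere:example.com",
    account_data_type="m.fully_read",
    content={},  # a deletion arrives as an empty dict
)
```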
Note that this doesn't trigger when changing the tags associated with a room, as these are
processed separately by Synapse.
If multiple modules implement this callback, Synapse runs them all in order.
## Example
The example below is a module that implements the `on_account_data_updated` callback, and
sends an event to an audit room when a user changes their account data.
```python
import json
import attr
from typing import Any, Dict, Optional
from synapse.module_api import JsonDict, ModuleApi
from synapse.module_api.errors import ConfigError
@attr.s(auto_attribs=True)
class CustomAccountDataConfig:
audit_room: str
sender: str
class CustomAccountDataModule:
def __init__(self, config: CustomAccountDataConfig, api: ModuleApi):
self.api = api
self.config = config
self.api.register_account_data_callbacks(
on_account_data_updated=self.log_new_account_data,
)
@staticmethod
def parse_config(config: Dict[str, Any]) -> CustomAccountDataConfig:
def check_in_config(param: str):
if param not in config:
raise ConfigError(f"'{param}' is required")
check_in_config("audit_room")
check_in_config("sender")
return CustomAccountDataConfig(
audit_room=config["audit_room"],
sender=config["sender"],
)
async def log_new_account_data(
self,
user_id: str,
room_id: Optional[str],
account_data_type: str,
content: JsonDict,
) -> None:
content_raw = json.dumps(content)
msg_content = f"{user_id} has changed their account data for type {account_data_type} to: {content_raw}"
if room_id is not None:
msg_content += f" (in room {room_id})"
await self.api.create_and_send_event_into_room(
{
"room_id": self.config.audit_room,
"sender": self.config.sender,
"type": "m.room.message",
"content": {
"msgtype": "m.text",
"body": msg_content
}
}
)
```


@ -247,6 +247,24 @@ admin API.
If multiple modules implement this callback, Synapse runs them all in order.
### `on_threepid_bind`
_First introduced in Synapse v1.56.0_
```python
async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
```
Called after creating an association between a local user and a third-party identifier
(email address, phone number). The module is given the Matrix ID of the user the
association is for, as well as the medium (`email` or `msisdn`) and address of the
third-party identifier.
Note that this callback is _not_ called after a successful association on an _identity
server_.
If multiple modules implement this callback, Synapse runs them all in order.
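For illustration, a minimal module using this callback might look like the sketch
below (the class name and logging behaviour are hypothetical; registration is assumed
to go through the same `register_third_party_rules_callbacks` method as the callbacks
above):

```python
import logging

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)


class ThreepidAuditExample:
    def __init__(self, config: dict, api: ModuleApi):
        self.api = api
        self.api.register_third_party_rules_callbacks(
            on_threepid_bind=self.on_threepid_bind,
        )

    async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
        # Record the new association; a real module might forward it elsewhere.
        logger.info("%s bound %s address %s", user_id, medium, address)
```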
## Example
The example below is a module that implements the third-party rules callback


@ -33,7 +33,7 @@ A module can implement the following static method:
```python
@staticmethod
def parse_config(config: dict) -> dict
def parse_config(config: dict) -> Any
```
This method is given a dictionary resulting from parsing the YAML configuration for the


@ -539,6 +539,15 @@ templates:
#
#custom_template_directory: /path/to/custom/templates/
# List of rooms to exclude from sync responses. This is useful for server
# administrators wishing to group users into a room without these users being able
# to see it from their client.
#
# By default, no room is excluded.
#
#exclude_rooms_from_sync:
# - !foo:example.com
# Message retention policy at the server level.
#


@ -78,82 +78,3 @@ loggers:
The above logging config will set Synapse as 'INFO' logging level by default,
with the SQL layer at 'WARNING', and will log JSON formatted messages to a
remote endpoint at 10.1.2.3:9999.
## Upgrading from legacy structured logging configuration
Versions of Synapse prior to v1.54.0 automatically converted the legacy
structured logging configuration, which was deprecated in v1.23.0, to the standard
library logging configuration.
The following reference can be used to update your configuration. Based on the
drain `type`, we can pick a new handler:
1. For a type of `console`, `console_json`, or `console_json_terse`: a handler
with a class of `logging.StreamHandler` and a `stream` of `ext://sys.stdout`
or `ext://sys.stderr` should be used.
2. For a type of `file` or `file_json`: a handler of `logging.FileHandler` with
a location of the file path should be used.
3. For a type of `network_json_terse`: a handler of `synapse.logging.RemoteHandler`
with the host and port should be used.
Then based on the drain `type` we can pick a new formatter:
1. For a type of `console` or `file` no formatter is necessary.
2. For a type of `console_json` or `file_json`: a formatter of
`synapse.logging.JsonFormatter` should be used.
3. For a type of `console_json_terse` or `network_json_terse`: a formatter of
`synapse.logging.TerseJsonFormatter` should be used.
For each new handler and formatter they should be added to the logging configuration
and then assigned to either a logger or the root logger.
An example legacy configuration:
```yaml
structured: true
loggers:
synapse:
level: INFO
synapse.storage.SQL:
level: WARNING
drains:
console:
type: console
location: stdout
file:
type: file_json
location: homeserver.log
```
Would be converted into a new configuration:
```yaml
version: 1
formatters:
json:
class: synapse.logging.JsonFormatter
handlers:
console:
class: logging.StreamHandler
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: json
filename: homeserver.log
loggers:
synapse:
level: INFO
handlers: [console, file]
synapse.storage.SQL:
level: WARNING
```
The new logging configuration is a bit more verbose, but significantly more
flexible. It allows for configurations that were not previously possible, such as
sending plain logs over the network, or using different handlers for different
modules.


@ -85,6 +85,58 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.57.0
## Changes to database schema for application services
Synapse v1.57.0 includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the
way transaction IDs are managed for application services. If your deployment uses a dedicated
worker for application service traffic, **it must be stopped** when the database is upgraded
(which normally happens when the main process is upgraded), to ensure the change is made safely
without any risk of reusing transaction IDs.
Deployments which do not use separate worker processes can be upgraded as normal. Similarly,
deployments where no application services are in use can be upgraded as normal.
<details>
<summary><b>Recovering from an incorrect upgrade</b></summary>
If the database schema is upgraded *without* stopping the worker responsible
for AS traffic, then the following error may be given when attempting to start
a Synapse worker or master process:
```
**********************************************************************************
Error during initialisation:
Postgres sequence 'application_services_txn_id_seq' is inconsistent with associated
table 'application_services_txns'. This can happen if Synapse has been downgraded and
then upgraded again, or due to a bad migration.
To fix this error, shut down Synapse (including any and all workers)
and run the following SQL:
SELECT setval('application_services_txn_id_seq', (
SELECT GREATEST(MAX(txn_id), 0) FROM application_services_txns
));
See docs/postgres.md for more information.
There may be more information in the logs.
**********************************************************************************
```
This error may also be seen if Synapse is *downgraded* to an earlier version,
and then upgraded again to v1.57.0 or later.
In either case:
1. Ensure that the worker responsible for AS traffic is stopped.
2. Run the SQL command given in the error message via `psql`.
Synapse should then start correctly.
</details>
# Upgrading to v1.56.0
## Open registration without verification is now disabled by default
@ -119,15 +171,15 @@ The `synctl` script
[has been made](https://github.com/matrix-org/synapse/pull/12140) an
[entry point](https://packaging.python.org/en/latest/specifications/entry-points/)
and no longer exists at the root of Synapse's source tree. If you wish to use
`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g.
`synctl start` instead of `./synctl start` or `/path/to/synctl start`.
You will need to ensure `synctl` is on your `PATH`.
- This is automatically the case when using
[Debian packages](https://packages.matrix.org/debian/) or
[docker images](https://hub.docker.com/r/matrixdotorg/synapse)
provided by Matrix.org.
- When installing from a wheel, sdist, or PyPI, a `synctl` executable is added
to your Python installation's `bin`. This should be on your `PATH`
automatically, though you might need to activate a virtual environment
depending on how you installed Synapse.
@ -147,7 +199,7 @@ please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
This release removes support for the `structured: true` logging configuration
which was deprecated in Synapse v1.23.0. If your logging configuration contains
`structured: true` then it should be modified based on the
[structured logging documentation](structured_logging.md).
[structured logging documentation](https://matrix-org.github.io/synapse/v1.56/structured_logging.html#upgrading-from-legacy-structured-logging-configuration).
# Upgrading to v1.53.0
@ -163,8 +215,8 @@ the `/_matrix/client/` path.
## Stabilisation of MSC3231
The unstable validity-check endpoint for the
[Registration Tokens](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity)
feature has been stabilised and moved from:
`/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity`
@ -178,9 +230,9 @@ Please update any relevant reverse proxy or firewall configurations appropriatel
## Time-based cache expiry is now enabled by default
Formerly, entries in the cache were not evicted regardless of whether they were accessed after storing.
This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed.
To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and
`cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config.
The `expiry_time` flag will still continue to work, but it has been deprecated and will be removed in the future.
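For example, a `caches` section that opts into the new behaviour explicitly might look
like this (values illustrative):

```yaml
caches:
  expire_caches: true   # replaces the deprecated expiry_time flag
  cache_entry_ttl: 30m  # evict entries not accessed for 30 minutes
```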
## Deprecation of `capability` `org.matrix.msc3283.*`
@ -764,7 +816,7 @@ lock down external access to the Admin API endpoints.
This release deprecates use of the `structured: true` logging
configuration for structured logging. If your logging configuration
contains `structured: true` then it should be modified based on the
[structured logging documentation](structured_logging.md).
[structured logging documentation](https://matrix-org.github.io/synapse/v1.56/structured_logging.html#upgrading-from-legacy-structured-logging-configuration).
The `structured` and `drains` logging options are now deprecated and
should be replaced by standard logging configuration of `handlers` and

View File

@ -27,7 +27,7 @@ feeds streams of newly written data between processes so they can be kept in
sync with the database state.
When configured to do so, Synapse uses a
[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
[Redis pub/sub channel](https://redis.io/docs/manual/pubsub/) to send the replication
stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.
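For a quick look at this traffic (a hypothetical debugging aid; the channel naming here is an assumption, so check your own deployment's Redis configuration first):

```bash
# Subscribe to candidate replication channels and watch messages go by.
redis-cli PSUBSCRIBE 'synapse*'
```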

View File

@ -11,6 +11,7 @@ local_partial_types = True
no_implicit_optional = True
files =
docker/,
scripts-dev/,
setup.py,
synapse/,
@ -24,18 +25,8 @@ files =
exclude = (?x)
^(
|scripts-dev/build_debian_packages.py
|scripts-dev/check_signature.py
|scripts-dev/definitions.py
|scripts-dev/federation_client.py
|scripts-dev/hash_history.py
|scripts-dev/list_url_patterns.py
|scripts-dev/release.py
|scripts-dev/tail-synapse.py
|synapse/_scripts/export_signing_key.py
|synapse/_scripts/move_remote_media_to_new_store.py
|synapse/_scripts/synapse_port_db.py
|synapse/_scripts/update_synapse_database.py
|synapse/storage/databases/__init__.py
|synapse/storage/databases/main/cache.py
@ -43,7 +34,6 @@ exclude = (?x)
|synapse/storage/databases/main/event_federation.py
|synapse/storage/databases/main/push_rule.py
|synapse/storage/databases/main/roommember.py
|synapse/storage/databases/main/state.py
|synapse/storage/schema/
|tests/api/test_auth.py
@ -82,11 +72,9 @@ exclude = (?x)
|tests/storage/test_base.py
|tests/storage/test_roommember.py
|tests/test_metrics.py
|tests/test_phone_home.py
|tests/test_server.py
|tests/test_state.py
|tests/test_terms_auth.py
|tests/unittest.py
|tests/util/caches/test_cached_call.py
|tests/util/caches/test_deferred_cache.py
|tests/util/caches/test_descriptors.py
@ -105,6 +93,9 @@ exclude = (?x)
|tests/utils.py
)$
[mypy-synapse._scripts.*]
disallow_untyped_defs = True
[mypy-synapse.api.*]
disallow_untyped_defs = True
@ -114,7 +105,7 @@ disallow_untyped_defs = True
[mypy-synapse.appservice.*]
disallow_untyped_defs = True
[mypy-synapse.config._base]
[mypy-synapse.config.*]
disallow_untyped_defs = True
[mypy-synapse.crypto.*]
@ -273,6 +264,9 @@ ignore_missing_imports = True
[mypy-ijson.*]
ignore_missing_imports = True
[mypy-importlib_metadata.*]
ignore_missing_imports = True
[mypy-jaeger_client.*]
ignore_missing_imports = True

poetry.lock (generated; new file, 2835 lines)

File diff suppressed because it is too large

View File

@ -36,24 +36,9 @@
[tool.black]
target-version = ['py37', 'py38', 'py39', 'py310']
exclude = '''
(
/(
\.eggs # exclude a few common directories in the
| \.git # root of the project
| \.tox
| \.venv
| \.env
| env
| _build
| _trial_temp.*
| build
| dist
| debian
)/
)
'''
# black ignores everything in .gitignore by default, see
# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
# Use `extend-exclude` if you want to exclude something in addition to this.
[tool.isort]
line_length = 88
@ -65,4 +50,235 @@ known_twisted = ["twisted", "OpenSSL"]
multi_line_output = 3
include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true
[tool.poetry]
name = "matrix-synapse"
version = "1.57.1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
readme = "README.rst"
repository = "https://github.com/matrix-org/synapse"
packages = [
{ include = "synapse" },
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Communications :: Chat",
]
include = [
{ path = "AUTHORS.rst", format = "sdist" },
{ path = "book.toml", format = "sdist" },
{ path = "changelog.d", format = "sdist" },
{ path = "CHANGES.md", format = "sdist" },
{ path = "CONTRIBUTING.md", format = "sdist" },
{ path = "demo", format = "sdist" },
{ path = "docs", format = "sdist" },
{ path = "INSTALL.md", format = "sdist" },
{ path = "mypy.ini", format = "sdist" },
{ path = "scripts-dev", format = "sdist" },
{ path = "synmark", format="sdist" },
{ path = "sytest-blacklist", format = "sdist" },
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
]
[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"
export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.7"
# Mandatory Dependencies
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
frozendict = ">=1,!=2.1.2"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
canonicaljson = ">=1.4.0"
# we use the type definitions added in signedjson 1.1.
signedjson = ">=1.1.0"
PyNaCl = ">=1.2.1"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.0"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
prometheus-client = ">=0.4.0"
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see #9936
attrs = ">=19.2.0,!=21.1.0"
netaddr = ">=0.7.18"
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
typing-extensions = ">=3.10.0"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "~=1.1.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# Optional Dependencies
# ---------------------
matrix-synapse-ldap3 = { version = ">=0.1", optional = true }
psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'PyPy'", optional = true }
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.14.0", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.2.0", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
pyjwt = { version = ">=1.6.4", optional = true }
txredisapi = { version = ">=1.4.7", optional = true }
hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
# twice: once here, and once in the `all` extra.
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
saml2 = ["pysaml2"]
oidc = ["authlib"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url_preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["pyjwt"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache_memory = ["pympler"]
test = ["parameterized", "idna"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
# 1) for new installations, I want instructions in existing documentation and tutorials
# out there to still work.
# 2) I don't want to hard-code a list of extras into CI if I can help it. The ideal
# solution here would be something like https://github.com/python-poetry/poetry/issues/3413
# Poetry 1.2's dependency groups might make this easier. But I'm not trying that out
# until there's a stable release of 1.2.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
# matrix-synapse-ldap3
"matrix-synapse-ldap3",
# postgres
"psycopg2", "psycopg2cffi", "psycopg2cffi-compat",
# saml2
"pysaml2",
# oidc
"authlib",
# url_preview
"lxml",
# sentry
"sentry-sdk",
# opentracing
"jaeger-client", "opentracing",
# jwt
"pyjwt",
# redis
"txredisapi", "hiredis"
# omitted:
# - cache_memory: this is an experimental option
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
]
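# As a sanity check, the following installs should stay equivalent (these
# commands are illustrative, not part of this file):
#   pip install matrix-synapse[all]
#   pip install matrix-synapse[matrix-synapse-ldap3,postgres,saml2,oidc,url_preview,sentry,opentracing,jwt,redis]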
[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
isort = "==5.7.0"
black = "==22.3.0"
flake8-comprehensions = "*"
flake8-bugbear = "==21.3.2"
flake8 = "*"
# Typechecking
mypy = "==0.931"
mypy-zope = "==0.3.5"
types-bleach = ">=4.1.0"
types-jsonschema = ">=3.2.0"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
types-pyOpenSSL = ">=20.0.7"
types-PyYAML = ">=5.4.10"
types-requests = ">=2.26.0"
types-setuptools = ">=57.4.0"
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
# parameterized<0.7.4 can create classes with names that would normally be invalid
# identifiers. trial really does not like this when running with multiple workers.
parameterized = ">=0.7.4"
idna = ">=2.5"
# The following are used by the release script
click = "==8.1.0"
redbaron = "==0.9.2"
GitPython = "==3.1.14"
commonmark = "==0.9.1"
pygithub = "==1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

View File

@ -19,7 +19,7 @@ if ! git diff --quiet FETCH_HEAD... -- debian; then
if git diff --quiet FETCH_HEAD... -- debian/changelog; then
echo "Updates to debian directory, but no update to the changelog." >&2
echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
echo "https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#debian-changelog" >&2
exit 1
fi
fi
@ -32,7 +32,7 @@ fi
# Print a link to the contributing guide if the user makes a mistake
CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"
https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog"
# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)

View File

@ -1,72 +0,0 @@
import argparse
import json
import logging
import sys
import dns.resolver
import urllib2
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
def get_targets(server_name):
if ":" in server_name:
target, port = server_name.split(":")
yield (target, int(port))
return
try:
answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
for srv in answers:
yield (srv.target, srv.port)
except dns.resolver.NXDOMAIN:
yield (server_name, 8448)
def get_server_keys(server_name, target, port):
url = "https://%s:%i/_matrix/key/v1" % (target, port)
keys = json.load(urllib2.urlopen(url))
verify_keys = {}
for key_id, key_base64 in keys["verify_keys"].items():
verify_key = decode_verify_key_bytes(key_id, decode_base64(key_base64))
verify_signed_json(keys, server_name, verify_key)
verify_keys[key_id] = verify_key
return verify_keys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("signature_name")
parser.add_argument(
"input_json", nargs="?", type=argparse.FileType("r"), default=sys.stdin
)
args = parser.parse_args()
logging.basicConfig()
server_name = args.signature_name
keys = {}
for target, port in get_targets(server_name):
try:
keys = get_server_keys(server_name, target, port)
print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
write_signing_keys(sys.stdout, keys.values())
break
except Exception:
logging.exception("Error talking to %s:%s", target, port)
json_to_check = json.load(args.input_json)
print("Checking JSON:")
for key_id in json_to_check["signatures"][args.signature_name]:
try:
key = keys[key_id]
verify_signed_json(json_to_check, args.signature_name, key)
print("PASS %s" % (key_id,))
except Exception:
logging.exception("Check for key %s failed" % (key_id,))
print("FAIL %s" % (key_id,))
if __name__ == "__main__":
main()

View File

@ -50,25 +50,18 @@ if [[ -n "$WORKERS" ]]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
# And provide some more configuration to complement.
export COMPLEMENT_CA=true
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=25
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60
else
export COMPLEMENT_BASE_IMAGE=complement-synapse
COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
COMPLEMENT_DOCKERFILE=Dockerfile
fi
# Build the Complement image from the Synapse image we just built.
docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"
cd "$COMPLEMENT_DIR"
EXTRA_COMPLEMENT_ARGS=""
if [[ -n "$1" ]]; then
# A test name regex has been set, supply it to Complement
EXTRA_COMPLEMENT_ARGS+="-run $1 "
fi
docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement"
# Run the tests!
echo "Images built; running complement"
go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
cd "$COMPLEMENT_DIR"
go test -v -tags synapse_blacklist,msc2716,msc3030 -count=1 "$@" ./tests/...
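With this change, any extra arguments are forwarded straight through to `go test` via `"$@"`, e.g. (invocation path assumed, test regex is a placeholder):

```bash
# Run only the Complement tests whose names match the given regex.
./scripts-dev/complement.sh -run 'TestSomething.*'
```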

View File

@ -1,208 +0,0 @@
#! /usr/bin/python
import argparse
import ast
import os
import re
import sys
import yaml
class DefinitionVisitor(ast.NodeVisitor):
def __init__(self):
super().__init__()
self.functions = {}
self.classes = {}
self.names = {}
self.attrs = set()
self.definitions = {
"def": self.functions,
"class": self.classes,
"names": self.names,
"attrs": self.attrs,
}
def visit_Name(self, node):
self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
def visit_Attribute(self, node):
self.attrs.add(node.attr)
for child in ast.iter_child_nodes(node):
self.visit(child)
def visit_ClassDef(self, node):
visitor = DefinitionVisitor()
self.classes[node.name] = visitor.definitions
for child in ast.iter_child_nodes(node):
visitor.visit(child)
def visit_FunctionDef(self, node):
visitor = DefinitionVisitor()
self.functions[node.name] = visitor.definitions
for child in ast.iter_child_nodes(node):
visitor.visit(child)
def non_empty(defs):
functions = {name: non_empty(f) for name, f in defs["def"].items()}
classes = {name: non_empty(f) for name, f in defs["class"].items()}
result = {}
if functions:
result["def"] = functions
if classes:
result["class"] = classes
names = defs["names"]
uses = []
for name in names.get("Load", ()):
if name not in names.get("Param", ()) and name not in names.get("Store", ()):
uses.append(name)
uses.extend(defs["attrs"])
if uses:
result["uses"] = uses
result["names"] = names
result["attrs"] = defs["attrs"]
return result
def definitions_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = DefinitionVisitor()
visitor.visit(input_ast)
definitions = non_empty(visitor.definitions)
return definitions
def definitions_in_file(filepath):
with open(filepath) as f:
return definitions_in_code(f.read())
def defined_names(prefix, defs, names):
for name, funcs in defs.get("def", {}).items():
names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
defined_names(prefix + name + ".", funcs, names)
for name, funcs in defs.get("class", {}).items():
names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
defined_names(prefix + name + ".", funcs, names)
def used_names(prefix, item, defs, names):
for name, funcs in defs.get("def", {}).items():
used_names(prefix + name + ".", name, funcs, names)
for name, funcs in defs.get("class", {}).items():
used_names(prefix + name + ".", name, funcs, names)
path = prefix.rstrip(".")
for used in defs.get("uses", ()):
if used in names:
if item:
names[item].setdefault("uses", []).append(used)
names[used].setdefault("used", {}).setdefault(item, []).append(path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Find definitions.")
parser.add_argument(
"--unused", action="store_true", help="Only list unused definitions"
)
parser.add_argument(
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
)
parser.add_argument(
"--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
)
parser.add_argument(
"directories",
nargs="+",
metavar="DIR",
help="Directories to search for definitions",
)
parser.add_argument(
"--referrers",
default=0,
type=int,
help="Include referrers up to the given depth",
)
parser.add_argument(
"--referred",
default=0,
type=int,
help="Include referred down to the given depth",
)
parser.add_argument(
"--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
)
args = parser.parse_args()
definitions = {}
for directory in args.directories:
for root, _, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
definitions[filepath] = definitions_in_file(filepath)
names = {}
for filepath, defs in definitions.items():
defined_names(filepath + ":", defs, names)
for filepath, defs in definitions.items():
used_names(filepath + ":", None, defs, names)
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
result = {}
for name, definition in names.items():
if patterns and not any(pattern.match(name) for pattern in patterns):
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
if args.unused and definition.get("used"):
continue
result[name] = definition
referrer_depth = args.referrers
referrers = set()
while referrer_depth:
referrer_depth -= 1
for entry in result.values():
for used_by in entry.get("used", ()):
referrers.add(used_by)
for name, definition in names.items():
if name not in referrers:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
referred_depth = args.referred
referred = set()
while referred_depth:
referred_depth -= 1
for entry in result.values():
for uses in entry.get("uses", ()):
referred.add(uses)
for name, definition in names.items():
if name not in referred:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
if args.format == "yaml":
yaml.dump(result, sys.stdout, default_flow_style=False)
elif args.format == "dot":
print("digraph {")
for name, entry in result.items():
print(name)
for used_by in entry.get("used", ()):
if used_by in result:
print(used_by, "->", name)
print("}")
else:
raise ValueError("Unknown format %r" % (args.format))

View File

@ -1,81 +0,0 @@
import sqlite3
import sys
from unpaddedbase64 import decode_base64, encode_base64
from synapse.crypto.event_signing import (
add_event_pdu_content_hash,
compute_pdu_event_reference_hash,
)
from synapse.federation.units import Pdu
from synapse.storage._base import SQLBaseStore
from synapse.storage.pdu import PduStore
from synapse.storage.signatures import SignatureStore
class Store:
_get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
_get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
_get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
_get_pdu_origin_signatures_txn = SignatureStore.__dict__[
"_get_pdu_origin_signatures_txn"
]
_store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
_store_pdu_reference_hash_txn = SignatureStore.__dict__[
"_store_pdu_reference_hash_txn"
]
_store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
simple_insert_txn = SQLBaseStore.__dict__["simple_insert_txn"]
store = Store()
def select_pdus(cursor):
cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")
ids = cursor.fetchall()
pdu_tuples = store._get_pdu_tuples(cursor, ids)
pdus = [Pdu.from_pdu_tuple(p) for p in pdu_tuples]
reference_hashes = {}
for pdu in pdus:
try:
if pdu.prev_pdus:
print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
for pdu_id, origin, hashes in pdu.prev_pdus:
ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
hashes[ref_alg] = encode_base64(ref_hsh)
store._store_prev_pdu_hash_txn(
cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
)
print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
pdu = add_event_pdu_content_hash(pdu)
ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
store._store_pdu_reference_hash_txn(
cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
)
for alg, hsh_base64 in pdu.hashes.items():
print(alg, hsh_base64)
store._store_pdu_content_hash_txn(
cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
)
except Exception:
print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
def main():
conn = sqlite3.connect(sys.argv[1])
cursor = conn.cursor()
select_pdus(cursor)
conn.commit()
if __name__ == "__main__":
main()

View File

@ -80,13 +80,7 @@ else
# then lint everything!
if [[ -z ${files+x} ]]; then
# Lint all source code files and directories
# Note: this list aims to mirror the one in tox.ini
files=(
"synapse" "docker" "tests"
# annoyingly, black doesn't find these so we have to list them
"scripts-dev"
"contrib" "setup.py" "synmark" "stubs" ".ci"
)
files=( "." )
fi
fi
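In practice (hypothetical invocations; the script path is assumed):

```bash
# Lint the whole tree, as per the new default file list (".") above.
./scripts-dev/lint.sh

# Or restrict linting to specific paths.
./scripts-dev/lint.sh synapse/ tests/
```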

View File

@ -1,60 +0,0 @@
#! /usr/bin/python
import argparse
import ast
import os
import sys
import yaml
PATTERNS_V1 = []
PATTERNS_V2 = []
RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}
class CallVisitor(ast.NodeVisitor):
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
name = node.func.id
else:
return
if name == "client_patterns":
PATTERNS_V2.append(node.args[0].s)
def find_patterns_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = CallVisitor()
visitor.visit(input_ast)
def find_patterns_in_file(filepath):
with open(filepath) as f:
find_patterns_in_code(f.read())
parser = argparse.ArgumentParser(description="Find url patterns.")
parser.add_argument(
"directories",
nargs="+",
metavar="DIR",
help="Directories to search for definitions",
)
args = parser.parse_args()
for directory in args.directories:
for root, _, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
find_patterns_in_file(filepath)
PATTERNS_V1.sort()
PATTERNS_V2.sort()
yaml.dump(RESULT, sys.stdout, default_flow_style=False)

View File

@ -1,67 +0,0 @@
import collections
import json
import sys
import time
import requests
Entry = collections.namedtuple("Entry", "name position rows")
ROW_TYPES = {}
def row_type_for_columns(name, column_names):
column_names = tuple(column_names)
row_type = ROW_TYPES.get((name, column_names))
if row_type is None:
row_type = collections.namedtuple(name, column_names)
ROW_TYPES[(name, column_names)] = row_type
return row_type
def parse_response(content):
streams = json.loads(content)
result = {}
for name, value in streams.items():
row_type = row_type_for_columns(name, value["field_names"])
position = value["position"]
rows = [row_type(*row) for row in value["rows"]]
result[name] = Entry(name, position, rows)
return result
def replicate(server, streams):
return parse_response(
requests.get(
server + "/_synapse/replication", verify=False, params=streams
).content
)
def main():
server = sys.argv[1]
streams = None
while not streams:
try:
streams = {
row.name: row.position
for row in replicate(server, {"streams": "-1"})["streams"].rows
}
except requests.exceptions.ConnectionError:
time.sleep(0.1)
while True:
try:
results = replicate(server, streams)
except Exception:
sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
break
for update in results.values():
for row in update.rows:
sys.stdout.write(repr(row) + "\n")
streams[update.name] = update.position
if __name__ == "__main__":
main()

View File

@ -1,19 +0,0 @@
#!/usr/bin/env bash
# This script builds the Docker image to run the PostgreSQL tests, and then runs
# the tests. It uses a dedicated tox environment so that we don't have to
# rebuild it each time.
# Command line arguments to this script are forwarded to "tox" and then to "trial".
set -e
# Build, and tag
docker build docker/ \
--build-arg "UID=$(id -u)" \
--build-arg "GID=$(id -g)" \
-f docker/Dockerfile-pgtests \
-t synapsepgtests
# Run, mounting the current directory into /src
docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"

View File

@ -120,7 +120,7 @@ CONDITIONAL_REQUIREMENTS["mypy"] = [
# Tests assume that all optional dependencies are installed.
#
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"]
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0", "idna>=2.5"]
CONDITIONAL_REQUIREMENTS["dev"] = (
CONDITIONAL_REQUIREMENTS["lint"]

View File

@ -68,7 +68,7 @@ try:
except ImportError:
pass
__version__ = "1.56.0"
__version__ = "1.57.1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

View File

@ -15,19 +15,19 @@
import argparse
import sys
import time
from typing import Optional
from typing import NoReturn, Optional
import nacl.signing
from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys
from signedjson.types import VerifyKey
def exit(status: int = 0, message: Optional[str] = None):
def exit(status: int = 0, message: Optional[str] = None) -> NoReturn:
if message:
print(message, file=sys.stderr)
sys.exit(status)
def format_plain(public_key: nacl.signing.VerifyKey):
def format_plain(public_key: VerifyKey) -> None:
print(
"%s:%s %s"
% (
@ -38,7 +38,7 @@ def format_plain(public_key: nacl.signing.VerifyKey):
)
def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
def format_for_config(public_key: VerifyKey, expiry_ts: int) -> None:
print(
' "%s:%s": { key: "%s", expired_ts: %i }'
% (
@ -50,7 +50,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
)
def main():
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
@ -94,7 +94,6 @@ def main():
message="Error reading key from file %s: %s %s"
% (file.name, type(e), e),
)
res = []
for key in res:
formatter(get_verify_key(key))

View File

@ -7,7 +7,7 @@ import sys
from synapse.config.homeserver import HomeServerConfig
def main():
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--config-dir",

View File

@ -20,7 +20,7 @@ import sys
from synapse.config.logger import DEFAULT_LOG_CONFIG
def main():
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(

View File

@ -20,7 +20,7 @@ from signedjson.key import generate_signing_key, write_signing_keys
from synapse.util.stringutils import random_string
def main():
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(

View File

@ -9,7 +9,7 @@ import bcrypt
import yaml
def prompt_for_pass():
def prompt_for_pass() -> str:
password = getpass.getpass("Password: ")
if not password:
@ -23,7 +23,7 @@ def prompt_for_pass():
return password
def main():
def main() -> None:
bcrypt_rounds = 12
password_pepper = ""

View File

@ -42,7 +42,7 @@ from synapse.rest.media.v1.filepath import MediaFilePaths
logger = logging.getLogger()
def main(src_repo, dest_repo):
def main(src_repo: str, dest_repo: str) -> None:
src_paths = MediaFilePaths(src_repo)
dest_paths = MediaFilePaths(dest_repo)
for line in sys.stdin:
@ -55,14 +55,19 @@ def main(src_repo, dest_repo):
move_media(parts[0], parts[1], src_paths, dest_paths)
def move_media(origin_server, file_id, src_paths, dest_paths):
def move_media(
origin_server: str,
file_id: str,
src_paths: MediaFilePaths,
dest_paths: MediaFilePaths,
) -> None:
"""Move the given file, and any thumbnails, to the dest repo
Args:
origin_server (str):
file_id (str):
src_paths (MediaFilePaths):
dest_paths (MediaFilePaths):
origin_server:
file_id:
src_paths:
dest_paths:
"""
logger.info("%s/%s", origin_server, file_id)
@ -91,7 +96,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
)
def mkdir_and_move(original_file, dest_file):
def mkdir_and_move(original_file: str, dest_file: str) -> None:
dirname = os.path.dirname(dest_file)
if not os.path.exists(dirname):
logger.debug("mkdir %s", dirname)
@ -109,10 +114,9 @@ if __name__ == "__main__":
parser.add_argument("dest_repo", help="Path to source content repo")
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
logging.basicConfig(**logging_config)
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
)
main(args.src_repo, args.dest_repo)

View File

@ -22,7 +22,7 @@ import logging
import sys
from typing import Callable, Optional
import requests as _requests
import requests
import yaml
@ -33,7 +33,6 @@ def request_registration(
shared_secret: str,
admin: bool = False,
user_type: Optional[str] = None,
requests=_requests,
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
) -> None:

View File

@ -138,9 +138,7 @@ def main() -> None:
config_args = parser.parse_args(sys.argv[1:])
config_files = find_config_files(search_paths=config_args.config_path)
config_dict = read_config_files(config_files)
config.parse_config_dict(
config_dict,
)
config.parse_config_dict(config_dict, "", "")
since_ms = time.time() * 1000 - Config.parse_duration(config_args.since)
exclude_users_with_email = config_args.exclude_emails

View File

@ -21,12 +21,29 @@ import logging
import sys
import time
import traceback
from typing import Dict, Iterable, Optional, Set
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Dict,
Generator,
Iterable,
List,
NoReturn,
Optional,
Set,
Tuple,
Type,
TypeVar,
cast,
)
import yaml
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import TypedDict
from twisted.internet import defer, reactor
from twisted.internet import defer, reactor as reactor_
from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
@ -35,7 +52,7 @@ from synapse.logging.context import (
make_deferred_yieldable,
run_in_background,
)
from synapse.storage.database import DatabasePool, make_conn
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
@ -66,8 +83,12 @@ from synapse.storage.databases.main.user_directory import (
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.types import ISynapseReactor
from synapse.util import Clock
# Cast safety: Twisted does some naughty magic which replaces the
# twisted.internet.reactor module with a Reactor instance at runtime.
reactor = cast(ISynapseReactor, reactor_)
logger = logging.getLogger("synapse_port_db")
@ -97,6 +118,7 @@ BOOLEAN_COLUMNS = {
"users": ["shadow_banned"],
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
}
@ -158,12 +180,16 @@ IGNORED_TABLES = {
# Error returned by the run function. Used at the top-level part of the script to
# handle errors and return codes.
end_error = None # type: Optional[str]
end_error: Optional[str] = None
# The exec_info for the error, if any. If error is defined but not exec_info the script
# will show only the error message without the stacktrace, if exec_info is defined but
# not the error then the script will show nothing outside of what's printed in the run
# function. If both are defined, the script will print both the error and the stacktrace.
end_error_exec_info = None
end_error_exec_info: Optional[
Tuple[Type[BaseException], BaseException, TracebackType]
] = None
R = TypeVar("R")
class Store(
@ -187,17 +213,19 @@ class Store(
PresenceBackgroundUpdateStore,
GroupServerWorkerStore,
):
def execute(self, f, *args, **kwargs):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
def execute_sql(self, sql, *args):
def r(txn):
def execute_sql(self, sql: str, *args: object) -> Awaitable[List[Tuple]]:
def r(txn: LoggingTransaction) -> List[Tuple]:
txn.execute(sql, args)
return txn.fetchall()
return self.db_pool.runInteraction("execute_sql", r)
def insert_many_txn(self, txn, table, headers, rows):
def insert_many_txn(
self, txn: LoggingTransaction, table: str, headers: List[str], rows: List[Tuple]
) -> None:
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
table,
", ".join(k for k in headers),
@ -210,14 +238,15 @@ class Store(
logger.exception("Failed to insert: %s", table)
raise
def set_room_is_public(self, room_id, is_public):
# Note: the parent method is an `async def`.
def set_room_is_public(self, room_id: str, is_public: bool) -> NoReturn:
raise Exception(
"Attempt to set room_is_public during port_db: database not empty?"
)
class MockHomeserver:
def __init__(self, config):
def __init__(self, config: HomeServerConfig):
self.clock = Clock(reactor)
self.config = config
self.hostname = config.server.server_name
@ -225,21 +254,30 @@ class MockHomeserver:
"matrix-synapse"
)
def get_clock(self):
def get_clock(self) -> Clock:
return self.clock
def get_reactor(self):
def get_reactor(self) -> ISynapseReactor:
return reactor
def get_instance_name(self):
def get_instance_name(self) -> str:
return "master"
class Porter(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Porter:
def __init__(
self,
sqlite_config: Dict[str, Any],
progress: "Progress",
batch_size: int,
hs_config: HomeServerConfig,
):
self.sqlite_config = sqlite_config
self.progress = progress
self.batch_size = batch_size
self.hs_config = hs_config
async def setup_table(self, table):
async def setup_table(self, table: str) -> Tuple[str, int, int, int, int]:
if table in APPEND_ONLY_TABLES:
# It's safe to just carry on inserting.
row = await self.postgres_store.db_pool.simple_select_one(
@ -281,7 +319,7 @@ class Porter(object):
)
else:
def delete_all(txn):
def delete_all(txn: LoggingTransaction) -> None:
txn.execute(
"DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,)
)
@ -306,7 +344,7 @@ class Porter(object):
async def get_table_constraints(self) -> Dict[str, Set[str]]:
"""Returns a map of tables that have foreign key constraints to tables they depend on."""
def _get_constraints(txn):
def _get_constraints(txn: LoggingTransaction) -> Dict[str, Set[str]]:
# We can pull the information about foreign key constraints out from
# the postgres schema tables.
sql = """
@ -322,7 +360,7 @@ class Porter(object):
"""
txn.execute(sql)
results = {}
results: Dict[str, Set[str]] = {}
for table, foreign_table in txn:
results.setdefault(table, set()).add(foreign_table)
return results
@ -332,8 +370,13 @@ class Porter(object):
)
async def handle_table(
self, table, postgres_size, table_size, forward_chunk, backward_chunk
):
self,
table: str,
postgres_size: int,
table_size: int,
forward_chunk: int,
backward_chunk: int,
) -> None:
logger.info(
"Table %s: %i/%i (rows %i-%i) already ported",
table,
@ -380,7 +423,9 @@ class Porter(object):
while True:
def r(txn):
def r(
txn: LoggingTransaction,
) -> Tuple[Optional[List[str]], List[Tuple], List[Tuple]]:
forward_rows = []
backward_rows = []
if do_forward[0]:
@ -407,6 +452,7 @@ class Porter(object):
)
if frows or brows:
assert headers is not None
if frows:
forward_chunk = max(row[0] for row in frows) + 1
if brows:
@ -415,7 +461,8 @@ class Porter(object):
rows = frows + brows
rows = self._convert_rows(table, headers, rows)
def insert(txn):
def insert(txn: LoggingTransaction) -> None:
assert headers is not None
self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)
self.postgres_store.db_pool.simple_update_one_txn(
@ -437,8 +484,12 @@ class Porter(object):
return
async def handle_search_table(
self, postgres_size, table_size, forward_chunk, backward_chunk
):
self,
postgres_size: int,
table_size: int,
forward_chunk: int,
backward_chunk: int,
) -> None:
select = (
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
" FROM event_search as es"
@ -449,7 +500,7 @@ class Porter(object):
while True:
def r(txn):
def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
txn.execute(select, (forward_chunk, self.batch_size))
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
@ -463,7 +514,7 @@ class Porter(object):
# We have to treat event_search differently since it has a
# different structure in the two different databases.
def insert(txn):
def insert(txn: LoggingTransaction) -> None:
sql = (
"INSERT INTO event_search (event_id, room_id, key,"
" sender, vector, origin_server_ts, stream_ordering)"
@ -517,7 +568,7 @@ class Porter(object):
self,
db_config: DatabaseConnectionConfig,
allow_outdated_version: bool = False,
):
) -> Store:
"""Builds and returns a database store using the provided configuration.
Args:
@ -539,12 +590,13 @@ class Porter(object):
db_conn, allow_outdated_version=allow_outdated_version
)
prepare_database(db_conn, engine, config=self.hs_config)
store = Store(DatabasePool(hs, db_config, engine), db_conn, hs)
# Type safety: ignore that we're using Mock homeservers here.
store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) # type: ignore[arg-type]
db_conn.commit()
return store
async def run_background_updates_on_postgres(self):
async def run_background_updates_on_postgres(self) -> None:
# Manually apply all background updates on the PostgreSQL database.
postgres_ready = (
await self.postgres_store.db_pool.updates.has_completed_background_updates()
@ -556,12 +608,12 @@ class Porter(object):
self.progress.set_state("Running background updates on PostgreSQL")
while not postgres_ready:
await self.postgres_store.db_pool.updates.do_next_background_update(100)
await self.postgres_store.db_pool.updates.do_next_background_update(True)
postgres_ready = await (
self.postgres_store.db_pool.updates.has_completed_background_updates()
)
async def run(self):
async def run(self) -> None:
"""Ports the SQLite database to a PostgreSQL database.
When a fatal error is met, its message is assigned to the global "end_error"
@ -597,7 +649,7 @@ class Porter(object):
self.progress.set_state("Creating port tables")
def create_port_table(txn):
def create_port_table(txn: LoggingTransaction) -> None:
txn.execute(
"CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
" table_name varchar(100) NOT NULL UNIQUE,"
@ -610,7 +662,7 @@ class Porter(object):
# We want people to be able to rerun this script from an old port
# so that they can pick up any missing events that were not
# ported across.
def alter_table(txn):
def alter_table(txn: LoggingTransaction) -> None:
txn.execute(
"ALTER TABLE IF EXISTS port_from_sqlite3"
" RENAME rowid TO forward_rowid"
@ -723,12 +775,16 @@ class Porter(object):
except Exception as e:
global end_error_exec_info
end_error = str(e)
end_error_exec_info = sys.exc_info()
# Type safety: we're in an exception handler, so the exc_info() tuple
# will not be (None, None, None).
end_error_exec_info = sys.exc_info() # type: ignore[assignment]
logger.exception("")
finally:
reactor.stop()
def _convert_rows(self, table, headers, rows):
def _convert_rows(
self, table: str, headers: List[str], rows: List[Tuple]
) -> List[Tuple]:
bool_col_names = BOOLEAN_COLUMNS.get(table, [])
bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names]
@ -736,7 +792,7 @@ class Porter(object):
class BadValueException(Exception):
pass
def conv(j, col):
def conv(j: int, col: object) -> object:
if j in bool_cols:
return bool(col)
if isinstance(col, bytes):
@ -762,7 +818,7 @@ class Porter(object):
return outrows
async def _setup_sent_transactions(self):
async def _setup_sent_transactions(self) -> Tuple[int, int, int]:
# Only save things from the last day
yesterday = int(time.time() * 1000) - 86400000
@ -774,10 +830,10 @@ class Porter(object):
")"
)
def r(txn):
def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
txn.execute(select)
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
headers: List[str] = [column[0] for column in txn.description]
ts_ind = headers.index("ts")
@ -791,7 +847,7 @@ class Porter(object):
if inserted_rows:
max_inserted_rowid = max(r[0] for r in rows)
def insert(txn):
def insert(txn: LoggingTransaction) -> None:
self.postgres_store.insert_many_txn(
txn, "sent_transactions", headers[1:], rows
)
@ -800,7 +856,7 @@ class Porter(object):
else:
max_inserted_rowid = 0
def get_start_id(txn):
def get_start_id(txn: LoggingTransaction) -> int:
txn.execute(
"SELECT rowid FROM sent_transactions WHERE ts >= ?"
" ORDER BY rowid ASC LIMIT 1",
@ -825,12 +881,13 @@ class Porter(object):
},
)
def get_sent_table_size(txn):
def get_sent_table_size(txn: LoggingTransaction) -> int:
txn.execute(
"SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
)
(size,) = txn.fetchone()
return int(size)
result = txn.fetchone()
assert result is not None
return int(result[0])
remaining_count = await self.sqlite_store.execute(get_sent_table_size)
@ -838,25 +895,35 @@ class Porter(object):
return next_chunk, inserted_rows, total_count
async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
frows = await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
async def _get_remaining_count_to_port(
self, table: str, forward_chunk: int, backward_chunk: int
) -> int:
frows = cast(
List[Tuple[int]],
await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
),
)
brows = await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
brows = cast(
List[Tuple[int]],
await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
),
)
return frows[0][0] + brows[0][0]
async def _get_already_ported_count(self, table):
async def _get_already_ported_count(self, table: str) -> int:
rows = await self.postgres_store.execute_sql(
"SELECT count(*) FROM %s" % (table,)
)
return rows[0][0]
async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
async def _get_total_count_to_port(
self, table: str, forward_chunk: int, backward_chunk: int
) -> Tuple[int, int]:
remaining, done = await make_deferred_yieldable(
defer.gatherResults(
[
@ -877,14 +944,17 @@ class Porter(object):
return done, remaining + done
async def _setup_state_group_id_seq(self) -> None:
curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
curr_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
)
if not curr_id:
return
def r(txn):
def r(txn: LoggingTransaction) -> None:
assert curr_id is not None
next_id = curr_id + 1
txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))
@ -895,7 +965,7 @@ class Porter(object):
"setup_user_id_seq", find_max_generated_user_id_localpart
)
def r(txn):
def r(txn: LoggingTransaction) -> None:
next_id = curr_id + 1
txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,))
@ -917,7 +987,7 @@ class Porter(object):
allow_none=True,
)
def _setup_events_stream_seqs_set_pos(txn):
def _setup_events_stream_seqs_set_pos(txn: LoggingTransaction) -> None:
if curr_forward_id:
txn.execute(
"ALTER SEQUENCE events_stream_seq RESTART WITH %s",
@ -941,17 +1011,20 @@ class Porter(object):
"""Set a sequence to the correct value."""
current_stream_ids = []
for stream_id_table in stream_id_tables:
max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
max_stream_id = cast(
int,
await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
),
)
current_stream_ids.append(max_stream_id)
next_id = max(current_stream_ids) + 1
def r(txn):
def r(txn: LoggingTransaction) -> None:
sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,)
txn.execute(sql + " %s", (next_id,))
@ -960,14 +1033,18 @@ class Porter(object):
)
async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
curr_chain_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains",
keyvalues={},
retcol="MAX(chain_id)",
allow_none=True,
)
def r(txn):
def r(txn: LoggingTransaction) -> None:
# Presumably there is at least one row in event_auth_chains.
assert curr_chain_id is not None
txn.execute(
"ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
(curr_chain_id + 1,),
@ -985,15 +1062,22 @@ class Porter(object):
##############################################
class Progress(object):
class TableProgress(TypedDict):
start: int
num_done: int
total: int
perc: int
class Progress:
"""Used to report progress of the port"""
def __init__(self):
self.tables = {}
def __init__(self) -> None:
self.tables: Dict[str, TableProgress] = {}
self.start_time = int(time.time())
def add_table(self, table, cur, size):
def add_table(self, table: str, cur: int, size: int) -> None:
self.tables[table] = {
"start": cur,
"num_done": cur,
@ -1001,19 +1085,22 @@ class Progress(object):
"perc": int(cur * 100 / size),
}
def update(self, table, num_done):
def update(self, table: str, num_done: int) -> None:
data = self.tables[table]
data["num_done"] = num_done
data["perc"] = int(num_done * 100 / data["total"])
def done(self):
def done(self) -> None:
pass
def set_state(self, state: str) -> None:
pass
class CursesProgress(Progress):
"""Reports progress to a curses window"""
def __init__(self, stdscr):
def __init__(self, stdscr: "curses.window"):
self.stdscr = stdscr
curses.use_default_colors()
@ -1022,7 +1109,7 @@ class CursesProgress(Progress):
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)
self.last_update = 0
self.last_update = 0.0
self.finished = False
@ -1031,7 +1118,7 @@ class CursesProgress(Progress):
super(CursesProgress, self).__init__()
def update(self, table, num_done):
def update(self, table: str, num_done: int) -> None:
super(CursesProgress, self).update(table, num_done)
self.total_processed = 0
@ -1042,7 +1129,7 @@ class CursesProgress(Progress):
self.render()
def render(self, force=False):
def render(self, force: bool = False) -> None:
now = time.time()
if not force and now - self.last_update < 0.2:
@ -1081,8 +1168,7 @@ class CursesProgress(Progress):
left_margin = 5
middle_space = 1
items = self.tables.items()
items = sorted(items, key=lambda i: (i[1]["perc"], i[0]))
items = sorted(self.tables.items(), key=lambda i: (i[1]["perc"], i[0]))
for i, (table, data) in enumerate(items):
if i + 2 >= rows:
@ -1115,12 +1201,12 @@ class CursesProgress(Progress):
self.stdscr.refresh()
self.last_update = time.time()
def done(self):
def done(self) -> None:
self.finished = True
self.render(True)
self.stdscr.getch()
def set_state(self, state):
def set_state(self, state: str) -> None:
self.stdscr.clear()
self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD)
self.stdscr.refresh()
@ -1129,7 +1215,7 @@ class CursesProgress(Progress):
class TerminalProgress(Progress):
"""Just prints progress to the terminal"""
def update(self, table, num_done):
def update(self, table: str, num_done: int) -> None:
super(TerminalProgress, self).update(table, num_done)
data = self.tables[table]
@ -1138,7 +1224,7 @@ class TerminalProgress(Progress):
"%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"])
)
def set_state(self, state):
def set_state(self, state: str) -> None:
print(state + "...")
@ -1146,7 +1232,7 @@ class TerminalProgress(Progress):
##############################################
def main():
def main() -> None:
parser = argparse.ArgumentParser(
description="A script to port an existing synapse SQLite database to"
" a new PostgreSQL database."
@ -1178,15 +1264,11 @@ def main():
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
if args.curses:
logging_config["filename"] = "port-synapse.log"
logging.basicConfig(**logging_config)
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
filename="port-synapse.log" if args.curses else None,
)
sqlite_config = {
"name": "sqlite3",
@ -1216,7 +1298,8 @@ def main():
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
def start(stdscr=None):
def start(stdscr: Optional["curses.window"] = None) -> None:
progress: Progress
if stdscr:
progress = CursesProgress(stdscr)
else:
@ -1230,7 +1313,7 @@ def main():
)
@defer.inlineCallbacks
def run():
def run() -> Generator["defer.Deferred[Any]", Any, None]:
with LoggingContext("synapse_port_db_run"):
yield defer.ensureDeferred(porter.run())

View File

@ -24,7 +24,7 @@ import signal
import subprocess
import sys
import time
from typing import Iterable, Optional
from typing import Iterable, NoReturn, Optional, TextIO
import yaml
@ -45,7 +45,7 @@ one of the following:
--------------------------------------------------------------------------------"""
def pid_running(pid):
def pid_running(pid: int) -> bool:
try:
os.kill(pid, 0)
except OSError as err:
@ -68,7 +68,7 @@ def pid_running(pid):
return True
def write(message, colour=NORMAL, stream=sys.stdout):
def write(message: str, colour: str = NORMAL, stream: TextIO = sys.stdout) -> None:
# Lets check if we're writing to a TTY before colouring
should_colour = False
try:
@ -84,7 +84,7 @@ def write(message, colour=NORMAL, stream=sys.stdout):
stream.write(colour + message + NORMAL + "\n")
def abort(message, colour=RED, stream=sys.stderr):
def abort(message: str, colour: str = RED, stream: TextIO = sys.stderr) -> NoReturn:
write(message, colour, stream)
sys.exit(1)
@ -166,7 +166,7 @@ Worker = collections.namedtuple(
)
def main():
def main() -> None:
parser = argparse.ArgumentParser()

View File

@ -16,42 +16,47 @@
import argparse
import logging
import sys
from typing import cast
import yaml
from matrix_common.versionstring import get_distribution_version_string
from twisted.internet import defer, reactor
from twisted.internet import defer, reactor as reactor_
from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.types import ISynapseReactor
# Cast safety: Twisted does some naughty magic which replaces the
# twisted.internet.reactor module with a Reactor instance at runtime.
reactor = cast(ISynapseReactor, reactor_)
logger = logging.getLogger("update_database")
class MockHomeserver(HomeServer):
DATASTORE_CLASS = DataStore
DATASTORE_CLASS = DataStore # type: ignore [assignment]
def __init__(self, config, **kwargs):
def __init__(self, config: HomeServerConfig):
super(MockHomeserver, self).__init__(
config.server.server_name, reactor=reactor, config=config, **kwargs
)
self.version_string = "Synapse/" + get_distribution_version_string(
"matrix-synapse"
hostname=config.server.server_name,
config=config,
reactor=reactor,
version_string="Synapse/"
+ get_distribution_version_string("matrix-synapse"),
)
def run_background_updates(hs):
def run_background_updates(hs: HomeServer) -> None:
store = hs.get_datastores().main
async def run_background_updates():
async def run_background_updates() -> None:
await store.db_pool.updates.run_background_updates(sleep=False)
# Stop the reactor to exit the script once every background update is run.
reactor.stop()
def run():
def run() -> None:
# Apply all background updates on the database.
defer.ensureDeferred(
run_as_background_process("background_updates", run_background_updates)
@ -62,7 +67,7 @@ def run_background_updates(hs):
reactor.run()
def main():
def main() -> None:
parser = argparse.ArgumentParser(
description=(
"Updates a synapse database to the latest schema and optionally runs background updates"
@ -85,12 +90,10 @@ def main():
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
logging.basicConfig(**logging_config)
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
)
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)

View File

@ -130,7 +130,7 @@ def start_reactor(
appname: str,
soft_file_limit: int,
gc_thresholds: Optional[Tuple[int, int, int]],
pid_file: str,
pid_file: Optional[str],
daemonize: bool,
print_pidfile: bool,
logger: logging.Logger,
@ -171,6 +171,8 @@ def start_reactor(
# appearing to go backwards.
with PreserveLoggingContext():
if daemonize:
assert pid_file is not None
if print_pidfile:
print(pid_file)

View File

@ -33,7 +33,6 @@ from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
@ -61,7 +60,6 @@ class AdminCmdSlavedStore(
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
BaseSlavedStore,
RoomWorkerStore,
):

View File

@ -53,7 +53,6 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
@ -247,7 +246,6 @@ class GenericWorkerSlavedStore(
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedProfileStore,
SlavedClientIpStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,

View File

@ -1,4 +1,5 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -22,7 +23,13 @@ from netaddr import IPSet
from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.types import GroupID, JsonDict, UserID, get_domain_from_id
from synapse.types import (
DeviceListUpdates,
GroupID,
JsonDict,
UserID,
get_domain_from_id,
)
from synapse.util.caches.descriptors import _CacheContext, cached
if TYPE_CHECKING:
@ -400,6 +407,7 @@ class AppServiceTransaction:
to_device_messages: List[JsonDict],
one_time_key_counts: TransactionOneTimeKeyCounts,
unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates,
):
self.service = service
self.id = id
@ -408,6 +416,7 @@ class AppServiceTransaction:
self.to_device_messages = to_device_messages
self.one_time_key_counts = one_time_key_counts
self.unused_fallback_keys = unused_fallback_keys
self.device_list_summary = device_list_summary
async def send(self, as_api: "ApplicationServiceApi") -> bool:
"""Sends this transaction using the provided AS API interface.
@ -424,6 +433,7 @@ class AppServiceTransaction:
to_device_messages=self.to_device_messages,
one_time_key_counts=self.one_time_key_counts,
unused_fallback_keys=self.unused_fallback_keys,
device_list_summary=self.device_list_summary,
txn_id=self.id,
)

View File

@ -1,4 +1,5 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -27,7 +28,7 @@ from synapse.appservice import (
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig, serialize_event
from synapse.http.client import SimpleHttpClient
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING:
@ -225,6 +226,7 @@ class ApplicationServiceApi(SimpleHttpClient):
to_device_messages: List[JsonDict],
one_time_key_counts: TransactionOneTimeKeyCounts,
unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates,
txn_id: Optional[int] = None,
) -> bool:
"""
@ -268,6 +270,7 @@ class ApplicationServiceApi(SimpleHttpClient):
}
)
# TODO: Update to stable prefixes once MSC3202 completes FCP merge
if service.msc3202_transaction_extensions:
if one_time_key_counts:
body[
@ -277,6 +280,11 @@ class ApplicationServiceApi(SimpleHttpClient):
body[
"org.matrix.msc3202.device_unused_fallback_keys"
] = unused_fallback_keys
if device_list_summary:
body["org.matrix.msc3202.device_lists"] = {
"changed": list(device_list_summary.changed),
"left": list(device_list_summary.left),
}
try:
await self.put_json(
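
Put together, the transaction body gains an unstable-prefixed field when the
appservice has opted into MSC3202. A sketch of just that assembly step
(hypothetical helper over plain dicts, not the real ApplicationServiceApi):

from typing import Any, Dict, List, Set

def build_txn_body(
    events: List[Dict[str, Any]],
    changed: Set[str],
    left: Set[str],
    msc3202_enabled: bool,
) -> Dict[str, Any]:
    body: Dict[str, Any] = {"events": events}
    # Unstable org.matrix.msc3202.* prefix until MSC3202 finishes FCP;
    # the field is only attached when there is something to report.
    if msc3202_enabled and (changed or left):
        body["org.matrix.msc3202.device_lists"] = {
            "changed": list(changed),
            "left": list(left),
        }
    return body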

View File

@ -72,7 +72,7 @@ from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main import DataStore
from synapse.types import JsonDict
from synapse.types import DeviceListUpdates, JsonDict
from synapse.util import Clock
if TYPE_CHECKING:
@ -122,6 +122,7 @@ class ApplicationServiceScheduler:
events: Optional[Collection[EventBase]] = None,
ephemeral: Optional[Collection[JsonDict]] = None,
to_device_messages: Optional[Collection[JsonDict]] = None,
device_list_summary: Optional[DeviceListUpdates] = None,
) -> None:
"""
Enqueue some data to be sent off to an application service.
@ -133,10 +134,18 @@ class ApplicationServiceScheduler:
to_device_messages: The to-device messages to send. These differ from normal
to-device messages sent to clients, as they have 'to_device_id' and
'to_user_id' fields.
device_list_summary: A summary of the users whose device lists the
application service either needs to refresh, or no longer needs to
track.
"""
# We purposefully allow this method to run with empty events/ephemeral
# collections, so that callers do not need to check iterable size themselves.
if not events and not ephemeral and not to_device_messages:
if (
not events
and not ephemeral
and not to_device_messages
and not device_list_summary
):
return
if events:
@ -147,6 +156,10 @@ class ApplicationServiceScheduler:
self.queuer.queued_to_device_messages.setdefault(appservice.id, []).extend(
to_device_messages
)
if device_list_summary:
self.queuer.queued_device_list_summaries.setdefault(
appservice.id, []
).append(device_list_summary)
# Kick off a new application service transaction
self.queuer.start_background_request(appservice)
@ -169,6 +182,8 @@ class _ServiceQueuer:
self.queued_ephemeral: Dict[str, List[JsonDict]] = {}
# dict of {service_id: [to_device_message_json]}
self.queued_to_device_messages: Dict[str, List[JsonDict]] = {}
# dict of {service_id: [device_list_summary]}
self.queued_device_list_summaries: Dict[str, List[DeviceListUpdates]] = {}
# the appservices which currently have a transaction in flight
self.requests_in_flight: Set[str] = set()
@ -212,7 +227,35 @@ class _ServiceQueuer:
]
del all_to_device_messages[:MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION]
if not events and not ephemeral and not to_device_messages_to_send:
# Consolidate any pending device list summaries into a single, up-to-date
# summary.
# Note: this code assumes that in a single DeviceListUpdates, a user will
# never be in both "changed" and "left" sets.
device_list_summary = DeviceListUpdates()
for summary in self.queued_device_list_summaries.get(service.id, []):
# For every user in the incoming "changed" set:
# * Remove them from the existing "left" set if necessary
# (as we need to start tracking them again)
# * Add them to the existing "changed" set if necessary.
device_list_summary.left.difference_update(summary.changed)
device_list_summary.changed.update(summary.changed)
# For every user in the incoming "left" set:
# * Remove them from the existing "changed" set if necessary
# (we no longer need to track them)
# * Add them to the existing "left" set if necessary.
device_list_summary.changed.difference_update(summary.left)
device_list_summary.left.update(summary.left)
self.queued_device_list_summaries.clear()
if (
not events
and not ephemeral
and not to_device_messages_to_send
# A DeviceListUpdates is truthy if either the 'changed' or 'left' set
# has at least one entry, and falsy otherwise.
and not device_list_summary
):
return
one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
@ -240,6 +283,7 @@ class _ServiceQueuer:
to_device_messages_to_send,
one_time_key_counts,
unused_fallback_keys,
device_list_summary,
)
except Exception:
logger.exception("AS request failed")
@ -322,6 +366,7 @@ class _TransactionController:
to_device_messages: Optional[List[JsonDict]] = None,
one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
device_list_summary: Optional[DeviceListUpdates] = None,
) -> None:
"""
Create a transaction with the given data and send it to the provided
@ -336,6 +381,7 @@ class _TransactionController:
appservice devices in the transaction.
unused_fallback_keys: Lists of unused fallback keys for relevant
appservice devices in the transaction.
device_list_summary: The device list summary to include in the transaction.
"""
try:
txn = await self.store.create_appservice_txn(
@ -345,6 +391,7 @@ class _TransactionController:
to_device_messages=to_device_messages or [],
one_time_key_counts=one_time_key_counts or {},
unused_fallback_keys=unused_fallback_keys or {},
device_list_summary=device_list_summary or DeviceListUpdates(),
)
service_is_up = await self._is_service_up(service)
if service_is_up:
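
The consolidation loop above can be exercised on its own. A runnable sketch
(the DeviceListUpdates here is a stand-in dataclass for the real class in
synapse.types):

from dataclasses import dataclass, field
from typing import Iterable, Set

@dataclass
class DeviceListUpdates:
    changed: Set[str] = field(default_factory=set)
    left: Set[str] = field(default_factory=set)

    def __bool__(self) -> bool:
        # Truthy when either set is non-empty, as the queuer relies on.
        return bool(self.changed or self.left)

def consolidate(summaries: Iterable[DeviceListUpdates]) -> DeviceListUpdates:
    total = DeviceListUpdates()
    for s in summaries:
        # A later "changed" cancels an earlier "left", and vice versa.
        total.left.difference_update(s.changed)
        total.changed.update(s.changed)
        total.changed.difference_update(s.left)
        total.left.update(s.left)
    return total

# A user who changed devices and later left ends up only in "left".
merged = consolidate([
    DeviceListUpdates(changed={"@alice:example.org"}),
    DeviceListUpdates(left={"@alice:example.org"}),
])
assert not merged.changed and merged.left == {"@alice:example.org"}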

View File

@ -702,10 +702,7 @@ class RootConfig:
return obj
def parse_config_dict(
self,
config_dict: Dict[str, Any],
config_dir_path: Optional[str] = None,
data_dir_path: Optional[str] = None,
self, config_dict: Dict[str, Any], config_dir_path: str, data_dir_path: str
) -> None:
"""Read the information from the config dict into this Config object.

View File

@ -126,10 +126,7 @@ class RootConfig:
@classmethod
def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: Any) -> None: ...
def parse_config_dict(
self,
config_dict: Dict[str, Any],
config_dir_path: Optional[str] = ...,
data_dir_path: Optional[str] = ...,
self, config_dict: Dict[str, Any], config_dir_path: str, data_dir_path: str
) -> None: ...
def generate_config(
self,

View File

@ -12,8 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any
from synapse.config._base import Config, ConfigError
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@ -29,7 +31,7 @@ https://matrix-org.github.io/synapse/latest/templates.html
class AccountValidityConfig(Config):
section = "account_validity"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
"""Parses the old account validity config. The config format looks like this:
account_validity:
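
This is the first of many config diffs below that converge on one typed
signature: read_config(self, config: JsonDict, **kwargs: Any) -> None. A
reduced sketch of the pattern (mock base class, not Synapse's real Config):

from typing import Any, Dict

JsonDict = Dict[str, Any]  # alias as in synapse.types

class Config:
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        raise NotImplementedError

class AccountValidityConfig(Config):
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        validity = config.get("account_validity") or {}
        self.enabled = bool(validity.get("enabled", False))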

View File

@ -13,7 +13,7 @@
# limitations under the License.
import logging
from typing import Iterable
from typing import Any, Iterable
from synapse.api.constants import EventTypes
from synapse.config._base import Config, ConfigError
@ -26,12 +26,12 @@ logger = logging.getLogger(__name__)
class ApiConfig(Config):
section = "api"
def read_config(self, config: JsonDict, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
validate_config(_MAIN_SCHEMA, config, ())
self.room_prejoin_state = list(self._get_prejoin_state_types(config))
self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False)
def generate_config_section(cls, **kwargs) -> str:
def generate_config_section(cls, **kwargs: Any) -> str:
formatted_default_state_types = "\n".join(
" # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES
)
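
validate_config above is a thin wrapper around JSON Schema validation. An
equivalent check with plain jsonschema (the schema here is a hypothetical,
much-reduced stand-in for _MAIN_SCHEMA):

import jsonschema

_MAIN_SCHEMA = {
    "type": "object",
    "properties": {"track_puppeted_user_ips": {"type": "boolean"}},
}

def check(config: dict) -> None:
    # Raises jsonschema.exceptions.ValidationError on a malformed section.
    jsonschema.validate(config, _MAIN_SCHEMA)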

View File

@ -14,7 +14,7 @@
# limitations under the License.
import logging
from typing import Dict, List
from typing import Any, Dict, List
from urllib import parse as urlparse
import yaml
@ -31,12 +31,12 @@ logger = logging.getLogger(__name__)
class AppServiceConfig(Config):
section = "appservice"
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.app_service_config_files = config.get("app_service_config_files", [])
self.notify_appservices = config.get("notify_appservices", True)
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
def generate_config_section(cls, **kwargs) -> str:
def generate_config_section(cls, **kwargs: Any) -> str:
return """\
# A list of application service config files to use
#
@ -170,6 +170,7 @@ def _load_appservice(
# When enabled, appservice transactions contain the following information:
# - device One-Time Key counts
# - device unused fallback key usage states
# - device list changes
msc3202_transaction_extensions = as_info.get("org.matrix.msc3202", False)
if not isinstance(msc3202_transaction_extensions, bool):
raise ValueError(

View File

@ -12,6 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.types import JsonDict
from ._base import Config
@ -21,7 +24,7 @@ class AuthConfig(Config):
section = "auth"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
password_config = config.get("password_config", {})
if password_config is None:
password_config = {}
@ -40,7 +43,7 @@ class AuthConfig(Config):
ui_auth.get("session_timeout", 0)
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
password_config:
# Uncomment to disable password login

View File

@ -11,6 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.types import JsonDict
from ._base import Config
@ -18,7 +21,7 @@ from ._base import Config
class BackgroundUpdateConfig(Config):
section = "background_updates"
def generate_config_section(self, **kwargs) -> str:
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Background Updates ##
@ -52,7 +55,7 @@ class BackgroundUpdateConfig(Config):
#default_batch_size: 50
"""
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
bg_update_config = config.get("background_updates") or {}
self.update_duration_ms = bg_update_config.get(

View File

@ -16,10 +16,11 @@ import logging
import os
import re
import threading
from typing import Callable, Dict, Optional
from typing import Any, Callable, Dict, Optional
import attr
from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
@ -105,7 +106,7 @@ class CacheConfig(Config):
with _CACHES_LOCK:
_CACHES.clear()
def generate_config_section(self, **kwargs) -> str:
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Caching ##
@ -172,7 +173,7 @@ class CacheConfig(Config):
#sync_response_cache_duration: 2m
"""
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.event_cache_size = self.parse_size(
config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
)

View File

@ -12,15 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from typing import Any
from synapse.types import JsonDict
from ._base import Config, ConfigError
class CaptchaConfig(Config):
section = "captcha"
def read_config(self, config, **kwargs):
self.recaptcha_private_key = config.get("recaptcha_private_key")
self.recaptcha_public_key = config.get("recaptcha_public_key")
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
recaptcha_private_key = config.get("recaptcha_private_key")
if recaptcha_private_key is not None and not isinstance(
recaptcha_private_key, str
):
raise ConfigError("recaptcha_private_key must be a string.")
self.recaptcha_private_key = recaptcha_private_key
recaptcha_public_key = config.get("recaptcha_public_key")
if recaptcha_public_key is not None and not isinstance(
recaptcha_public_key, str
):
raise ConfigError("recaptcha_public_key must be a string.")
self.recaptcha_public_key = recaptcha_public_key
self.enable_registration_captcha = config.get(
"enable_registration_captcha", False
)
@ -30,7 +46,7 @@ class CaptchaConfig(Config):
)
self.recaptcha_template = self.read_template("recaptcha.html")
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Captcha ##
# See docs/CAPTCHA_SETUP.md for full details of configuring this.
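
The new captcha checks follow a reusable "optional string" validation shape. A
generic sketch (hypothetical helper; ConfigError here is a stand-in for
synapse.config._base.ConfigError):

from typing import Any, Dict, Optional

class ConfigError(Exception):
    pass

def read_optional_str(config: Dict[str, Any], key: str) -> Optional[str]:
    value = config.get(key)
    if value is not None and not isinstance(value, str):
        raise ConfigError(f"{key} must be a string.")
    return value

# Usage mirroring CaptchaConfig.read_config:
#   self.recaptcha_private_key = read_optional_str(config, "recaptcha_private_key")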

View File

@ -16,6 +16,7 @@
from typing import Any, List
from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict
from ._base import Config
from ._util import validate_config
@ -29,7 +30,7 @@ class CasConfig(Config):
section = "cas"
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
cas_config = config.get("cas_config", None)
self.cas_enabled = cas_config and cas_config.get("enabled", True)
@ -52,7 +53,7 @@ class CasConfig(Config):
self.cas_displayname_attribute = None
self.cas_required_attributes = []
def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str:
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# Enable Central Authentication Service (CAS) for registration and login.
#

View File

@ -13,9 +13,10 @@
# limitations under the License.
from os import path
from typing import Optional
from typing import Any, Optional
from synapse.config import ConfigError
from synapse.types import JsonDict
from ._base import Config
@ -76,18 +77,18 @@ class ConsentConfig(Config):
section = "consent"
def __init__(self, *args):
def __init__(self, *args: Any):
super().__init__(*args)
self.user_consent_version: Optional[str] = None
self.user_consent_template_dir: Optional[str] = None
self.user_consent_server_notice_content = None
self.user_consent_server_notice_content: Optional[JsonDict] = None
self.user_consent_server_notice_to_guests = False
self.block_events_without_consent_error = None
self.block_events_without_consent_error: Optional[str] = None
self.user_consent_at_registration = False
self.user_consent_policy_name = "Privacy Policy"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
consent_config = config.get("user_consent")
self.terms_template = self.read_template("terms.html")
@ -118,5 +119,5 @@ class ConsentConfig(Config):
"policy_name", "Privacy Policy"
)
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return DEFAULT_CONFIG

View File

@ -15,8 +15,10 @@
import argparse
import logging
import os
from typing import Any, List
from synapse.config._base import Config, ConfigError
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@ -121,12 +123,12 @@ class DatabaseConnectionConfig:
class DatabaseConfig(Config):
section = "database"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __init__(self, *args: Any):
super().__init__(*args)
self.databases = []
self.databases: List[DatabaseConnectionConfig] = []
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# We *experimentally* support specifying multiple databases via the
# `databases` key. This is a map from a label to database config in the
# same format as the `database` config option, plus an extra
@ -170,7 +172,7 @@ class DatabaseConfig(Config):
self.databases = [DatabaseConnectionConfig("master", database_config)]
self.set_databasepath(database_path)
def generate_config_section(self, data_dir_path, **kwargs) -> str:
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
return DEFAULT_CONFIG % {
"database_path": os.path.join(data_dir_path, "homeserver.db")
}

View File

@ -19,9 +19,12 @@ import email.utils
import logging
import os
from enum import Enum
from typing import Any
import attr
from synapse.types import JsonDict
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
@ -73,7 +76,7 @@ class EmailSubjectConfig:
class EmailConfig(Config):
section = "email"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# TODO: We should separate better the email configuration from the notification
# and account validity config.
@ -354,7 +357,7 @@ class EmailConfig(Config):
path=("email", "invite_client_location"),
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return (
"""\
# Configuration for sending emails from Synapse.

View File

@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.config._base import Config
from synapse.types import JsonDict
@ -21,13 +23,11 @@ class ExperimentalConfig(Config):
section = "experimental"
def read_config(self, config: JsonDict, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
experimental = config.get("experimental_features") or {}
# MSC3440 (thread relation)
self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)
# MSC3666: including bundled relations in /search.
self.msc3666_enabled: bool = experimental.get("msc3666_enabled", False)
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
@ -59,8 +59,9 @@ class ExperimentalConfig(Config):
"msc3202_device_masquerading", False
)
# Portion of MSC3202 related to transaction extensions:
# sending one-time key counts and fallback key usage to application services.
# The portion of MSC3202 related to transaction extensions:
# sending device list changes, one-time key counts and fallback key
# usage to application services.
self.msc3202_transaction_extensions: bool = experimental.get(
"msc3202_transaction_extensions", False
)
@ -77,3 +78,6 @@ class ExperimentalConfig(Config):
# The deprecated groups feature.
self.groups_enabled: bool = experimental.get("groups_enabled", True)
# MSC2654: Unread counts
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)

View File

@ -11,16 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import Any, Optional
from synapse.config._base import Config
from synapse.config._util import validate_config
from synapse.types import JsonDict
class FederationConfig(Config):
section = "federation"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist: Optional[dict] = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
@ -48,7 +49,7 @@ class FederationConfig(Config):
"allow_device_name_lookup_over_federation", True
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Federation ##

View File

@ -12,17 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.types import JsonDict
from ._base import Config
class GroupsConfig(Config):
section = "groups"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_group_creation = config.get("enable_group_creation", False)
self.group_creation_prefix = config.get("group_creation_prefix", "")
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# Uncomment to allow non-server-admin users to create groups on this server
#

View File

@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.types import JsonDict
from ._base import Config, ConfigError
MISSING_JWT = """Missing jwt library. This is required for jwt login.
@ -24,7 +28,7 @@ MISSING_JWT = """Missing jwt library. This is required for jwt login.
class JWTConfig(Config):
section = "jwt"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
jwt_config = config.get("jwt_config", None)
if jwt_config:
self.jwt_enabled = jwt_config.get("enabled", False)
@ -52,7 +56,7 @@ class JWTConfig(Config):
self.jwt_issuer = None
self.jwt_audiences = None
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# JSON web token integration. The following settings can be used to make
# Synapse use JSON web tokens for authentication, instead of its internal

View File

@ -16,7 +16,7 @@
import hashlib
import logging
import os
from typing import Any, Dict, Iterator, List, Optional
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
import attr
import jsonschema
@ -38,6 +38,9 @@ from synapse.util.stringutils import random_string, random_string_with_symbols
from ._base import Config, ConfigError
if TYPE_CHECKING:
from signedjson.key import VerifyKeyWithExpiry
INSECURE_NOTARY_ERROR = """\
Your server is configured to accept key server responses without signature
validation or TLS certificate validation. This is likely to be very insecure. If
@ -96,11 +99,14 @@ class TrustedKeyServer:
class KeyConfig(Config):
section = "key"
def read_config(self, config, config_dir_path, **kwargs):
def read_config(
self, config: JsonDict, config_dir_path: str, **kwargs: Any
) -> None:
# the signing key can be specified inline or in a separate file
if "signing_key" in config:
self.signing_key = read_signing_keys([config["signing_key"]])
else:
assert config_dir_path is not None
signing_key_path = config.get("signing_key_path")
if signing_key_path is None:
signing_key_path = os.path.join(
@ -169,8 +175,12 @@ class KeyConfig(Config):
self.form_secret = config.get("form_secret", None)
def generate_config_section(
self, config_dir_path, server_name, generate_secrets=False, **kwargs
):
self,
config_dir_path: str,
server_name: str,
generate_secrets: bool = False,
**kwargs: Any,
) -> str:
base_key_name = os.path.join(config_dir_path, server_name)
if generate_secrets:
@ -300,7 +310,7 @@ class KeyConfig(Config):
def read_old_signing_keys(
self, old_signing_keys: Optional[JsonDict]
) -> Dict[str, VerifyKey]:
) -> Dict[str, "VerifyKeyWithExpiry"]:
if old_signing_keys is None:
return {}
keys = {}
@ -308,8 +318,8 @@ class KeyConfig(Config):
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
verify_key.expired_ts = key_data["expired_ts"]
verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(key_id, key_bytes) # type: ignore[assignment]
verify_key.expired = key_data["expired_ts"]
keys[key_id] = verify_key
else:
raise ConfigError(
@ -422,7 +432,7 @@ def _parse_key_servers(
server_name = server["server_name"]
result = TrustedKeyServer(server_name=server_name)
verify_keys = server.get("verify_keys")
verify_keys: Optional[Dict[str, str]] = server.get("verify_keys")
if verify_keys is not None:
result.verify_keys = {}
for key_id, key_base64 in verify_keys.items():
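
In the read_old_signing_keys change above, the expiry timestamp is attached
directly to the decoded key object, which is what the VerifyKeyWithExpiry
annotation names. A short sketch, assuming signedjson and unpaddedbase64 are
available:

from signedjson.key import decode_verify_key_bytes
from unpaddedbase64 import decode_base64

def load_old_key(key_id: str, key_data: dict):
    verify_key = decode_verify_key_bytes(key_id, decode_base64(key_data["key"]))
    # Attach the expiry (ms since the epoch) to the key object itself,
    # matching the diff above.
    verify_key.expired = key_data["expired_ts"]
    return verify_key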

View File

@ -35,6 +35,7 @@ from twisted.logger import (
from synapse.logging.context import LoggingContextFilter
from synapse.logging.filter import MetadataFilter
from synapse.types import JsonDict
from ._base import Config, ConfigError
@ -147,13 +148,15 @@ https://matrix-org.github.io/synapse/v1.54/structured_logging.html
class LoggingConfig(Config):
section = "logging"
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
if config.get("log_file"):
raise ConfigError(LOG_FILE_ERROR)
self.log_config = self.abspath(config.get("log_config"))
self.no_redirect_stdio = config.get("no_redirect_stdio", False)
def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str:
def generate_config_section(
self, config_dir_path: str, server_name: str, **kwargs: Any
) -> str:
log_config = os.path.join(config_dir_path, server_name + ".log.config")
return (
"""\

View File

@ -13,8 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import attr
from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
@ -37,7 +40,7 @@ class MetricsFlags:
class MetricsConfig(Config):
section = "metrics"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
@ -67,7 +70,9 @@ class MetricsConfig(Config):
"sentry.dsn field is required when sentry integration is enabled"
)
def generate_config_section(self, report_stats=None, **kwargs):
def generate_config_section(
self, report_stats: Optional[bool] = None, **kwargs: Any
) -> str:
res = """\
## Metrics ##

View File

@ -14,13 +14,14 @@
from typing import Any, Dict, List, Tuple
from synapse.config._base import Config, ConfigError
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
class ModulesConfig(Config):
section = "modules"
def read_config(self, config: dict, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.loaded_modules: List[Tuple[Any, Dict]] = []
configured_modules = config.get("modules") or []
@ -31,7 +32,7 @@ class ModulesConfig(Config):
self.loaded_modules.append(load_module(module, config_path))
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """
## Modules ##

View File

@ -40,7 +40,7 @@ class OembedConfig(Config):
section = "oembed"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
oembed_config: Dict[str, Any] = config.get("oembed") or {}
# A list of patterns which will be used.
@ -143,7 +143,7 @@ class OembedConfig(Config):
)
return re.compile(pattern)
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# oEmbed allows for easier embedding of content from a website. It can be
# used for generating URL previews for services which support it.

View File

@ -36,7 +36,7 @@ LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingPr
class OIDCConfig(Config):
section = "oidc"
def read_config(self, config, **kwargs) -> None:
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.oidc_providers = tuple(_parse_oidc_provider_configs(config))
if not self.oidc_providers:
return
@ -66,7 +66,7 @@ class OIDCConfig(Config):
# OIDC is enabled if we have a provider
return bool(self.oidc_providers)
def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str:
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
# and login.

View File

@ -14,6 +14,7 @@
from typing import Any, List, Tuple, Type
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
from ._base import Config
@ -24,7 +25,7 @@ LDAP_PROVIDER = "ldap_auth_provider.LdapAuthProvider"
class PasswordAuthProviderConfig(Config):
section = "authproviders"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
"""Parses the old password auth providers config. The config format looks like this:
password_providers:

View File

@ -13,13 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.types import JsonDict
from ._base import Config
class PushConfig(Config):
section = "push"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
push_config = config.get("push") or {}
self.push_include_content = push_config.get("include_content", True)
self.push_group_unread_count_by_room = push_config.get(
@ -46,7 +50,7 @@ class PushConfig(Config):
)
self.push_include_content = not redact_content
def generate_config_section(self, config_dir_path, server_name, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """
## Push ##

View File

@ -12,10 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from typing import Any, Dict, Optional
import attr
from synapse.types import JsonDict
from ._base import Config
@ -43,7 +45,7 @@ class FederationRateLimitConfig:
class RatelimitConfig(Config):
section = "ratelimiting"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# Load the new-style messages config if it exists. Otherwise fall back
# to the old method.
@ -142,7 +144,7 @@ class RatelimitConfig(Config):
},
)
def generate_config_section(self, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Ratelimiting ##

View File

@ -12,14 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.config._base import Config
from synapse.types import JsonDict
from synapse.util.check_dependencies import check_requirements
class RedisConfig(Config):
section = "redis"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
redis_config = config.get("redis") or {}
self.redis_enabled = redis_config.get("enabled", False)
@ -32,7 +35,7 @@ class RedisConfig(Config):
self.redis_port = redis_config.get("port", 6379)
self.redis_password = redis_config.get("password")
def generate_config_section(self, config_dir_path, server_name, **kwargs):
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# Configuration for Redis when using workers. This *must* be enabled when
# using workers (unless using old style direct TCP configuration).

View File

@ -13,18 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import Optional
from typing import Any, Optional
from synapse.api.constants import RoomCreationPreset
from synapse.config._base import Config, ConfigError
from synapse.types import RoomAlias, UserID
from synapse.types import JsonDict, RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols, strtobool
class RegistrationConfig(Config):
section = "registration"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_registration = strtobool(
str(config.get("enable_registration", False))
)
@ -196,7 +196,9 @@ class RegistrationConfig(Config):
self.inhibit_user_in_use_error = config.get("inhibit_user_in_use_error", False)
def generate_config_section(self, generate_secrets=False, **kwargs):
def generate_config_section(
self, generate_secrets: bool = False, **kwargs: Any
) -> str:
if generate_secrets:
registration_shared_secret = 'registration_shared_secret: "%s"' % (
random_string_with_symbols(50),

View File

@ -14,7 +14,7 @@
import logging
import os
from typing import Dict, List, Tuple
from typing import Any, Dict, List, Tuple
from urllib.request import getproxies_environment # type: ignore
import attr
@ -95,7 +95,7 @@ def parse_thumbnail_requirements(
class ContentRepositoryConfig(Config):
section = "media"
def read_config(self, config, **kwargs):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
@ -224,7 +224,8 @@ class ContentRepositoryConfig(Config):
"url_preview_accept_language"
) or ["en"]
def generate_config_section(self, data_dir_path, **kwargs):
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None
media_store = os.path.join(data_dir_path, "media_store")
formatted_thumbnail_sizes = "".join(

Some files were not shown because too many files have changed in this diff.