Mirror of https://git.anonymousland.org/anonymousland/synapse.git

Merge remote-tracking branch 'upstream/release-v1.59'

Commit 67a9abc368
.github/workflows/docker.yml (vendored, 30 lines changed)

@@ -34,32 +34,24 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

-      # TODO: consider using https://github.com/docker/metadata-action instead of this
-      # custom magic
       - name: Calculate docker image tag
         id: set-tag
-        run: |
-          case "${GITHUB_REF}" in
-            refs/heads/develop)
-              tag=develop
-              ;;
-            refs/heads/master|refs/heads/main)
-              tag=latest
-              ;;
-            refs/tags/*)
-              tag=${GITHUB_REF#refs/tags/}
-              ;;
-            *)
-              tag=${GITHUB_SHA}
-              ;;
-          esac
-          echo "::set-output name=tag::$tag"
+        uses: docker/metadata-action@master
+        with:
+          images: matrixdotorg/synapse
+          flavor: |
+            latest=false
+          tags: |
+            type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
+            type=pep440,pattern={{raw}}

       - name: Build and push all platforms
         uses: docker/build-push-action@v2
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
-          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
+          tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
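For context: `docker/metadata-action` computes the full list of image tags from the git ref and exposes them through its step outputs, which is why the build step above can drop the hand-rolled shell `case` statement. A minimal sketch of the same pattern in a standalone workflow (the image name `example/app` is a placeholder, not from this repository):

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Calculate docker image tag
        id: set-tag
        uses: docker/metadata-action@master
        with:
          images: example/app  # placeholder image name
          tags: |
            type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
            type=pep440,pattern={{raw}}
      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          push: true
          # consume every tag the metadata step computed
          tags: "${{ steps.set-tag.outputs.tags }}"
```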
.github/workflows/latest_deps.yml (vendored, 3 lines changed)

@@ -32,12 +32,15 @@ jobs:
         with:
           python-version: "3.x"
           poetry-version: "1.2.0b1"
+          extras: "all"
       # Dump installed versions for debugging.
       - run: poetry run pip list > before.txt
       # Upgrade all runtime dependencies only. This is intended to mimic a fresh
       # `pip install matrix-synapse[all]` as closely as possible.
       - run: poetry update --no-dev
       - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
+      - name: Remove warn_unused_ignores from mypy config
+        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
       - run: poetry run mypy

   trial:
     runs-on: ubuntu-latest
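The new `sed` step strips the `warn_unused_ignores = True` line from `mypy.ini` before type-checking against bleeding-edge dependencies, so `# type: ignore` comments that newer library stubs have made redundant don't fail the job. The same edit can be reproduced locally from a Synapse checkout (both commands are taken verbatim from this workflow):

```bash
# Delete the strict setting in place, then type-check as CI would.
sed '/warn_unused_ignores = True/d' -i mypy.ini
poetry run mypy
```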
.github/workflows/tests.yml (vendored, 8 lines changed)

@@ -20,13 +20,9 @@ jobs:
   - run: scripts-dev/config-lint.sh

   lint:
-    # This does a vanilla `poetry install` - no extras. I'm slightly anxious
-    # that we might skip some typechecks on code that uses extras. However,
-    # I think the right way to fix this is to mark any extras needed for
-    # typechecking as development dependencies. To detect this, we ought to
-    # turn up mypy's strictness: disallow unknown imports and accept fewer
-    # uses of `Any`.
     uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
+    with:
+      typechecking-extras: "all"

   lint-crlf:
     runs-on: ubuntu-latest
.github/workflows/twisted_trunk.yml (vendored, 2 lines changed)

@@ -24,6 +24,8 @@ jobs:
           poetry remove twisted
           poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
           poetry install --no-interaction --extras "all test"
+      - name: Remove warn_unused_ignores from mypy config
+        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
       - run: poetry run mypy

   trial:
CHANGES.md (103 lines changed)

@@ -1,3 +1,106 @@
+Synapse 1.59.0rc1 (2022-05-10)
+==============================
+
+This release makes several changes that server administrators should be aware of:
+
+- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
+- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654))
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details.
+
+Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
+
+Features
+--------
+
+- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507))
+- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670))
+- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
+- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452))
+- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654))
+- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526))
+- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601))
+- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273))
+- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544))
+- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570))
+- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580))
+- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594))
+- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633))
+- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657))
+- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656))
+
+
+Updates to the Docker image
+---------------------------
+
+- Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541))
+- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573))
+
+
+Improved Documentation
+----------------------
+
+- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536))
+- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579))
+- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621))
+- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627))
+- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
+- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
+  [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
+- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))
+
+
+Internal Changes
+----------------
+
+- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480))
+- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500))
+- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505))
+- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564))
+- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568))
+- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577))
+- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581))
+- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582))
+- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663))
+- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589))
+- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599))
+- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602))
+- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610))
+- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614))
+- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
+- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620))
+- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624))
+- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632))
+- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637))
+- Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652))
+- Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665))
+- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556))
+- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612))
+
+### Typechecking
+
+- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356))
+- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485))
+- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531))
+- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576))
+- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608))
+- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650))
+- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666))
+- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667))
+- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671))
+
 Synapse 1.58.1 (2022-05-05)
 ===========================
README.rst

@@ -294,13 +294,13 @@ directory of your choice::

     cd synapse

 Synapse has a number of external dependencies. We maintain a fixed development
-environment using [poetry](https://python-poetry.org/). First, install poetry. We recommend
+environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::

     pip install --user pipx
     pipx install poetry

 as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
-(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`
+(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
 for other installation methods.) Then ask poetry to create a virtual environment
 from the project and install Synapse's dependencies::

@@ -309,11 +309,11 @@ from the project and install Synapse's dependencies::

 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.

-We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
+We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::

     poetry run ./demo/start.sh

-(to stop, you can use `poetry run ./demo/stop.sh`)
+(to stop, you can use ``poetry run ./demo/stop.sh``)

 See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
 for more information.

contrib/grafana/synapse.json

@@ -66,6 +66,18 @@

       ],
       "title": "Dashboards",
       "type": "dashboards"
+    },
+    {
+      "asDropdown": false,
+      "icon": "external link",
+      "includeVars": false,
+      "keepTime": false,
+      "tags": [],
+      "targetBlank": true,
+      "title": "Synapse Documentation",
+      "tooltip": "Open Documentation",
+      "type": "link",
+      "url": "https://matrix-org.github.io/synapse/latest/"
     }
   ],
   "panels": [
debian/build_virtualenv (vendored, 1 line changed)

@@ -41,7 +41,6 @@ poetry export \
     --extras all \
     --extras test \
     --extras systemd \
-    --extras cache_memory \
     -o exported_requirements.txt
 deactivate
 rm -rf "$TEMP_VENV"
debian/changelog (vendored, 9 lines changed)

@@ -1,3 +1,12 @@
+matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium
+
+  * Adjust how the `exported-requirements.txt` file is generated as part of
+    the process of building these packages. This affects the package
+    maintainers only; end-users are unaffected.
+  * New Synapse release 1.59.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 10 May 2022 10:45:08 +0100
+
 matrix-synapse-py3 (1.58.1) stable; urgency=medium

   * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
docker/Dockerfile

@@ -1,3 +1,4 @@
+# syntax=docker/dockerfile:1
 # Dockerfile to build the matrixdotorg/synapse docker images.
 #
 # Note that it uses features which are only available in BuildKit - see
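The `# syntax=docker/dockerfile:1` directive pins the Dockerfile frontend and opts the build into BuildKit features regardless of the builder's defaults. On Docker engines where BuildKit is not the default builder, it can also be enabled explicitly for a one-off build (a sketch; the image tag is arbitrary):

```bash
# Build the Synapse image with BuildKit explicitly enabled.
DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile -t matrixdotorg/synapse:local .
```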
docker/Dockerfile-workers

@@ -20,6 +20,9 @@ RUN rm /etc/nginx/sites-enabled/default

 # Copy Synapse worker, nginx and supervisord configuration template files
 COPY ./docker/conf-workers/* /conf/

+# Copy a script to prefix log lines with the supervisor program name
+COPY ./docker/prefix-log /usr/local/bin/
+
 # Expose nginx listener port
 EXPOSE 8080/tcp
docker/complement/SynapseWorkers.Dockerfile

@@ -34,13 +34,16 @@ WORKDIR /data

 # Copy the caddy config
 COPY conf-workers/caddy.complement.json /root/caddy.json

+COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
+COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf
+
 # Copy the entrypoint
 COPY conf-workers/start-complement-synapse-workers.sh /

 # Expose caddy's listener ports
 EXPOSE 8008 8448

-ENTRYPOINT /start-complement-synapse-workers.sh
+ENTRYPOINT ["/start-complement-synapse-workers.sh"]

 # Update the healthcheck to have a shorter check interval
 HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
docker/complement/conf-workers/caddy.supervisord.conf (new file, 7 lines)

@@ -0,0 +1,7 @@
+[program:caddy]
+command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json
+autorestart=unexpected
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
docker/complement/conf-workers/postgres.supervisord.conf (new file, 16 lines)

@@ -0,0 +1,16 @@
+[program:postgres]
+command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
+
+# Lower priority number = starts first
+priority=1
+
+autorestart=unexpected
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+
+# Use 'Fast Shutdown' mode which aborts current transactions and closes connections quickly.
+# (Default (TERM) is 'Smart Shutdown' which stops accepting new connections but
+# lets existing connections close gracefully.)
+stopsignal=INT
docker/complement/conf-workers/start-complement-synapse-workers.sh

@@ -12,12 +12,6 @@ function log {

 # Replace the server name in the caddy config
 sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json

-log "starting postgres"
-pg_ctlcluster 13 main start
-
-log "starting caddy"
-/root/caddy start --config /root/caddy.json
-
 # Set the server name of the homeserver
 export SYNAPSE_SERVER_NAME=${SERVER_NAME}
docker/conf-workers/supervisord.conf.j2

@@ -9,7 +9,7 @@ user=root
 files = /etc/supervisor/conf.d/*.conf

 [program:nginx]
-command=/usr/sbin/nginx -g "daemon off;"
+command=/usr/local/bin/prefix-log /usr/sbin/nginx -g "daemon off;"
 priority=500
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0

@@ -19,7 +19,7 @@ username=www-data
 autorestart=true

 [program:redis]
-command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
+command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0

@@ -29,7 +29,7 @@ username=redis
 autorestart=true

 [program:synapse_main]
-command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
+command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
 priority=10
 # Log startup failures to supervisord's stdout/err
 # Regular synapse logs will still go in the configured data directory
docker/conf/log.config

@@ -2,11 +2,7 @@ version: 1

 formatters:
   precise:
-{% if worker_name %}
-    format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
-{% else %}
     format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
-{% endif %}

 handlers:
 {% if LOG_FILE_PATH %}
docker/configure_workers_and_start.py

@@ -69,10 +69,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "enable_media_repo: true",
     },
     "appservice": {
-        "app": "synapse.app.appservice",
+        "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices": False},
+        "shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
         "worker_extra_conf": "",
     },
     "federation_sender": {

@@ -171,7 +171,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
 # Templates for sections that may be inserted multiple times in config files
 SUPERVISORD_PROCESS_CONFIG_BLOCK = """
 [program:synapse_{name}]
-command=/usr/local/bin/python -m {app} \
+command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
     --config-path="{config_path}" \
     --config-path=/conf/workers/shared.yaml \
     --config-path=/conf/workers/{name}.yaml
docker/prefix-log (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Prefixes all lines on stdout and stderr with the process name (as determined by
+# the SUPERVISOR_PROCESS_NAME env var, which is automatically set by Supervisor).
+#
+# Usage:
+#   prefix-log command [args...]
+#
+
+exec 1> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&1)
+exec 2> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&2)
+exec "$@"
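The script rewrites both output streams through `awk` process substitutions before `exec`-ing the wrapped command, so every emitted line is tagged with the supervised program's name. A quick way to see the effect outside Supervisor is to set the variable by hand (a sketch; `demo` is an arbitrary name):

```bash
# Outside Supervisor, SUPERVISOR_PROCESS_NAME must be set manually.
SUPERVISOR_PROCESS_NAME=demo ./docker/prefix-log echo "hello"
# Expected output: demo | hello
```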
docs/development/contributing_guide.md

@@ -270,13 +270,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh

 To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:

 ```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
 ```

 To run a specific test, you can specify the whole name structure:

 ```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
 ```
docs/jwt.md

@@ -17,9 +17,6 @@ follows:

 }
 ```

-Note that the login type of `m.login.jwt` is supported, but is deprecated. This
-will be removed in a future version of Synapse.
-
 The `token` field should include the JSON web token with the following claims:

 * A claim that encodes the local part of the user ID is required. By default,
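With the deprecated `m.login.jwt` alias removed in this release, clients must send the `org.matrix.login.jwt` type when logging in with a JWT. A minimal login request body consistent with the documentation above might look like this (a sketch; the token value is a placeholder):

```json
{
    "type": "org.matrix.login.jwt",
    "token": "<jwt-from-your-identity-provider>"
}
```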
docs/replication.md

@@ -35,3 +35,8 @@ See [the TCP replication documentation](tcp_replication.md).

 There are read-only version of the synapse storage layer in
 `synapse/replication/slave/storage` that use the response of the
 replication API to invalidate their caches.
+
+### The TCP Replication Module
+Information about how the tcp replication module is structured, including how
+the classes interact, can be found in
+`synapse/replication/tcp/__init__.py`
docs/sample_config.yaml

@@ -407,6 +407,11 @@ manhole_settings:
 # sign up in a short space of time never to return after their initial
 # session.
 #
+# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
+# applies a different trial number if the user was registered by an appservice.
+# A value of 0 means no trial days are applied. Appservices not listed in this
+# dictionary use the value of `mau_trial_days` instead.
+#
 # 'mau_limit_alerting' is a means of limiting client side alerting
 # should the mau limit be reached. This is useful for small instances
 # where the admin has 5 mau seats (say) for 5 specific people and no

@@ -417,6 +422,8 @@ manhole_settings:
 #max_mau_value: 50
 #mau_trial_days: 2
 #mau_limit_alerting: false
+#mau_appservice_trial_days:
+#  "appservice-id": 1

 # If enabled, the metrics for the number of monthly active users will
 # be populated, however no one will be limited. If limit_usage_by_mau

@@ -709,11 +716,11 @@ retention:
 #
 #allow_profile_lookup_over_federation: false

-# Uncomment to disable device display name lookup over federation. By default, the
-# Federation API allows other homeservers to obtain device display names of any user
-# on this homeserver. Defaults to 'true'.
+# Uncomment to allow device display name lookup over federation. By default, the
+# Federation API prevents other homeservers from obtaining the display names of
+# user devices on this homeserver. Defaults to 'false'.
 #
-#allow_device_name_lookup_over_federation: false
+#allow_device_name_lookup_over_federation: true

 ## Caching ##

@@ -1323,6 +1330,12 @@ oembed:
 #
 #registration_requires_token: true

+# Allow users to submit a token during registration to bypass any required 3pid
+# steps configured in `registrations_require_3pid`.
+# Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow.
+#
+#enable_registration_token_3pid_bypass: false
+
 # If set, allows registration of standard or admin accounts by anyone who
 # has the shared secret, even if registration is otherwise disabled.
 #
docs/sample_log_config.yaml

@@ -62,13 +62,6 @@ loggers:
     # information such as access tokens.
     level: INFO

-    twisted:
-        # We send the twisted logging directly to the file handler,
-        # to work around https://github.com/matrix-org/synapse/issues/3471
-        # when using "buffer" logger. Use "console" to log to stderr instead.
-        handlers: [file]
-        propagate: false
-
 root:
     level: INFO
docs/upgrade.md

@@ -89,6 +89,50 @@ process, for example:

     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.59.0
+
+## Device name lookup over federation has been disabled by default
+
+The names of user devices are no longer visible to users on other homeservers by default.
+Device IDs are unaffected, as these are necessary to facilitate end-to-end encryption.
+
+To re-enable this functionality, set the
+[`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation)
+homeserver config option to `true`.
+
+
+## Deprecation of the `synapse.app.appservice` and `synapse.app.user_dir` worker application types
+
+The `synapse.app.appservice` worker application type allowed you to configure a
+single worker to use to notify application services of new events, as long
+as this functionality was disabled on the main process with `notify_appservices: False`.
+Further, the `synapse.app.user_dir` worker application type allowed you to configure
+a single worker to be responsible for updating the user directory, as long as this
+was disabled on the main process with `update_user_directory: False`.
+
+To unify Synapse's worker types, the `synapse.app.appservice` worker application
+type and the `notify_appservices` configuration option have been deprecated.
+The `synapse.app.user_dir` worker application type and `update_user_directory`
+configuration option have also been deprecated.
+
+To get the same functionality as was provided by the deprecated options, it's now recommended that the `synapse.app.generic_worker`
+worker application type is used and that the `notify_appservices_from_worker` and/or
+`update_user_directory_from_worker` options are set to the name of a worker.
+
+For the time being, the old options can be used alongside the new options to make
+it easier to transition between the two configurations, however please note that:
+
+- the options must not contradict each other (otherwise Synapse won't start); and
+- the `notify_appservices` and `update_user_directory` options will be removed in a future release of Synapse.
+
+Please see the [*Notifying Application Services*][v1_59_notify_ases_from] and
+[*Updating the User Directory*][v1_59_update_user_dir] sections of the worker
+documentation for more information.
+
+[v1_59_notify_ases_from]: workers.md#notifying-application-services
+[v1_59_update_user_dir]: workers.md#updating-the-user-directory
+
 # Upgrading to v1.58.0

 ## Groups/communities feature has been disabled by default
@@ -96,6 +140,7 @@ process, for example:

 The non-standard groups/communities feature in Synapse has been disabled by default
 and will be removed in Synapse v1.61.0.
+

 # Upgrading to v1.57.0

 ## Changes to database schema for application services
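For administrators following the deprecation notes above, the transitional shared configuration boils down to naming one generic worker for each duty. A minimal sketch (the worker name `worker1` is a placeholder; both options accept the name of any configured worker):

```yaml
# shared configuration (e.g. homeserver.yaml / shared.yaml)
notify_appservices_from_worker: worker1
update_user_directory_from_worker: worker1
```

Per the worker documentation changes later in this diff, neither duty can be load-balanced, and the main process must be restarted after setting either option.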
docs/usage/administration/request_log.md

@@ -28,7 +28,7 @@ See the following for how to decode the dense data available from the default logging configuration.

 | NNNN | Total time waiting for response to DB queries across all parallel DB work from this request |
 | OOOO | Count of DB transactions performed |
 | PPPP | Response body size |
-| QQQQ | Response status code (prefixed with ! if the socket was closed before the response was generated) |
+| QQQQ | Response status code<br/>Suffixed with `!` if the socket was closed before the response was generated.<br/>A `499!` status code indicates that Synapse also cancelled request processing after the socket was closed.<br/> |
 | RRRR | Request |
 | SSSS | User-agent |
 | TTTT | Events fetched from DB to service this request (note that this does not include events fetched from the cache) |
@@ -1,7 +1,10 @@

 ## Some useful SQL queries for Synapse Admins

 ## Size of full matrix db
-`SELECT pg_size_pretty( pg_database_size( 'matrix' ) );`
+```sql
+SELECT pg_size_pretty( pg_database_size( 'matrix' ) );
+```
+
 ### Result example:
 ```
 pg_size_pretty
@@ -9,39 +12,19 @@ pg_size_pretty
    6420 MB
 (1 row)
 ```
-## Show top 20 larger rooms by state events count
-```sql
-SELECT r.name, s.room_id, s.current_state_events
-FROM room_stats_current s
-LEFT JOIN room_stats_state r USING (room_id)
-ORDER BY current_state_events DESC
-LIMIT 20;
-```
-
-and by state_group_events count:
-```sql
-SELECT rss.name, s.room_id, count(s.room_id) FROM state_groups_state s
-LEFT JOIN room_stats_state rss USING (room_id)
-GROUP BY s.room_id, rss.name
-ORDER BY count(s.room_id) DESC
-LIMIT 20;
-```
-plus same, but with join removed for performance reasons:
-```sql
-SELECT s.room_id, count(s.room_id) FROM state_groups_state s
-GROUP BY s.room_id
-ORDER BY count(s.room_id) DESC
-LIMIT 20;
-```
-
 ## Show top 20 larger tables by row count
 ```sql
-SELECT relname, n_live_tup as rows
+SELECT relname, n_live_tup AS "rows"
 FROM pg_stat_user_tables
 ORDER BY n_live_tup DESC
 LIMIT 20;
 ```
-This query is quick, but may be very approximate, for exact number of rows use `SELECT COUNT(*) FROM <table_name>`.
+This query is quick, but may be very approximate, for exact number of rows use:
+```sql
+SELECT COUNT(*) FROM <table_name>;
+```

 ### Result example:
 ```
 state_groups_state - 161687170
@@ -66,46 +49,19 @@ device_lists_stream - 326903
 user_directory_search - 316433
 ```

-## Show top 20 rooms by new events count in last 1 day:
-```sql
-SELECT e.room_id, r.name, COUNT(e.event_id) cnt FROM events e
-LEFT JOIN room_stats_state r USING (room_id)
-WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000 GROUP BY e.room_id, r.name ORDER BY cnt DESC LIMIT 20;
-```
-
-## Show top 20 users on homeserver by sent events (messages) at last month:
-```sql
-SELECT user_id, SUM(total_events)
-FROM user_stats_historical
-WHERE TO_TIMESTAMP(end_ts/1000) AT TIME ZONE 'UTC' > date_trunc('day', now() - interval '1 month')
-GROUP BY user_id
-ORDER BY SUM(total_events) DESC
-LIMIT 20;
-```
-
-## Show last 100 messages from needed user, with room names:
-```sql
-SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json FROM events e
-LEFT JOIN event_json j USING (room_id)
-LEFT JOIN room_stats_state r USING (room_id)
-WHERE sender = '@LOGIN:example.com'
-AND e.type = 'm.room.message'
-ORDER BY stream_ordering DESC
-LIMIT 100;
-```
-
 ## Show top 20 larger tables by storage size
 ```sql
 SELECT nspname || '.' || relname AS "relation",
-    pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
-FROM pg_class C
-LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+    pg_size_pretty(pg_total_relation_size(c.oid)) AS "total_size"
+FROM pg_class c
+LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
 WHERE nspname NOT IN ('pg_catalog', 'information_schema')
-    AND C.relkind <> 'i'
+    AND c.relkind <> 'i'
     AND nspname !~ '^pg_toast'
-ORDER BY pg_total_relation_size(C.oid) DESC
+ORDER BY pg_total_relation_size(c.oid) DESC
 LIMIT 20;
 ```

 ### Result example:
 ```
 public.state_groups_state - 27 GB
@@ -130,8 +86,93 @@ public.device_lists_remote_cache - 124 MB
 public.state_group_edges - 122 MB
 ```

+## Show top 20 larger rooms by state events count
+You get the same information when you use the
+[admin API](../../admin_api/rooms.md#list-room-api)
+and set parameter `order_by=state_events`.
+
+```sql
+SELECT r.name, s.room_id, s.current_state_events
+FROM room_stats_current s
+LEFT JOIN room_stats_state r USING (room_id)
+ORDER BY current_state_events DESC
+LIMIT 20;
+```
+
+and by state_group_events count:
+```sql
+SELECT rss.name, s.room_id, COUNT(s.room_id)
+FROM state_groups_state s
+LEFT JOIN room_stats_state rss USING (room_id)
+GROUP BY s.room_id, rss.name
+ORDER BY COUNT(s.room_id) DESC
+LIMIT 20;
+```
+
+plus same, but with join removed for performance reasons:
+```sql
+SELECT s.room_id, COUNT(s.room_id)
+FROM state_groups_state s
+GROUP BY s.room_id
+ORDER BY COUNT(s.room_id) DESC
+LIMIT 20;
+```
+
+## Show top 20 rooms by new events count in last 1 day:
+```sql
+SELECT e.room_id, r.name, COUNT(e.event_id) cnt
+FROM events e
+LEFT JOIN room_stats_state r USING (room_id)
+WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000
+GROUP BY e.room_id, r.name
+ORDER BY cnt DESC
+LIMIT 20;
+```
+
+## Show top 20 users on homeserver by sent events (messages) at last month:
+Caution. This query does not use any indexes, can be slow and create load on the database.
+```sql
+SELECT COUNT(*), sender
+FROM events
+WHERE (type = 'm.room.encrypted' OR type = 'm.room.message')
+    AND origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 month') * 1000
+GROUP BY sender
+ORDER BY COUNT(*) DESC
+LIMIT 20;
+```
+
+## Show last 100 messages from needed user, with room names:
+```sql
+SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json
+FROM events e
+LEFT JOIN event_json j USING (room_id)
+LEFT JOIN room_stats_state r USING (room_id)
+WHERE sender = '@LOGIN:example.com'
+    AND e.type = 'm.room.message'
+ORDER BY stream_ordering DESC
+LIMIT 100;
+```
+
 ## Show rooms with names, sorted by events in this rooms
-`echo "select event_json.room_id,room_stats_state.name from event_json,room_stats_state where room_stats_state.room_id=event_json.room_id" | psql synapse | sort | uniq -c | sort -n`
+**Sort and order with bash**
+```bash
+echo "SELECT event_json.room_id, room_stats_state.name FROM event_json, room_stats_state \
+WHERE room_stats_state.room_id = event_json.room_id" | psql -d synapse -h localhost -U synapse_user -t \
+| sort | uniq -c | sort -n
+```
+Documentation for `psql` command line parameters: https://www.postgresql.org/docs/current/app-psql.html
+
+**Sort and order with SQL**
+```sql
+SELECT COUNT(*), event_json.room_id, room_stats_state.name
+FROM event_json, room_stats_state
+WHERE room_stats_state.room_id = event_json.room_id
+GROUP BY event_json.room_id, room_stats_state.name
+ORDER BY COUNT(*) DESC
+LIMIT 50;
+```

 ### Result example:
 ```
 9459  !FPUfgzXYWTKgIrwKxW:matrix.org  | This Week in Matrix

@@ -145,12 +186,22 @@ public.state_group_edges - 122 MB
 ```

 ## Lookup room state info by list of room_id
+You get the same information when you use the
+[admin API](../../admin_api/rooms.md#room-details-api).
 ```sql
-SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption, rsc.joined_members, rsc.local_users_in_room, rss.join_rules
+SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption,
+    rsc.joined_members, rsc.local_users_in_room, rss.join_rules
 FROM room_stats_state rss
 LEFT JOIN room_stats_current rsc USING (room_id)
-WHERE room_id IN (WHERE room_id IN (
+WHERE room_id IN (
     '!OGEhHVWSdvArJzumhm:matrix.org',
     '!YTvKGNlinIzlkMTVRl:matrix.org'
-)
+);
+```
+
+## Show users and devices that have not been online for a while
+```sql
+SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_seen"
+FROM devices
+WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
 ```
docs/usage/configuration/config_documentation.md

@@ -627,6 +627,20 @@ Example configuration:
 mau_trial_days: 5
 ```
 ---
+Config option: `mau_appservice_trial_days`
+
+The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different
+trial number if the user was registered by an appservice. A value
+of 0 means no trial days are applied. Appservices not listed in this dictionary
+use the value of `mau_trial_days` instead.
+
+Example configuration:
+```yaml
+mau_appservice_trial_days:
+  my_appservice_id: 3
+  another_appservice_id: 6
+```
+---
 Config option: `mau_limit_alerting`

 The option `mau_limit_alerting` is a means of limiting client-side alerting
@@ -1035,13 +1049,13 @@ allow_profile_lookup_over_federation: false
 ---
 Config option: `allow_device_name_lookup_over_federation`

-Set this option to false to disable device display name lookup over federation. By default, the
-Federation API allows other homeservers to obtain device display names of any user
+Set this option to true to allow device display name lookup over federation. By default, the
+Federation API prevents other homeservers from obtaining the display names of any user devices
 on this homeserver.

 Example configuration:
 ```yaml
-allow_device_name_lookup_over_federation: false
+allow_device_name_lookup_over_federation: true
 ```
 ---
 ## Caching ##
docs/workers.md

@@ -426,7 +426,7 @@ the shared configuration would include:
 run_background_tasks_on: background_worker
 ```

-You might also wish to investigate the `update_user_directory` and
+You might also wish to investigate the `update_user_directory_from_worker` and
 `media_instance_running_background_jobs` settings.

 An example for a dedicated background worker instance:
@@ -435,6 +435,40 @@ An example for a dedicated background worker instance:
 ```
 {{#include systemd-with-workers/workers/background_worker.yaml}}
 ```

+#### Updating the User Directory
+
+You can designate one generic worker to update the user directory.
+
+Specify its name in the shared configuration as follows:
+
+```yaml
+update_user_directory_from_worker: worker_name
+```
+
+This work cannot be load-balanced; please ensure the main process is restarted
+after setting this option in the shared configuration!
+
+This style of configuration supersedes the legacy `synapse.app.user_dir`
+worker application type.
+
+
+#### Notifying Application Services
+
+You can designate one generic worker to send output traffic to Application Services.
+
+Specify its name in the shared configuration as follows:
+
+```yaml
+notify_appservices_from_worker: worker_name
+```
+
+This work cannot be load-balanced; please ensure the main process is restarted
+after setting this option in the shared configuration!
+
+This style of configuration supersedes the legacy `synapse.app.appservice`
+worker application type.
+
+
 ### `synapse.app.pusher`

 Handles sending push notifications to sygnal and email. Doesn't handle any
@@ -453,6 +487,9 @@ pusher_instances:

 ### `synapse.app.appservice`

+**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
+`notify_appservices_from_worker` option instead.](#notifying-application-services)
+
 Handles sending output traffic to Application Services. Doesn't handle any
 REST endpoints itself, but you should set `notify_appservices: False` in the
 shared configuration file to stop the main synapse sending appservice notifications.
@ -520,6 +557,9 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for
|
|||||||
|
|
||||||
### `synapse.app.user_dir`
|
### `synapse.app.user_dir`
|
||||||
|
|
||||||
|
**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
|
||||||
|
`update_user_directory_from_worker` option instead.](#updating-the-user-directory)
|
||||||
|
|
||||||
Handles searches in the user directory. It can handle REST endpoints matching
|
Handles searches in the user directory. It can handle REST endpoints matching
|
||||||
the following regular expressions:
|
the following regular expressions:
|
||||||
|
|
||||||
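Putting the two new options together: a hedged sketch of the relevant part of a shared configuration that dedicates one generic worker to all three single-instance duties described above (`background_worker` is an illustrative worker name, not a required one):

```yaml
run_background_tasks_on: background_worker
update_user_directory_from_worker: background_worker
notify_appservices_from_worker: background_worker
```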
62
mypy.ini
@ -7,6 +7,7 @@ show_error_codes = True
 show_traceback = True
 mypy_path = stubs
 warn_unreachable = True
+warn_unused_ignores = True
 local_partial_types = True
 no_implicit_optional = True
 
@ -23,10 +24,6 @@ files =
 # https://docs.python.org/3/library/re.html#re.X
 exclude = (?x)
   ^(
-   |scripts-dev/build_debian_packages.py
-   |scripts-dev/federation_client.py
-   |scripts-dev/release.py
-   |scripts-dev/sign_json.py
   |synapse/storage/databases/__init__.py
   |synapse/storage/databases/main/cache.py
   |synapse/storage/databases/main/devices.py
@ -134,6 +131,11 @@ disallow_untyped_defs = True
 [mypy-synapse.metrics.*]
 disallow_untyped_defs = True
 
+[mypy-synapse.metrics._reactor_metrics]
+# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
+# See https://github.com/matrix-org/synapse/pull/11771.
+warn_unused_ignores = False
+
 [mypy-synapse.module_api.*]
 disallow_untyped_defs = True
 
@ -239,63 +241,26 @@ disallow_untyped_defs = True
 [mypy-authlib.*]
 ignore_missing_imports = True
 
-[mypy-bcrypt]
-ignore_missing_imports = True
-
 [mypy-canonicaljson]
 ignore_missing_imports = True
 
-[mypy-constantly]
-ignore_missing_imports = True
-
-[mypy-daemonize]
-ignore_missing_imports = True
-
-[mypy-h11]
-ignore_missing_imports = True
-
-[mypy-hiredis]
-ignore_missing_imports = True
-
-[mypy-hyperlink]
-ignore_missing_imports = True
-
 [mypy-ijson.*]
 ignore_missing_imports = True
 
-[mypy-importlib_metadata.*]
-ignore_missing_imports = True
-
-[mypy-jaeger_client.*]
-ignore_missing_imports = True
-
-[mypy-josepy.*]
-ignore_missing_imports = True
-
-[mypy-jwt.*]
-ignore_missing_imports = True
-
 [mypy-lxml]
 ignore_missing_imports = True
 
 [mypy-msgpack]
 ignore_missing_imports = True
 
-[mypy-nacl.*]
-ignore_missing_imports = True
-
+# Note: WIP stubs available at
+# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
 [mypy-netaddr]
 ignore_missing_imports = True
 
 [mypy-parameterized.*]
 ignore_missing_imports = True
 
-[mypy-phonenumbers.*]
-ignore_missing_imports = True
-
-[mypy-prometheus_client.*]
-ignore_missing_imports = True
-
 [mypy-pymacaroons.*]
 ignore_missing_imports = True
 
@ -308,23 +273,14 @@ ignore_missing_imports = True
 [mypy-saml2.*]
 ignore_missing_imports = True
 
-[mypy-sentry_sdk]
-ignore_missing_imports = True
-
 [mypy-service_identity.*]
 ignore_missing_imports = True
 
-[mypy-signedjson.*]
+[mypy-srvlookup.*]
 ignore_missing_imports = True
 
 [mypy-treq.*]
 ignore_missing_imports = True
 
-[mypy-twisted.*]
-ignore_missing_imports = True
-
-[mypy-zope]
-ignore_missing_imports = True
-
 [mypy-incremental.*]
 ignore_missing_imports = True
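The per-module override above is worth a gloss: `warn_unused_ignores = True` is now on globally, but `synapse.metrics._reactor_metrics` needs a platform-dependent `type: ignore`, so the flag is switched off for that one module. A minimal sketch of the pattern (the import line is illustrative, not a copy of the real module):

```python
# On Linux, select.epoll exists and the ignore below is "unused";
# on macOS the attribute is missing and the ignore is required.
# Disabling warn_unused_ignores for just this module keeps both platforms happy.
from select import epoll  # type: ignore[attr-defined]
```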
pyproject.toml
@ -54,7 +54,7 @@ skip_gitignore = true
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.58.1"
+version = "1.59.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@ -100,7 +100,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
 update_synapse_database = "synapse._scripts.update_synapse_database:main"
 
 [tool.poetry.dependencies]
-python = "^3.7"
+python = "^3.7.1"
 
 # Mandatory Dependencies
 # ----------------------
@ -142,8 +142,10 @@ netaddr = ">=0.7.18"
 # add a lower bound to the Jinja2 dependency.
 Jinja2 = ">=3.0"
 bleach = ">=1.4.3"
-# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
-typing-extensions = ">=3.10.0"
+# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
+# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
+# generic over ParamSpecs.
+typing-extensions = ">=3.10.0.1"
 # We enforce that we have a `cryptography` version that bundles an `openssl`
 # with the latest security patches.
 cryptography = ">=3.4.7"
@ -231,10 +233,11 @@ all = [
     "jaeger-client", "opentracing",
     # jwt
     "pyjwt",
-    #redis
-    "txredisapi", "hiredis"
+    # redis
+    "txredisapi", "hiredis",
+    # cache_memory
+    "pympler",
     # omitted:
-    #   - cache_memory: this is an experimental option
     #   - test: it's useful to have this separate from dev deps in the olddeps job
     #   - systemd: this is a system-based requirement
 ]
@ -248,9 +251,10 @@ flake8-bugbear = "==21.3.2"
 flake8 = "*"
 
 # Typechecking
-mypy = "==0.931"
-mypy-zope = "==0.3.5"
+mypy = "*"
+mypy-zope = "*"
 types-bleach = ">=4.1.0"
+types-commonmark = ">=0.9.2"
 types-jsonschema = ">=3.2.0"
 types-opentracing = ">=2.4.2"
 types-Pillow = ">=8.3.4"
@ -270,7 +274,8 @@ idna = ">=2.5"
 
 # The following are used by the release script
 click = "==8.1.0"
-GitPython = "==3.1.14"
+# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
+GitPython = ">=3.1.20"
 commonmark = "==0.9.1"
 pygithub = "==1.55"
 # The following are executed as commands by the release script.
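The bumped `typing-extensions` floor is about making types generic over `ParamSpec`. As a hedged illustration of what that enables (the names here are invented for the example, not taken from Synapse):

```python
from typing import Callable, TypeVar
from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

# A decorator that supplies the first `str` argument while preserving the
# wrapped function's remaining parameter list and return type exactly.
def with_prefix(func: Callable[Concatenate[str, P], R]) -> Callable[P, R]:
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        return func("synapse", *args, **kwargs)
    return wrapper

@with_prefix
def greet(prefix: str, name: str) -> str:
    return f"{prefix}: hello {name}"

print(greet("world"))  # the `prefix` argument is supplied by the decorator
```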
scripts-dev/build_debian_packages.py
@ -17,7 +17,8 @@ import subprocess
 import sys
 import threading
 from concurrent.futures import ThreadPoolExecutor
-from typing import Optional, Sequence
+from types import FrameType
+from typing import Collection, Optional, Sequence, Set
 
 DISTS = (
     "debian:buster",  # oldstable: EOL 2022-08
@ -41,15 +42,17 @@ projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 
 class Builder(object):
     def __init__(
-        self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None
+        self,
+        redirect_stdout: bool = False,
+        docker_build_args: Optional[Sequence[str]] = None,
     ):
         self.redirect_stdout = redirect_stdout
         self._docker_build_args = tuple(docker_build_args or ())
-        self.active_containers = set()
+        self.active_containers: Set[str] = set()
         self._lock = threading.Lock()
         self._failed = False
 
-    def run_build(self, dist, skip_tests=False):
+    def run_build(self, dist: str, skip_tests: bool = False) -> None:
         """Build deb for a single distribution"""
 
         if self._failed:
@ -63,7 +66,7 @@ class Builder(object):
             self._failed = True
             raise
 
-    def _inner_build(self, dist, skip_tests=False):
+    def _inner_build(self, dist: str, skip_tests: bool = False) -> None:
         tag = dist.split(":", 1)[1]
 
         # Make the dir where the debs will live.
@ -138,7 +141,7 @@ class Builder(object):
             stdout.close()
             print("Completed build of %s" % (dist,))
 
-    def kill_containers(self):
+    def kill_containers(self) -> None:
         with self._lock:
             active = list(self.active_containers)
 
@ -156,8 +159,10 @@ class Builder(object):
                 self.active_containers.remove(c)
 
 
-def run_builds(builder, dists, jobs=1, skip_tests=False):
-    def sig(signum, _frame):
+def run_builds(
+    builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False
+) -> None:
+    def sig(signum: int, _frame: Optional[FrameType]) -> None:
         print("Caught SIGINT")
         builder.kill_containers()
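The new `sig` annotation matches what the standard library actually passes to signal handlers: an `int` signal number and an `Optional[FrameType]`. A standalone sketch of the same pattern:

```python
import signal
from types import FrameType
from typing import Optional

def on_sigint(signum: int, frame: Optional[FrameType]) -> None:
    # Signal handlers receive the signal number and the current stack frame
    # (or None); this is the signature mypy expects for signal.signal().
    print("Caught", signal.Signals(signum).name)

signal.signal(signal.SIGINT, on_sigint)
```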
scripts-dev/complement.sh
@ -43,6 +43,8 @@ fi
 # Build the base Synapse image from the local checkout
 docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
 
+extra_test_args=()
+
 # If we're using workers, modify the docker files slightly.
 if [[ -n "$WORKERS" ]]; then
     # Build the workers docker image (from the base Synapse image).
@ -52,7 +54,14 @@ if [[ -n "$WORKERS" ]]; then
     COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
 
     # And provide some more configuration to complement.
-    export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60
+
+    # It can take quite a while to spin up a worker-mode Synapse for the first
+    # time (the main problem is that we start 14 python processes for each test,
+    # and complement likes to do two of them in parallel).
+    export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
+
+    # ... and it takes longer than 10m to run the whole suite.
+    extra_test_args+=("-timeout=60m")
 else
     export COMPLEMENT_BASE_IMAGE=complement-synapse
     COMPLEMENT_DOCKERFILE=Dockerfile
@ -64,4 +73,4 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
 # Run the tests!
 echo "Images built; running complement"
 cd "$COMPLEMENT_DIR"
-go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "$@" ./tests/...
+go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/...
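For context, a hedged example of how this script is typically invoked (the checkout path and test name are illustrative): `WORKERS` and `COMPLEMENT_DIR` are the environment variables the script reads above, and any trailing arguments are forwarded to `go test` via `"$@"`.

```sh
# Run the Complement suite against a worker-mode Synapse; extra flags such as
# -run are passed straight through to `go test`.
COMPLEMENT_DIR=../complement WORKERS=1 scripts-dev/complement.sh -run TestOutboundFederation
```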
scripts-dev/federation_client.py
@ -38,7 +38,7 @@ import argparse
 import base64
 import json
 import sys
-from typing import Any, Optional
+from typing import Any, Dict, Optional, Tuple
 from urllib import parse as urlparse
 
 import requests
@ -47,13 +47,14 @@ import signedjson.types
 import srvlookup
 import yaml
 from requests.adapters import HTTPAdapter
+from urllib3 import HTTPConnectionPool
 
 # uncomment the following to enable debug logging of http requests
 # from httplib import HTTPConnection
 # HTTPConnection.debuglevel = 1
 
 
-def encode_base64(input_bytes):
+def encode_base64(input_bytes: bytes) -> str:
     """Encode bytes as a base64 string without any padding."""
 
     input_len = len(input_bytes)
@ -63,7 +64,7 @@ def encode_base64(input_bytes):
     return output_string
 
 
-def encode_canonical_json(value):
+def encode_canonical_json(value: object) -> bytes:
     return json.dumps(
         value,
         # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
@ -130,7 +131,7 @@ def request(
         sig,
         destination,
     )
-    authorization_headers.append(header.encode("ascii"))
+    authorization_headers.append(header)
     print("Authorization: %s" % header, file=sys.stderr)
 
     dest = "matrix://%s%s" % (destination, path)
@ -139,7 +140,10 @@ def request(
     s = requests.Session()
     s.mount("matrix://", MatrixConnectionAdapter())
 
-    headers = {"Host": destination, "Authorization": authorization_headers[0]}
+    headers: Dict[str, str] = {
+        "Host": destination,
+        "Authorization": authorization_headers[0],
+    }
 
     if method == "POST":
         headers["Content-Type"] = "application/json"
@ -154,7 +158,7 @@ def request(
     )
 
 
-def main():
+def main() -> None:
     parser = argparse.ArgumentParser(
         description="Signs and sends a federation request to a matrix homeserver"
     )
@ -212,6 +216,7 @@ def main():
     if not args.server_name or not args.signing_key:
         read_args_from_config(args)
 
+    assert isinstance(args.signing_key, str)
     algorithm, version, key_base64 = args.signing_key.split()
     key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64)
 
@ -233,7 +238,7 @@ def main():
     print("")
 
 
-def read_args_from_config(args):
+def read_args_from_config(args: argparse.Namespace) -> None:
     with open(args.config, "r") as fh:
         config = yaml.safe_load(fh)
 
@ -250,7 +255,7 @@ def read_args_from_config(args):
 
 class MatrixConnectionAdapter(HTTPAdapter):
     @staticmethod
-    def lookup(s, skip_well_known=False):
+    def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
         if s[-1] == "]":
             # ipv6 literal (with no port)
             return s, 8448
@ -276,7 +281,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
         return s, 8448
 
     @staticmethod
-    def get_well_known(server_name):
+    def get_well_known(server_name: str) -> Optional[str]:
         uri = "https://%s/.well-known/matrix/server" % (server_name,)
         print("fetching %s" % (uri,), file=sys.stderr)
 
@ -299,7 +304,9 @@ class MatrixConnectionAdapter(HTTPAdapter):
             print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
             return None
 
-    def get_connection(self, url, proxies=None):
+    def get_connection(
+        self, url: str, proxies: Optional[Dict[str, str]] = None
+    ) -> HTTPConnectionPool:
         parsed = urlparse.urlparse(url)
 
         (host, port) = self.lookup(parsed.netloc)
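The `matrix://` trick above relies on a general `requests` feature: a `Session` can mount a custom `HTTPAdapter` per URL prefix, and the adapter then controls how those requests are sent and where connections go. A minimal, self-contained sketch of that pattern (the adapter here is a hypothetical stand-in, not the Matrix one):

```python
import requests
from requests.adapters import HTTPAdapter

class LoggingAdapter(HTTPAdapter):
    """Hypothetical adapter that just logs each outgoing request."""

    def send(self, request, **kwargs):
        print("sending", request.method, request.url)
        return super().send(request, **kwargs)

s = requests.Session()
# Every URL starting with this prefix is routed through our adapter.
s.mount("https://", LoggingAdapter())
s.get("https://example.com/")
```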
scripts-dev/mypy_synapse_plugin.py
@ -16,7 +16,7 @@
 can crop up, e.g the cache descriptors.
 """
 
-from typing import Callable, Optional
+from typing import Callable, Optional, Type
 
 from mypy.nodes import ARG_NAMED_OPT
 from mypy.plugin import MethodSigContext, Plugin
@ -94,7 +94,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
     return signature
 
 
-def plugin(version: str):
+def plugin(version: str) -> Type[SynapsePlugin]:
     # This is the entry point of the plugin, and let's us deal with the fact
     # that the mypy plugin interface is *not* stable by looking at the version
     # string.
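For readers unfamiliar with mypy plugins: mypy imports the module named in its config and calls a top-level `plugin(version)` function, which returns the plugin class, so the annotation added above is simply the precise type of that entry point. A bare-bones sketch of the generic API (not Synapse's actual plugin logic):

```python
from typing import Type

from mypy.plugin import Plugin

class MinimalPlugin(Plugin):
    # Override hooks such as get_method_signature_hook() here.
    pass

def plugin(version: str) -> Type[MinimalPlugin]:
    # mypy passes its own version string, letting plugins adapt to API changes.
    return MinimalPlugin
```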
scripts-dev/release.py
@ -25,7 +25,7 @@ import sys
 import urllib.request
 from os import path
 from tempfile import TemporaryDirectory
-from typing import List, Optional
+from typing import Any, List, Optional, cast
 
 import attr
 import click
@ -36,7 +36,9 @@ from github import Github
 from packaging import version
 
 
-def run_until_successful(command, *args, **kwargs):
+def run_until_successful(
+    command: str, *args: Any, **kwargs: Any
+) -> subprocess.CompletedProcess:
     while True:
         completed_process = subprocess.run(command, *args, **kwargs)
         exit_code = completed_process.returncode
@ -50,7 +52,7 @@ def run_until_successful(command, *args, **kwargs):
 
 
 @click.group()
-def cli():
+def cli() -> None:
     """An interactive script to walk through the parts of creating a release.
 
     Requires the dev dependencies be installed, which can be done via:
@ -81,19 +83,13 @@ def cli():
 
 
 @cli.command()
-def prepare():
+def prepare() -> None:
     """Do the initial stages of creating a release, including creating release
     branch, updating changelog and pushing to GitHub.
     """
 
     # Make sure we're in a git repo.
-    try:
-        repo = git.Repo()
-    except git.InvalidGitRepositoryError:
-        raise click.ClickException("Not in Synapse repo.")
-
-    if repo.is_dirty():
-        raise click.ClickException("Uncommitted changes exist.")
+    repo = get_repo_and_check_clean_checkout()
 
     click.secho("Updating git repo...")
     repo.remote().fetch()
@ -161,22 +157,21 @@ def prepare():
         click.get_current_context().abort()
 
     # Switch to the release branch.
-    parsed_new_version: version.Version = version.parse(new_version)
+    # Cast safety: parse() won't return a version.LegacyVersion from our
+    # version string format.
+    parsed_new_version = cast(version.Version, version.parse(new_version))
 
     # We assume for debian changelogs that we only do RCs or full releases.
     assert not parsed_new_version.is_devrelease
     assert not parsed_new_version.is_postrelease
 
-    release_branch_name = (
-        f"release-v{parsed_new_version.major}.{parsed_new_version.minor}"
-    )
+    release_branch_name = get_release_branch_name(parsed_new_version)
     release_branch = find_ref(repo, release_branch_name)
     if release_branch:
         if release_branch.is_remote():
             # If the release branch only exists on the remote we check it out
             # locally.
             repo.git.checkout(release_branch_name)
-            release_branch = repo.active_branch
     else:
         # If a branch doesn't exist we create one. We ask which one branch it
         # should be based off, defaulting to sensible values depending on the
@ -198,13 +193,15 @@ def prepare():
         click.get_current_context().abort()
 
     # Check out the base branch and ensure it's up to date
-    repo.head.reference = base_branch
+    repo.head.set_reference(base_branch, "check out the base branch")
     repo.head.reset(index=True, working_tree=True)
     if not base_branch.is_remote():
         update_branch(repo)
 
     # Create the new release branch
-    release_branch = repo.create_head(release_branch_name, commit=base_branch)
+    # Type ignore will no longer be needed after GitPython 3.1.28.
+    # See https://github.com/gitpython-developers/GitPython/pull/1419
+    repo.create_head(release_branch_name, commit=base_branch)  # type: ignore[arg-type]
 
     # Switch to the release branch and ensure it's up to date.
     repo.git.checkout(release_branch_name)
@ -265,17 +262,11 @@ def prepare():
 
 @cli.command()
 @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
-def tag(gh_token: Optional[str]):
+def tag(gh_token: Optional[str]) -> None:
     """Tags the release and generates a draft GitHub release"""
 
     # Make sure we're in a git repo.
-    try:
-        repo = git.Repo()
-    except git.InvalidGitRepositoryError:
-        raise click.ClickException("Not in Synapse repo.")
-
-    if repo.is_dirty():
-        raise click.ClickException("Uncommitted changes exist.")
+    repo = get_repo_and_check_clean_checkout()
 
     click.secho("Updating git repo...")
     repo.remote().fetch()
@ -288,12 +279,26 @@ def tag(gh_token: Optional[str]):
     if tag_name in repo.tags:
         raise click.ClickException(f"Tag {tag_name} already exists!\n")
 
+    # Check we're on the right release branch
+    release_branch = get_release_branch_name(current_version)
+    if repo.active_branch.name != release_branch:
+        click.echo(
+            f"Need to be on the release branch ({release_branch}) before tagging. "
+            f"Currently on ({repo.active_branch.name})."
+        )
+        click.get_current_context().abort()
+
     # Get the appropriate changelogs and tag.
     changes = get_changes_for_version(current_version)
 
     click.echo_via_pager(changes)
     if click.confirm("Edit text?", default=False):
-        changes = click.edit(changes, require_save=False)
+        edited_changes = click.edit(changes, require_save=False)
+        # This assert is for mypy's benefit. click's docs are a little unclear, but
+        # when `require_save=False`, not saving the temp file in the editor returns
+        # the original string.
+        assert edited_changes is not None
+        changes = edited_changes
 
     repo.create_tag(tag_name, message=changes, sign=True)
 
@ -347,22 +352,16 @@ def tag(gh_token: Optional[str]):
 
 @cli.command()
 @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
-def publish(gh_token: str):
-    """Publish release."""
+def publish(gh_token: str) -> None:
+    """Publish release on GitHub."""
 
     # Make sure we're in a git repo.
-    try:
-        repo = git.Repo()
-    except git.InvalidGitRepositoryError:
-        raise click.ClickException("Not in Synapse repo.")
-
-    if repo.is_dirty():
-        raise click.ClickException("Uncommitted changes exist.")
+    get_repo_and_check_clean_checkout()
 
     current_version = get_package_version()
     tag_name = f"v{current_version}"
 
-    if not click.confirm(f"Publish {tag_name}?", default=True):
+    if not click.confirm(f"Publish release {tag_name} on GitHub?", default=True):
         return
 
     # Publish the draft release
@ -390,12 +389,19 @@ def publish(gh_token: str):
 
 
 @cli.command()
-def upload():
+def upload() -> None:
     """Upload release to pypi."""
 
     current_version = get_package_version()
     tag_name = f"v{current_version}"
 
+    # Check we have the right tag checked out.
+    repo = get_repo_and_check_clean_checkout()
+    tag = repo.tag(f"refs/tags/{tag_name}")
+    if repo.head.commit != tag.commit:
+        click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
+        click.get_current_context().abort()
+
     pypi_asset_names = [
         f"matrix_synapse-{current_version}-py3-none-any.whl",
         f"matrix-synapse-{current_version}.tar.gz",
@ -418,7 +424,7 @@ def upload():
 
 
 @cli.command()
-def announce():
+def announce() -> None:
     """Generate markdown to announce the release."""
 
     current_version = get_package_version()
@ -428,7 +434,7 @@ def announce():
         f"""
 Hi everyone. Synapse {current_version} has just been released.
 
-[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\
+[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \
 [docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
 [debs](https://packages.matrix.org/debian/) | \
 [pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
@ -459,20 +465,36 @@ def get_package_version() -> version.Version:
     return version.Version(version_string)
 
 
+def get_release_branch_name(version_number: version.Version) -> str:
+    return f"release-v{version_number.major}.{version_number.minor}"
+
+
+def get_repo_and_check_clean_checkout() -> git.Repo:
+    """Get the project repo and check it's not got any uncommitted changes."""
+    try:
+        repo = git.Repo()
+    except git.InvalidGitRepositoryError:
+        raise click.ClickException("Not in Synapse repo.")
+    if repo.is_dirty():
+        raise click.ClickException("Uncommitted changes exist.")
+    return repo
+
+
 def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
     """Find the branch/ref, looking first locally then in the remote."""
-    if ref_name in repo.refs:
-        return repo.refs[ref_name]
+    if ref_name in repo.references:
+        return repo.references[ref_name]
     elif ref_name in repo.remote().refs:
         return repo.remote().refs[ref_name]
     else:
         return None
 
 
-def update_branch(repo: git.Repo):
+def update_branch(repo: git.Repo) -> None:
     """Ensure branch is up to date if it has a remote"""
-    if repo.active_branch.tracking_branch():
-        repo.git.merge(repo.active_branch.tracking_branch().name)
+    tracking_branch = repo.active_branch.tracking_branch()
+    if tracking_branch:
+        repo.git.merge(tracking_branch.name)
 
 
 def get_changes_for_version(wanted_version: version.Version) -> str:
@ -536,7 +558,9 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
     return "\n".join(version_changelog)
 
 
-def generate_and_write_changelog(current_version: version.Version, new_version: str):
+def generate_and_write_changelog(
+    current_version: version.Version, new_version: str
+) -> None:
     # We do this by getting a draft so that we can edit it before writing to the
     # changelog.
     result = run_until_successful(
@ -558,8 +582,8 @@ def generate_and_write_changelog(current_version: version.Version, new_version:
         f.write(existing_content)
 
     # Remove all the news fragments
-    for f in glob.iglob("changelog.d/*.*"):
-        os.remove(f)
+    for filename in glob.iglob("changelog.d/*.*"):
+        os.remove(filename)
 
 
 if __name__ == "__main__":
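Taken together, the release script's subcommands form a pipeline. A hedged summary of the flow, inferred from the commands shown above:

```sh
./scripts-dev/release.py prepare   # cut the release branch and update the changelog
./scripts-dev/release.py tag       # tag the release and draft a GitHub release
./scripts-dev/release.py publish   # publish the draft release on GitHub
./scripts-dev/release.py upload    # upload the sdist/wheel to PyPI
./scripts-dev/release.py announce  # generate the announcement markdown
```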
scripts-dev/sign_json.py
@ -27,7 +27,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.util import json_encoder
 
 
-def main():
+def main() -> None:
     parser = argparse.ArgumentParser(
         description="""Adds a signature to a JSON object.
 
stubs/sortedcontainers/sorteddict.pyi
@ -85,12 +85,19 @@ class SortedDict(Dict[_KT, _VT]):
     def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
     def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
     def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ...
-    @overload
-    def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
-    @overload
-    def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
-    @overload
-    def update(self, **kwargs: _VT) -> None: ...
+    # Mypy now reports the first overload as an error, because typeshed widened the type
+    # of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in
+    # https://github.com/python/typeshed/pull/6653
+    # Since sorteddicts don't change the signature of `update` from that of `dict`, we
+    # let the stubs for `update` inherit from the stubs for `dict`. (I suspect we could
+    # do the same for many other methods.) We leave the stubs commented to better track
+    # how this file has evolved from the original stubs.
+    # @overload
+    # def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
+    # @overload
+    # def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
+    # @overload
+    # def update(self, **kwargs: _VT) -> None: ...
     def __reduce__(
         self,
     ) -> Tuple[
@ -115,9 +122,7 @@ class SortedKeysView(KeysView[_KT_co], Sequence[_KT_co]):
     def __getitem__(self, index: slice) -> List[_KT_co]: ...
     def __delitem__(self, index: Union[int, slice]) -> None: ...
 
-class SortedItemsView(  # type: ignore
-    ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]]
-):
+class SortedItemsView(ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]]):
     def __iter__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ...
     @overload
     def __getitem__(self, index: int) -> Tuple[_KT_co, _VT_co]: ...
synapse/api/auth.py
@ -187,7 +187,7 @@ class Auth:
         Once get_user_by_req has set up the opentracing span, this does the actual work.
         """
         try:
-            ip_addr = request.getClientIP()
+            ip_addr = request.getClientAddress().host
             user_agent = get_request_user_agent(request)
 
             access_token = self.get_access_token_from_request(request)
@ -357,7 +357,7 @@ class Auth:
             return None, None, None
 
         if app_service.ip_range_whitelist:
-            ip_address = IPAddress(request.getClientIP())
+            ip_address = IPAddress(request.getClientAddress().host)
             if ip_address not in app_service.ip_range_whitelist:
                 return None, None, None
 
@ -418,7 +418,8 @@ class Auth:
         """
 
         if rights == "access":
-            # first look in the database
+            # First look in the database to see if the access token is present
+            # as an opaque token.
             r = await self.store.get_user_by_access_token(token)
             if r:
                 valid_until_ms = r.valid_until_ms
@ -435,7 +436,8 @@ class Auth:
 
             return r
 
-        # otherwise it needs to be a valid macaroon
+        # If the token isn't found in the database, then it could still be a
+        # macaroon, so we check that here.
         try:
             user_id, guest = self._parse_and_validate_macaroon(token, rights)
 
@ -483,8 +485,12 @@ class Auth:
             TypeError,
             ValueError,
         ) as e:
-            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
-            raise InvalidClientTokenError("Invalid macaroon passed.")
+            logger.warning(
+                "Invalid access token in auth: %s %s.",
+                type(e),
+                e,
+            )
+            raise InvalidClientTokenError("Invalid access token passed.")
 
     def _parse_and_validate_macaroon(
         self, token: str, rights: str = "access"
@ -505,10 +511,7 @@ class Auth:
         try:
             macaroon = pymacaroons.Macaroon.deserialize(token)
         except Exception:  # deserialize can throw more-or-less anything
-            # doesn't look like a macaroon: treat it as an opaque token which
-            # must be in the database.
-            # TODO: it would be nice to get rid of this, but apparently some
-            # people use access tokens which aren't macaroons
+            # The access token doesn't look like a macaroon.
             raise _InvalidMacaroonException()
 
         try:
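The comment changes above spell out the token-validation order: look the access token up in the database as an opaque string first, and only then try to interpret it as a macaroon. A minimal sketch of that control flow, with toy stand-ins for the real store and macaroon machinery:

```python
from typing import Optional

class InvalidTokenError(Exception):
    """Hypothetical stand-in for Synapse's InvalidClientTokenError."""

async def lookup_token_in_database(token: str) -> Optional[str]:
    # Toy "store": maps opaque tokens to user IDs.
    return {"syt_opaque_token": "@alice:example.com"}.get(token)

def parse_and_validate_macaroon(token: str) -> str:
    # Toy check standing in for real macaroon deserialization/validation.
    if not token.startswith("MDA"):
        raise ValueError("doesn't look like a macaroon")
    return "@bob:example.com"

async def check_access_token(token: str) -> str:
    # 1. First treat the token as opaque and look it up in the database.
    user = await lookup_token_in_database(token)
    if user is not None:
        return user
    # 2. Otherwise it could still be a macaroon, so try to parse it.
    try:
        return parse_and_validate_macaroon(token)
    except Exception as e:
        # Mirrors the rewording above: the error now talks about "access
        # tokens" rather than "macaroons".
        raise InvalidTokenError("Invalid access token passed.") from e
```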
synapse/api/constants.py
@ -255,7 +255,5 @@ class GuestAccess:
 
 class ReceiptTypes:
     READ: Final = "m.read"
-
-
-class ReadReceiptEventFields:
-    MSC2285_HIDDEN: Final = "org.matrix.msc2285.hidden"
+    READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
+    FULLY_READ: Final = "m.fully_read"
synapse/app/_base.py
@ -38,6 +38,7 @@ from typing import (
 
 from cryptography.utils import CryptographyDeprecationWarning
 from matrix_common.versionstring import get_distribution_version_string
+from typing_extensions import ParamSpec
 
 import twisted
 from twisted.internet import defer, error, reactor as _reactor
@ -48,7 +49,6 @@ from twisted.logger import LoggingFile, LogLevel
 from twisted.protocols.tls import TLSMemoryBIOFactory
 from twisted.python.threadpool import ThreadPool
 
-import synapse
 from synapse.api.constants import MAX_PDU_SIZE
 from synapse.app import check_bind_error
 from synapse.app.phone_stats_home import start_phone_stats_home
@ -60,6 +60,7 @@ from synapse.events.spamcheck import load_legacy_spam_checkers
 from synapse.events.third_party_rules import load_legacy_third_party_event_rules
 from synapse.handlers.auth import load_legacy_password_auth_providers
 from synapse.logging.context import PreserveLoggingContext
+from synapse.logging.opentracing import init_tracer
 from synapse.metrics import install_gc_manager, register_threadpool
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.metrics.jemalloc import setup_jemalloc_stats
@ -81,11 +82,12 @@ logger = logging.getLogger(__name__)
 
 # list of tuples of function, args list, kwargs dict
 _sighup_callbacks: List[
-    Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]
+    Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
 ] = []
+P = ParamSpec("P")
 
 
-def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> None:
+def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
     """
     Register a function to be called when a SIGHUP occurs.
 
@ -93,7 +95,9 @@ def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> Non
         func: Function to be called when sent a SIGHUP signal.
         *args, **kwargs: args and kwargs to be passed to the target function.
     """
-    _sighup_callbacks.append((func, args, kwargs))
+    # This type-ignore should be redundant once we use a mypy release with
+    # https://github.com/python/mypy/pull/12668.
+    _sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
 
 
 def start_worker_reactor(
@ -214,7 +218,9 @@ def redirect_stdio_to_logs() -> None:
     print("Redirected stdout/stderr to logs")
 
 
-def register_start(cb: Callable[..., Awaitable], *args: Any, **kwargs: Any) -> None:
+def register_start(
+    cb: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
+) -> None:
     """Register a callback with the reactor, to be called once it is running
 
     This can be used to initialise parts of the system which require an asynchronous
@ -431,7 +437,7 @@ async def start(hs: "HomeServer") -> None:
     refresh_certificate(hs)
 
     # Start the tracer
-    synapse.logging.opentracing.init_tracer(hs)  # type: ignore[attr-defined]  # noqa
+    init_tracer(hs)  # noqa
 
     # Instantiate the modules so they can register their web resources to the module API
     # before we start the listeners.
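The switch from `Callable[..., None]` plus `Any` to a `ParamSpec` means the type checker can now verify that the arguments handed to `register_sighup` actually match the callback's signature. A self-contained sketch of the same idea (toy names, not the Synapse module):

```python
from typing import Callable, List, Tuple
from typing_extensions import ParamSpec

P = ParamSpec("P")
_callbacks: List[Tuple[Callable[..., None], tuple, dict]] = []

def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
    # P.args / P.kwargs tie the extra arguments to func's real parameter list.
    _callbacks.append((func, args, kwargs))

def reload_config(path: str, *, verbose: bool = False) -> None:
    print("reloading", path, verbose)

register_sighup(reload_config, "homeserver.yaml", verbose=True)  # OK
# register_sighup(reload_config, 42)  # rejected by mypy: int is not str
```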
synapse/app/admin_cmd.py
@ -210,7 +210,7 @@ def start(config_options: List[str]) -> None:
     config.logging.no_redirect_stdio = True
 
     # Explicitly disable background processes
-    config.server.update_user_directory = False
+    config.worker.should_update_user_directory = False
     config.worker.run_background_tasks = False
     config.worker.start_pushers = False
     config.worker.pusher_shard_config.instances = []
synapse/app/generic_worker.py
@ -441,38 +441,6 @@ def start(config_options: List[str]) -> None:
             "synapse.app.user_dir",
         )
 
-    if config.worker.worker_app == "synapse.app.appservice":
-        if config.appservice.notify_appservices:
-            sys.stderr.write(
-                "\nThe appservices must be disabled in the main synapse process"
-                "\nbefore they can be run in a separate worker."
-                "\nPlease add ``notify_appservices: false`` to the main config"
-                "\n"
-            )
-            sys.exit(1)
-
-        # Force the appservice to start since they will be disabled in the main config
-        config.appservice.notify_appservices = True
-    else:
-        # For other worker types we force this to off.
-        config.appservice.notify_appservices = False
-
-    if config.worker.worker_app == "synapse.app.user_dir":
-        if config.server.update_user_directory:
-            sys.stderr.write(
-                "\nThe update_user_directory must be disabled in the main synapse process"
-                "\nbefore they can be run in a separate worker."
-                "\nPlease add ``update_user_directory: false`` to the main config"
-                "\n"
-            )
-            sys.exit(1)
-
-        # Force the pushers to start since they will be disabled in the main config
-        config.server.update_user_directory = True
-    else:
-        # For other worker types we force this to off.
-        config.server.update_user_directory = False
-
     synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
     synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
 
synapse/appservice/api.py
@ -17,6 +17,7 @@ import urllib.parse
 from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
 
 from prometheus_client import Counter
+from typing_extensions import TypeGuard
 
 from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
@ -66,7 +67,7 @@ def _is_valid_3pe_metadata(info: JsonDict) -> bool:
     return True
 
 
-def _is_valid_3pe_result(r: JsonDict, field: str) -> bool:
+def _is_valid_3pe_result(r: object, field: str) -> TypeGuard[JsonDict]:
     if not isinstance(r, dict):
         return False
 
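Changing the return annotation to `TypeGuard[JsonDict]` lets the type checker narrow the argument's type at every call site where the check succeeds. A small self-contained illustration of the mechanism:

```python
from typing import Any, Dict
from typing_extensions import TypeGuard

JsonDict = Dict[str, Any]

def is_json_dict(value: object) -> TypeGuard[JsonDict]:
    # Returning True tells the type checker: `value` is a JsonDict here.
    return isinstance(value, dict) and all(isinstance(k, str) for k in value)

def handle(value: object) -> None:
    if is_json_dict(value):
        # mypy now treats `value` as Dict[str, Any]; no cast() required.
        print(sorted(value.keys()))

handle({"event_type": "m.room.message"})
```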
synapse/config/appservice.py
@ -33,7 +33,6 @@ class AppServiceConfig(Config):
 
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.app_service_config_files = config.get("app_service_config_files", [])
-        self.notify_appservices = config.get("notify_appservices", True)
         self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
 
     def generate_config_section(cls, **kwargs: Any) -> str:
@ -56,7 +55,8 @@ def load_appservices(
 ) -> List[ApplicationService]:
     """Returns a list of Application Services from the config files."""
     if not isinstance(config_files, list):
-        logger.warning("Expected %s to be a list of AS config files.", config_files)
+        # type-ignore: this function gets arbitrary json value; we do use this path.
+        logger.warning("Expected %s to be a list of AS config files.", config_files)  # type: ignore[unreachable]
         return []
 
     # Dicts of value -> filename
synapse/config/experimental.py
@ -32,7 +32,7 @@ class ExperimentalConfig(Config):
         # MSC2716 (importing historical messages)
         self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
 
-        # MSC2285 (hidden read receipts)
+        # MSC2285 (private read receipts)
         self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
 
         # MSC3244 (room version capabilities)
@ -81,3 +81,6 @@ class ExperimentalConfig(Config):
 
         # MSC2815 (allow room moderators to view redacted event content)
         self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
+
+        # MSC3786 (Add a default push rule to ignore m.room.server_acl events)
+        self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
synapse/config/federation.py
@ -46,7 +46,7 @@ class FederationConfig(Config):
         )
 
         self.allow_device_name_lookup_over_federation = config.get(
-            "allow_device_name_lookup_over_federation", True
+            "allow_device_name_lookup_over_federation", False
         )
 
     def generate_config_section(self, **kwargs: Any) -> str:
@ -81,11 +81,11 @@ class FederationConfig(Config):
         #
         #allow_profile_lookup_over_federation: false
 
-        # Uncomment to disable device display name lookup over federation. By default, the
-        # Federation API allows other homeservers to obtain device display names of any user
-        # on this homeserver. Defaults to 'true'.
+        # Uncomment to allow device display name lookup over federation. By default, the
+        # Federation API prevents other homeservers from obtaining the display names of
+        # user devices on this homeserver. Defaults to 'false'.
         #
-        #allow_device_name_lookup_over_federation: false
+        #allow_device_name_lookup_over_federation: true
         """
 
synapse/config/logger.py
@ -110,13 +110,6 @@ loggers:
         # information such as access tokens.
         level: INFO
 
-    twisted:
-        # We send the twisted logging directly to the file handler,
-        # to work around https://github.com/matrix-org/synapse/issues/3471
-        # when using "buffer" logger. Use "console" to log to stderr instead.
-        handlers: [file]
-        propagate: false
-
 root:
     level: INFO
 
synapse/config/registration.py
@ -43,6 +43,9 @@ class RegistrationConfig(Config):
         self.registration_requires_token = config.get(
             "registration_requires_token", False
         )
+        self.enable_registration_token_3pid_bypass = config.get(
+            "enable_registration_token_3pid_bypass", False
+        )
         self.registration_shared_secret = config.get("registration_shared_secret")
 
         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
@ -309,6 +312,12 @@ class RegistrationConfig(Config):
         #
         #registration_requires_token: true
 
+        # Allow users to submit a token during registration to bypass any required 3pid
+        # steps configured in `registrations_require_3pid`.
+        # Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow.
+        #
+        #enable_registration_token_3pid_bypass: false
+
         # If set, allows registration of standard or admin accounts by anyone who
         # has the shared secret, even if registration is otherwise disabled.
         #
|
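In effect, a registration token normally supplements any configured 3pid requirement, and the new bypass lets a valid token replace it. A hedged sketch of that decision (hypothetical helper, not code from this diff; it only reads the two options shown above):

from typing import Any, Dict


def token_alone_sufficient(config: Dict[str, Any]) -> bool:
    """True if a registration token can satisfy registration without a 3pid."""
    requires_3pid = bool(config.get("registrations_require_3pid"))
    bypass = bool(config.get("enable_registration_token_3pid_bypass", False))
    return not requires_3pid or bypass


assert token_alone_sufficient({})  # no 3pid requirement configured
assert token_alone_sufficient(
    {"registrations_require_3pid": ["email"],
     "enable_registration_token_3pid_bypass": True}
)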
@@ -186,7 +186,7 @@ KNOWN_RESOURCES = {
 class HttpResourceConfig:
     names: List[str] = attr.ib(
         factory=list,
-        validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)),  # type: ignore
+        validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)),
     )
     compress: bool = attr.ib(
         default=False,
@@ -231,9 +231,7 @@ class ManholeConfig:
 class LimitRemoteRoomsConfig:
     enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
     complexity: Union[float, int] = attr.ib(
-        validator=attr.validators.instance_of(
-            (float, int)  # type: ignore[arg-type] # noqa
-        ),
+        validator=attr.validators.instance_of((float, int)),  # noqa
         default=1.0,
     )
     complexity_error: str = attr.ib(
@@ -321,10 +319,6 @@ class ServerConfig(Config):
             self.presence_router_config,
         ) = load_module(presence_router_config, ("presence", "presence_router"))

-        # Whether to update the user directory or not. This should be set to
-        # false only if we are updating the user directory in a worker
-        self.update_user_directory = config.get("update_user_directory", True)
-
         # whether to enable the media repository endpoints. This should be set
         # to false if the media repository is running as a separate endpoint;
         # doing so ensures that we will not run cache cleanup jobs on the
@@ -415,6 +409,7 @@ class ServerConfig(Config):
         )

         self.mau_trial_days = config.get("mau_trial_days", 0)
+        self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {})
         self.mau_limit_alerting = config.get("mau_limit_alerting", True)

         # How long to keep redacted events in the database in unredacted form
@@ -1107,6 +1102,11 @@ class ServerConfig(Config):
        # sign up in a short space of time never to return after their initial
        # session.
        #
+       # The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
+       # applies a different trial number if the user was registered by an appservice.
+       # A value of 0 means no trial days are applied. Appservices not listed in this
+       # dictionary use the value of `mau_trial_days` instead.
+       #
        # 'mau_limit_alerting' is a means of limiting client side alerting
        # should the mau limit be reached. This is useful for small instances
        # where the admin has 5 mau seats (say) for 5 specific people and no
@@ -1117,6 +1117,8 @@ class ServerConfig(Config):
        #max_mau_value: 50
        #mau_trial_days: 2
        #mau_limit_alerting: false
+       #mau_appservice_trial_days:
+       #  "appservice-id": 1

        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
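The lookup semantics of the new dictionary are: users registered by a listed appservice get that appservice's trial length, and everyone else falls back to the global `mau_trial_days`. A small sketch under those assumptions (hypothetical helper; the real enforcement lives in Synapse's MAU tracking, not shown in this diff):

from typing import Dict, Optional


def trial_days_for_user(
    mau_trial_days: int,
    mau_appservice_trial_days: Dict[str, int],
    appservice_id: Optional[str],
) -> int:
    """Appservice-registered users may get their own trial length;
    everyone else uses the global mau_trial_days."""
    if appservice_id is not None:
        return mau_appservice_trial_days.get(appservice_id, mau_trial_days)
    return mau_trial_days


assert trial_days_for_user(2, {"appservice-id": 1}, "appservice-id") == 1
assert trial_days_for_user(2, {"appservice-id": 1}, None) == 2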
@@ -14,7 +14,8 @@
 # limitations under the License.

 import argparse
-from typing import Any, List, Union
+import logging
+from typing import Any, Dict, List, Union

 import attr

@@ -42,6 +43,13 @@ synapse process before they can be run in a separate worker.
 Please add ``start_pushers: false`` to the main config
 """

+_DEPRECATED_WORKER_DUTY_OPTION_USED = """
+The '%s' configuration option is deprecated and will be removed in a future
+Synapse version. Please use ``%s: name_of_worker`` instead.
+"""
+
+logger = logging.getLogger(__name__)
+

 def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
     """Helper for allowing parsing a string or list of strings to a config
@@ -296,6 +304,112 @@ class WorkerConfig(Config):
             self.worker_name is None and background_tasks_instance == "master"
         ) or self.worker_name == background_tasks_instance

+        self.should_notify_appservices = self._should_this_worker_perform_duty(
+            config,
+            legacy_master_option_name="notify_appservices",
+            legacy_worker_app_name="synapse.app.appservice",
+            new_option_name="notify_appservices_from_worker",
+        )
+
+        self.should_update_user_directory = self._should_this_worker_perform_duty(
+            config,
+            legacy_master_option_name="update_user_directory",
+            legacy_worker_app_name="synapse.app.user_dir",
+            new_option_name="update_user_directory_from_worker",
+        )
+
+    def _should_this_worker_perform_duty(
+        self,
+        config: Dict[str, Any],
+        legacy_master_option_name: str,
+        legacy_worker_app_name: str,
+        new_option_name: str,
+    ) -> bool:
+        """
+        Figures out whether this worker should perform a certain duty.
+
+        This function is temporary and is only to deal with the complexity
+        of allowing old, transitional and new configurations all at once.
+
+        Contradictions between the legacy and new part of a transitional configuration
+        will lead to a ConfigError.
+
+        Parameters:
+            config: The config dictionary
+            legacy_master_option_name: The name of a legacy option, whose value is boolean,
+                specifying whether it's the master that should handle a certain duty.
+                e.g. "notify_appservices"
+            legacy_worker_app_name: The name of a legacy Synapse worker application
+                that would traditionally perform this duty.
+                e.g. "synapse.app.appservice"
+            new_option_name: The name of the new option, whose value is the name of a
+                designated worker to perform the duty.
+                e.g. "notify_appservices_from_worker"
+        """
+
+        # None means 'unspecified'; True means 'run here' and False means
+        # 'don't run here'.
+        new_option_should_run_here = None
+        if new_option_name in config:
+            designated_worker = config[new_option_name] or "master"
+            new_option_should_run_here = (
+                designated_worker == "master" and self.worker_name is None
+            ) or designated_worker == self.worker_name
+
+        legacy_option_should_run_here = None
+        if legacy_master_option_name in config:
+            run_on_master = bool(config[legacy_master_option_name])
+
+            legacy_option_should_run_here = (
+                self.worker_name is None and run_on_master
+            ) or (self.worker_app == legacy_worker_app_name and not run_on_master)
+
+            # Suggest using the new option instead.
+            logger.warning(
+                _DEPRECATED_WORKER_DUTY_OPTION_USED,
+                legacy_master_option_name,
+                new_option_name,
+            )
+
+        if self.worker_app == legacy_worker_app_name and config.get(
+            legacy_master_option_name, True
+        ):
+            # As an extra bit of complication, we need to check that the
+            # specialised worker is only used if the legacy config says the
+            # master isn't performing the duties.
+            raise ConfigError(
+                f"Cannot use deprecated worker app type '{legacy_worker_app_name}' whilst deprecated option '{legacy_master_option_name}' is not set to false.\n"
+                f"Consider setting `worker_app: synapse.app.generic_worker` and using the '{new_option_name}' option instead.\n"
+                f"The '{new_option_name}' option replaces '{legacy_master_option_name}'."
+            )
+
+        if new_option_should_run_here is None and legacy_option_should_run_here is None:
+            # Neither option specified; the fallback behaviour is to run on the main process
+            return self.worker_name is None
+
+        if (
+            new_option_should_run_here is not None
+            and legacy_option_should_run_here is not None
+        ):
+            # Both options specified; ensure they match!
+            if new_option_should_run_here != legacy_option_should_run_here:
+                update_worker_type = (
+                    " and set worker_app: synapse.app.generic_worker"
+                    if self.worker_app == legacy_worker_app_name
+                    else ""
+                )
+                # If the values conflict, we suggest the admin removes the legacy option
+                # for simplicity.
+                raise ConfigError(
+                    f"Conflicting configuration options: {legacy_master_option_name} (legacy), {new_option_name} (new).\n"
+                    f"Suggestion: remove {legacy_master_option_name}{update_worker_type}.\n"
+                )
+
+        # We've already validated that these aren't conflicting; now just see if
+        # either is True.
+        # (By this point, these are either the same value or only one is not None.)
+        return bool(new_option_should_run_here or legacy_option_should_run_here)
+
     def generate_config_section(self, **kwargs: Any) -> str:
         return """\
 ## Workers ##
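Condensed, the resolution rules are: the new option designates a worker by name ("master" or an empty value meaning the main process), the legacy option is a boolean on the main process, and with neither set the duty defaults to the main process. A standalone sketch of just that truth table (conflict and ConfigError handling omitted):

from typing import Any, Dict, Optional


def should_run_here(
    config: Dict[str, Any],
    worker_name: Optional[str],
    worker_app: Optional[str],
    legacy_option: str,
    legacy_app: str,
    new_option: str,
) -> bool:
    new_says = None
    if new_option in config:
        designated = config[new_option] or "master"
        new_says = (designated == "master" and worker_name is None) or (
            designated == worker_name
        )
    legacy_says = None
    if legacy_option in config:
        on_master = bool(config[legacy_option])
        legacy_says = (worker_name is None and on_master) or (
            worker_app == legacy_app and not on_master
        )
    if new_says is None and legacy_says is None:
        return worker_name is None  # default: run on the main process
    return bool(new_says or legacy_says)


# Unconfigured: the main process performs the duty.
assert should_run_here({}, None, None, "notify_appservices",
                       "synapse.app.appservice", "notify_appservices_from_worker")
# New-style: the named generic worker performs it instead.
assert should_run_here({"notify_appservices_from_worker": "as1"}, "as1",
                       "synapse.app.generic_worker", "notify_appservices",
                       "synapse.app.appservice", "notify_appservices_from_worker")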
@@ -213,10 +213,17 @@ class _EventInternalMetadata:
         return self.outlier

     def is_out_of_band_membership(self) -> bool:
-        """Whether this is an out of band membership, like an invite or an invite
-        rejection. This is needed as those events are marked as outliers, but
-        they still need to be processed as if they're new events (e.g. updating
-        invite state in the database, relaying to clients, etc).
+        """Whether this event is an out-of-band membership.
+
+        OOB memberships are a special case of outlier events: they are membership events
+        for federated rooms that we aren't full members of. Examples include invites
+        received over federation, and rejections for such invites.
+
+        The concept of an OOB membership is needed because these events need to be
+        processed as if they're new regular events (e.g. updating membership state in
+        the database, relaying to clients via /sync, etc) despite being outliers.
+
+        See also https://matrix-org.github.io/synapse/develop/development/room-dag-concepts.html#out-of-band-membership-events.

         (Added in synapse 0.99.0, so may be unreliable for events received before that)
         """
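A toy illustration of the subset relationship the new docstring describes, under the assumption (stated later in this diff) that OOB memberships are always outliers. This is a simplified stand-in, not the real _EventInternalMetadata:

from dataclasses import dataclass


@dataclass
class InternalMetadataSketch:
    outlier: bool = False
    out_of_band_membership: bool = False

    def is_out_of_band_membership(self) -> bool:
        # An OOB membership is always an outlier, but not every outlier
        # (e.g. a backfilled event) is an OOB membership.
        return self.outlier and self.out_of_band_membership


invite_over_federation = InternalMetadataSketch(outlier=True, out_of_band_membership=True)
backfilled_event = InternalMetadataSketch(outlier=True)
assert invite_over_federation.is_out_of_band_membership()
assert not backfilled_event.is_out_of_band_membership()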
@@ -22,11 +22,16 @@ from typing import (
     List,
     Optional,
     Set,
+    TypeVar,
     Union,
 )

+from typing_extensions import ParamSpec
+
+from twisted.internet.defer import CancelledError
+
 from synapse.api.presence import UserPresenceState
-from synapse.util.async_helpers import maybe_awaitable
+from synapse.util.async_helpers import delay_cancellation, maybe_awaitable

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -40,6 +45,10 @@ GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
 logger = logging.getLogger(__name__)


+P = ParamSpec("P")
+R = TypeVar("R")
+
+
 def load_legacy_presence_router(hs: "HomeServer") -> None:
     """Wrapper that loads a presence router module configured using the old
     configuration, and registers the hooks they implement.
@@ -63,13 +72,15 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:

     # All methods that the module provides should be async, but this wasn't enforced
     # in the old module system, so we wrap them if needed
-    def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
+    def async_wrapper(
+        f: Optional[Callable[P, R]]
+    ) -> Optional[Callable[P, Awaitable[R]]]:
         # f might be None if the callback isn't implemented by the module. In this
         # case we don't want to register a callback at all so we return None.
         if f is None:
             return None

-        def run(*args: Any, **kwargs: Any) -> Awaitable:
+        def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
             # Assertion required because mypy can't prove we won't change `f`
             # back to `None`. See
             # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
@@ -80,7 +91,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:
         return run

     # Register the hooks through the module API.
-    hooks = {
+    hooks: Dict[str, Optional[Callable[..., Any]]] = {
         hook: async_wrapper(getattr(presence_router, hook, None))
         for hook in presence_router_methods
     }
@@ -147,7 +158,11 @@ class PresenceRouter:
         # run all the callbacks for get_users_for_states and combine the results
         for callback in self._get_users_for_states_callbacks:
             try:
-                result = await callback(state_updates)
+                # Note: result is an object here, because we don't trust modules to
+                # return the types they're supposed to.
+                result: object = await delay_cancellation(callback(state_updates))
+            except CancelledError:
+                raise
             except Exception as e:
                 logger.warning("Failed to run module API callback %s: %s", callback, e)
                 continue
@@ -199,7 +214,9 @@ class PresenceRouter:
         # run all the callbacks for get_interested_users and combine the results
         for callback in self._get_interested_users_callbacks:
             try:
-                result = await callback(user_id)
+                result = await delay_cancellation(callback(user_id))
+            except CancelledError:
+                raise
             except Exception as e:
                 logger.warning("Failed to run module API callback %s: %s", callback, e)
                 continue
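The ParamSpec change above is purely a typing improvement: the wrapper used to erase each hook's signature to Callable[..., Awaitable], and now mypy sees the exact parameters. A self-contained sketch of the pattern (asyncio stand-in; it assumes the legacy hook is synchronous, whereas the real code also handles hooks that are already async):

import asyncio
from typing import Awaitable, Callable, Optional, TypeVar

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def async_wrapper(f: Optional[Callable[P, R]]) -> Optional[Callable[P, Awaitable[R]]]:
    if f is None:
        return None  # hook not implemented: register nothing

    async def run(*args: P.args, **kwargs: P.kwargs) -> R:
        # Sketch assumption: the legacy hook is synchronous; wrap its value.
        return f(*args, **kwargs)

    return run


def greet(name: str) -> str:  # a legacy, synchronous hook
    return f"hello {name}"


wrapped = async_wrapper(greet)
assert wrapped is not None
print(asyncio.run(wrapped("world")))  # mypy now knows this takes (name: str)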
@@ -31,7 +31,7 @@ from synapse.rest.media.v1._base import FileInfo
 from synapse.rest.media.v1.media_storage import ReadableFileWrapper
 from synapse.spam_checker_api import RegistrationBehaviour
 from synapse.types import RoomAlias, UserProfile
-from synapse.util.async_helpers import maybe_awaitable
+from synapse.util.async_helpers import delay_cancellation, maybe_awaitable

 if TYPE_CHECKING:
     import synapse.events
@@ -255,7 +255,7 @@ class SpamChecker:
             will be used as the error message returned to the user.
         """
         for callback in self._check_event_for_spam_callbacks:
-            res: Union[bool, str] = await callback(event)
+            res: Union[bool, str] = await delay_cancellation(callback(event))
             if res:
                 return res

@@ -276,7 +276,10 @@ class SpamChecker:
             Whether the user may join the room
         """
         for callback in self._user_may_join_room_callbacks:
-            if await callback(user_id, room_id, is_invited) is False:
+            may_join_room = await delay_cancellation(
+                callback(user_id, room_id, is_invited)
+            )
+            if may_join_room is False:
                 return False

         return True
@@ -297,7 +300,10 @@ class SpamChecker:
             True if the user may send an invite, otherwise False
         """
         for callback in self._user_may_invite_callbacks:
-            if await callback(inviter_userid, invitee_userid, room_id) is False:
+            may_invite = await delay_cancellation(
+                callback(inviter_userid, invitee_userid, room_id)
+            )
+            if may_invite is False:
                 return False

         return True
@@ -322,7 +328,10 @@ class SpamChecker:
             True if the user may send the invite, otherwise False
         """
         for callback in self._user_may_send_3pid_invite_callbacks:
-            if await callback(inviter_userid, medium, address, room_id) is False:
+            may_send_3pid_invite = await delay_cancellation(
+                callback(inviter_userid, medium, address, room_id)
+            )
+            if may_send_3pid_invite is False:
                 return False

         return True
@@ -339,7 +348,8 @@ class SpamChecker:
             True if the user may create a room, otherwise False
         """
         for callback in self._user_may_create_room_callbacks:
-            if await callback(userid) is False:
+            may_create_room = await delay_cancellation(callback(userid))
+            if may_create_room is False:
                 return False

         return True
@@ -359,7 +369,10 @@ class SpamChecker:
             True if the user may create a room alias, otherwise False
         """
         for callback in self._user_may_create_room_alias_callbacks:
-            if await callback(userid, room_alias) is False:
+            may_create_room_alias = await delay_cancellation(
+                callback(userid, room_alias)
+            )
+            if may_create_room_alias is False:
                 return False

         return True
@@ -377,7 +390,8 @@ class SpamChecker:
             True if the user may publish the room, otherwise False
         """
         for callback in self._user_may_publish_room_callbacks:
-            if await callback(userid, room_id) is False:
+            may_publish_room = await delay_cancellation(callback(userid, room_id))
+            if may_publish_room is False:
                 return False

         return True
@@ -400,7 +414,7 @@ class SpamChecker:
         for callback in self._check_username_for_spam_callbacks:
             # Make a copy of the user profile object to ensure the spam checker cannot
             # modify it.
-            if await callback(user_profile.copy()):
+            if await delay_cancellation(callback(user_profile.copy())):
                 return True

         return False
@@ -428,7 +442,7 @@ class SpamChecker:
         """

         for callback in self._check_registration_for_spam_callbacks:
-            behaviour = await (
+            behaviour = await delay_cancellation(
                 callback(email_threepid, username, request_info, auth_provider_id)
             )
             assert isinstance(behaviour, RegistrationBehaviour)
@@ -472,7 +486,7 @@ class SpamChecker:
         """

         for callback in self._check_media_file_for_spam_callbacks:
-            spam = await callback(file_wrapper, file_info)
+            spam = await delay_cancellation(callback(file_wrapper, file_info))
             if spam:
                 return True
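All of the spam-checker changes above apply one pattern: wrap each module callback in delay_cancellation so a cancelled request cannot interrupt a half-run callback, re-raise cancellation where it is caught, and only swallow genuine module exceptions. A minimal asyncio sketch of the pattern (the stand-in helper below simply awaits; the real synapse.util.async_helpers.delay_cancellation is Twisted-based and defers cancellation until the wrapped call settles):

import asyncio
import logging
from typing import Awaitable, Callable, Iterable, Optional

logger = logging.getLogger(__name__)


async def delay_cancellation(awaitable: Awaitable[object]) -> object:
    # Simplified stand-in for the real helper described above.
    return await awaitable


async def first_veto(
    callbacks: Iterable[Callable[[str], Awaitable[Optional[bool]]]], user_id: str
) -> bool:
    for callback in callbacks:
        try:
            allowed = await delay_cancellation(callback(user_id))
        except asyncio.CancelledError:
            raise  # cancellation must not be treated as a module failure
        except Exception as e:
            logger.warning("Failed to run module API callback %s: %s", callback, e)
            continue
        if allowed is False:
            return False
    return True


async def deny(_: str) -> bool:
    return False


print(asyncio.run(first_veto([deny], "@alice:example.org")))  # -> False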
@@ -14,12 +14,14 @@
 import logging
 from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

+from twisted.internet.defer import CancelledError
+
 from synapse.api.errors import ModuleFailedException, SynapseError
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.storage.roommember import ProfileInfo
 from synapse.types import Requester, StateMap
-from synapse.util.async_helpers import maybe_awaitable
+from synapse.util.async_helpers import delay_cancellation, maybe_awaitable

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -263,7 +265,11 @@ class ThirdPartyEventRules:

         for callback in self._check_event_allowed_callbacks:
             try:
-                res, replacement_data = await callback(event, state_events)
+                res, replacement_data = await delay_cancellation(
+                    callback(event, state_events)
+                )
+            except CancelledError:
+                raise
             except SynapseError as e:
                 # FIXME: Being able to throw SynapseErrors is relied upon by
                 # some modules. PR #10386 accidentally broke this ability.
@@ -333,8 +339,13 @@ class ThirdPartyEventRules:

         for callback in self._check_threepid_can_be_invited_callbacks:
             try:
-                if await callback(medium, address, state_events) is False:
+                threepid_can_be_invited = await delay_cancellation(
+                    callback(medium, address, state_events)
+                )
+                if threepid_can_be_invited is False:
                     return False
+            except CancelledError:
+                raise
             except Exception as e:
                 logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -361,8 +372,13 @@ class ThirdPartyEventRules:

         for callback in self._check_visibility_can_be_modified_callbacks:
             try:
-                if await callback(room_id, state_events, new_visibility) is False:
+                visibility_can_be_modified = await delay_cancellation(
+                    callback(room_id, state_events, new_visibility)
+                )
+                if visibility_can_be_modified is False:
                     return False
+            except CancelledError:
+                raise
             except Exception as e:
                 logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -400,8 +416,11 @@ class ThirdPartyEventRules:
         """
         for callback in self._check_can_shutdown_room_callbacks:
             try:
-                if await callback(user_id, room_id) is False:
+                can_shutdown_room = await delay_cancellation(callback(user_id, room_id))
+                if can_shutdown_room is False:
                     return False
+            except CancelledError:
+                raise
             except Exception as e:
                 logger.exception(
                     "Failed to run module API callback %s: %s", callback, e
@@ -422,8 +441,13 @@ class ThirdPartyEventRules:
         """
         for callback in self._check_can_deactivate_user_callbacks:
             try:
-                if await callback(user_id, by_admin) is False:
+                can_deactivate_user = await delay_cancellation(
+                    callback(user_id, by_admin)
+                )
+                if can_deactivate_user is False:
                     return False
+            except CancelledError:
+                raise
             except Exception as e:
                 logger.exception(
                     "Failed to run module API callback %s: %s", callback, e
@@ -22,12 +22,12 @@ from typing import (
     Iterable,
     List,
     Mapping,
+    MutableMapping,
     Optional,
     Union,
 )

 import attr
-from frozendict import frozendict

 from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
 from synapse.api.errors import Codes, SynapseError
@@ -204,7 +204,9 @@ def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None:
     key_to_move = field.pop(-1)
     sub_dict = src
     for sub_field in field:  # e.g. sub_field => "content"
-        if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
+        if sub_field in sub_dict and isinstance(
+            sub_dict[sub_field], collections.abc.Mapping
+        ):
             sub_dict = sub_dict[sub_field]
         else:
             return
@@ -425,13 +427,12 @@ class EventClientSerializer:

         # Check if there are any bundled aggregations to include with the event.
         if bundle_aggregations:
-            event_aggregations = bundle_aggregations.get(event.event_id)
-            if event_aggregations:
+            if event.event_id in bundle_aggregations:
                 self._inject_bundled_aggregations(
                     event,
                     time_now,
                     config,
-                    event_aggregations,
+                    bundle_aggregations,
                     serialized_event,
                     apply_edits=apply_edits,
                 )
@@ -470,7 +471,7 @@ class EventClientSerializer:
         event: EventBase,
         time_now: int,
         config: SerializeEventConfig,
-        aggregations: "BundledAggregations",
+        bundled_aggregations: Dict[str, "BundledAggregations"],
         serialized_event: JsonDict,
         apply_edits: bool,
     ) -> None:
@@ -480,22 +481,37 @@ class EventClientSerializer:
             event: The event being serialized.
             time_now: The current time in milliseconds
             config: Event serialization config
-            aggregations: The bundled aggregation to serialize.
+            bundled_aggregations: Bundled aggregations to be injected.
+                A map from event_id to aggregation data. Must contain at least an
+                entry for `event`.
+
+                While serializing the bundled aggregations this map may be searched
+                again for additional events in a recursive manner.
             serialized_event: The serialized event which may be modified.
             apply_edits: Whether the content of the event should be modified to reflect
                 any replacement in `aggregations.replace`.
         """
+
+        # We have already checked that aggregations exist for this event.
+        event_aggregations = bundled_aggregations[event.event_id]
+
+        # The JSON dictionary to be added under the unsigned property of the event
+        # being serialized.
         serialized_aggregations = {}

-        if aggregations.annotations:
-            serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations
+        if event_aggregations.annotations:
+            serialized_aggregations[
+                RelationTypes.ANNOTATION
+            ] = event_aggregations.annotations

-        if aggregations.references:
-            serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references
+        if event_aggregations.references:
+            serialized_aggregations[
+                RelationTypes.REFERENCE
+            ] = event_aggregations.references

-        if aggregations.replace:
+        if event_aggregations.replace:
             # If there is an edit, optionally apply it to the event.
-            edit = aggregations.replace
+            edit = event_aggregations.replace
             if apply_edits:
                 self._apply_edit(event, serialized_event, edit)

@@ -506,19 +522,16 @@ class EventClientSerializer:
             "sender": edit.sender,
         }

-        # If this event is the start of a thread, include a summary of the replies.
-        if aggregations.thread:
-            thread = aggregations.thread
-
-            # Don't bundle aggregations as this could recurse forever.
-            serialized_latest_event = serialize_event(
-                thread.latest_event, time_now, config=config
-            )
-            # Manually apply an edit, if one exists.
-            if thread.latest_edit:
-                self._apply_edit(
-                    thread.latest_event, serialized_latest_event, thread.latest_edit
-                )
+        # Include any threaded replies to this event.
+        if event_aggregations.thread:
+            thread = event_aggregations.thread
+
+            serialized_latest_event = self.serialize_event(
+                thread.latest_event,
+                time_now,
+                config=config,
+                bundle_aggregations=bundled_aggregations,
+            )

             thread_summary = {
                 "latest_event": serialized_latest_event,
@@ -568,10 +581,20 @@ class EventClientSerializer:
 ]


-def copy_power_levels_contents(
-    old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
+_PowerLevel = Union[str, int]
+
+
+def copy_and_fixup_power_levels_contents(
+    old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
 ) -> Dict[str, Union[int, Dict[str, int]]]:
-    """Copy the content of a power_levels event, unfreezing frozendicts along the way
+    """Copy the content of a power_levels event, unfreezing frozendicts along the way.
+
+    We accept as input power level values which are strings, provided they represent an
+    integer, e.g. `"100"` instead of 100. Such strings are converted to integers
+    in the returned dictionary (hence "fixup" in the function name).
+
+    Note that future room versions will outlaw such stringy power levels (see
+    https://github.com/matrix-org/matrix-spec/issues/853).

     Raises:
         TypeError if the input does not look like a valid power levels event content
@@ -580,29 +603,47 @@ def copy_power_levels_contents(
         raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

     power_levels: Dict[str, Union[int, Dict[str, int]]] = {}

     for k, v in old_power_levels.items():
-
-        if isinstance(v, int):
-            power_levels[k] = v
-            continue
-
         if isinstance(v, collections.abc.Mapping):
             h: Dict[str, int] = {}
             power_levels[k] = h
             for k1, v1 in v.items():
-                # we should only have one level of nesting
-                if not isinstance(v1, int):
-                    raise TypeError(
-                        "Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
-                    )
-                h[k1] = v1
-            continue
-
-        raise TypeError("Invalid power_levels value for %s: %r" % (k, v))
+                _copy_power_level_value_as_integer(v1, h, k1)
+        else:
+            _copy_power_level_value_as_integer(v, power_levels, k)

     return power_levels


+def _copy_power_level_value_as_integer(
+    old_value: object,
+    power_levels: MutableMapping[str, Any],
+    key: str,
+) -> None:
+    """Set `power_levels[key]` to the integer represented by `old_value`.
+
+    :raises TypeError: if `old_value` is not an integer, nor a base-10 string
+        representation of an integer.
+    """
+    if isinstance(old_value, int):
+        power_levels[key] = old_value
+        return
+
+    if isinstance(old_value, str):
+        try:
+            parsed_value = int(old_value, base=10)
+        except ValueError:
+            # Fall through to the final TypeError.
+            pass
+        else:
+            power_levels[key] = parsed_value
+            return
+
+    raise TypeError(f"Invalid power_levels value for {key}: {old_value}")
+
+
 def validate_canonicaljson(value: Any) -> None:
     """
     Ensure that the JSON object is valid according to the rules of canonical JSON.
@@ -622,7 +663,7 @@ def validate_canonicaljson(value: Any) -> None:
         # Note that Infinity, -Infinity, and NaN are also considered floats.
         raise SynapseError(400, "Bad JSON value: float", Codes.BAD_JSON)

-    elif isinstance(value, (dict, frozendict)):
+    elif isinstance(value, collections.abc.Mapping):
         for v in value.values():
             validate_canonicaljson(v)
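A worked example of the "fixup" behaviour introduced above: integer power levels pass through, base-10 strings are coerced to integers, and anything else raises TypeError. Re-implemented here for illustration rather than imported from the diff:

from typing import Any, Dict, MutableMapping


def fixup_value(old_value: object, out: MutableMapping[str, Any], key: str) -> None:
    if isinstance(old_value, int):
        out[key] = old_value
        return
    if isinstance(old_value, str):
        try:
            out[key] = int(old_value, base=10)
            return
        except ValueError:
            pass
    raise TypeError(f"Invalid power_levels value for {key}: {old_value}")


fixed: Dict[str, Any] = {}
fixup_value("100", fixed, "ban")   # stringy "100" becomes 100
fixup_value(50, fixed, "kick")     # ints pass through untouched
print(fixed)                       # {'ban': 100, 'kick': 50}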
@@ -618,7 +618,7 @@ class FederationClient(FederationBase):
         #
         # Dendrite returns a 404 (with a body of "404 page not found");
         # Conduit returns a 404 (with no body); and Synapse returns a 400
-        # with M_UNRECOGNISED.
+        # with M_UNRECOGNIZED.
         #
         # This needs to be rather specific as some endpoints truly do return 404
         # errors.
@@ -1426,6 +1426,8 @@ class FederationClient(FederationBase):
         room = res.get("room")
         if not isinstance(room, dict):
             raise InvalidResponseError("'room' must be a dict")
+        if room.get("room_id") != room_id:
+            raise InvalidResponseError("wrong room returned in hierarchy response")

         # Validate children_state of the room.
         children_state = room.pop("children_state", [])
@@ -268,8 +268,8 @@ class FederationServer(FederationBase):
             transaction_id=transaction_id,
             destination=destination,
             origin=origin,
-            origin_server_ts=transaction_data.get("origin_server_ts"),  # type: ignore
-            pdus=transaction_data.get("pdus"),  # type: ignore
+            origin_server_ts=transaction_data.get("origin_server_ts"),  # type: ignore[arg-type]
+            pdus=transaction_data.get("pdus"),
             edus=transaction_data.get("edus"),
         )
@@ -343,9 +343,16 @@ class FederationSender(AbstractFederationSender):
                 last_token, self._last_poked_id, limit=100
             )

-            logger.debug("Handling %s -> %s", last_token, next_token)
+            logger.debug(
+                "Handling %i -> %i: %i events to send (current id %i)",
+                last_token,
+                next_token,
+                len(events),
+                self._last_poked_id,
+            )

             if not events and next_token >= self._last_poked_id:
+                logger.debug("All events processed")
                 break

             async def handle_event(event: EventBase) -> None:
@@ -353,9 +360,53 @@ class FederationSender(AbstractFederationSender):
                 send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                 is_mine = self.is_mine_id(event.sender)
                 if not is_mine and send_on_behalf_of is None:
+                    logger.debug("Not sending remote-origin event %s", event)
                     return

+                # We also want to not send out-of-band membership events.
+                #
+                # OOB memberships are used in three (and a half) situations:
+                #
+                # (1) invite events which we have received over federation. Those
+                #     will have a `sender` on a different server, so will be
+                #     skipped by the "is_mine" test above anyway.
+                #
+                # (2) rejections of invites to federated rooms - either remotely
+                #     or locally generated. (Such rejections are normally
+                #     created via federation, in which case the remote server is
+                #     responsible for sending out the rejection. If that fails,
+                #     we'll create a leave event locally, but that's only really
+                #     for the benefit of the invited user - we don't have enough
+                #     information to send it out over federation).
+                #
+                # (2a) rescinded knocks. These are identical to rejected invites.
+                #
+                # (3) knock events which we have sent over federation. As with
+                #     invite rejections, the remote server should send them out to
+                #     the federation.
+                #
+                # So, in all the above cases, we want to ignore such events.
+                #
+                # OOB memberships are always(?) outliers anyway, so if we *don't*
+                # ignore them, we'll get an exception further down when we try to
+                # fetch the membership list for the room.
+                #
+                # Arguably, we could equivalently ignore all outliers here, since
+                # in theory the only way for an outlier with a local `sender` to
+                # exist is by being an OOB membership (via one of (2), (2a) or (3)
+                # above).
+                #
+                if event.internal_metadata.is_out_of_band_membership():
+                    logger.debug("Not sending OOB membership event %s", event)
+                    return
+
+                # Finally, there are some other events that we should not send out
+                # until someone asks for them. They are explicitly flagged as such
+                # with `proactively_send: False`.
                 if not event.internal_metadata.should_proactively_send():
+                    logger.debug(
+                        "Not sending event with proactively_send=false: %s", event
+                    )
                     return

                 destinations: Optional[Set[str]] = None
@@ -419,7 +470,10 @@ class FederationSender(AbstractFederationSender):
                         "federation_sender"
                     ).observe((now - ts) / 1000)

-            async def handle_room_events(events: Iterable[EventBase]) -> None:
+            async def handle_room_events(events: List[EventBase]) -> None:
+                logger.debug(
+                    "Handling %i events in room %s", len(events), events[0].room_id
+                )
                 with Measure(self.clock, "handle_room_events"):
                     for event in events:
                         await handle_event(event)
@@ -438,6 +492,7 @@ class FederationSender(AbstractFederationSender):
                 )
             )

+            logger.debug("Successfully handled up to %i", next_token)
             await self.store.update_federation_out_pos("events", next_token)

             if events:
@@ -229,21 +229,21 @@ class TransportLayerClient:
         """
         logger.debug(
             "send_data dest=%s, txid=%s",
-            transaction.destination,  # type: ignore
-            transaction.transaction_id,  # type: ignore
+            transaction.destination,
+            transaction.transaction_id,
         )

-        if transaction.destination == self.server_name:  # type: ignore
+        if transaction.destination == self.server_name:
             raise RuntimeError("Transport layer cannot send to itself!")

         # FIXME: This is only used by the tests. The actual json sent is
         # generated by the json_data_callback.
         json_data = transaction.get_dict()

-        path = _create_v1_path("/send/%s", transaction.transaction_id)  # type: ignore
+        path = _create_v1_path("/send/%s", transaction.transaction_id)

         return await self.client.put_json(
-            transaction.destination,  # type: ignore
+            transaction.destination,
             path=path,
             data=json_data,
             json_data_callback=json_data_callback,
@@ -23,6 +23,7 @@ from synapse.api.errors import AuthError, StoreError, SynapseError
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.types import UserID
 from synapse.util import stringutils
+from synapse.util.async_helpers import delay_cancellation

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -150,7 +151,7 @@ class AccountValidityHandler:
             Whether the user has expired.
         """
         for callback in self._is_user_expired_callbacks:
-            expired = await callback(user_id)
+            expired = await delay_cancellation(callback(user_id))
             if expired is not None:
                 return expired
@@ -59,7 +59,7 @@ class ApplicationServicesHandler:
         self.scheduler = hs.get_application_service_scheduler()
         self.started_scheduler = False
         self.clock = hs.get_clock()
-        self.notify_appservices = hs.config.appservice.notify_appservices
+        self.notify_appservices = hs.config.worker.should_notify_appservices
         self.event_sources = hs.get_event_sources()
         self._msc2409_to_device_messages_enabled = (
             hs.config.experimental.msc2409_to_device_messages_enabled
@@ -416,7 +416,7 @@ class ApplicationServicesHandler:
         return typing

     async def _handle_receipts(
-        self, service: ApplicationService, new_token: Optional[int]
+        self, service: ApplicationService, new_token: int
     ) -> List[JsonDict]:
         """
         Return the latest read receipts that the given application service should receive.
@@ -447,7 +447,7 @@ class ApplicationServicesHandler:

         receipts_source = self.event_sources.sources.receipt
         receipts, _ = await receipts_source.get_new_events_as(
-            service=service, from_key=from_key
+            service=service, from_key=from_key, to_key=new_token
         )
         return receipts
@ -41,6 +41,7 @@ import pymacaroons
|
|||||||
import unpaddedbase64
|
import unpaddedbase64
|
||||||
from pymacaroons.exceptions import MacaroonVerificationFailedException
|
from pymacaroons.exceptions import MacaroonVerificationFailedException
|
||||||
|
|
||||||
|
from twisted.internet.defer import CancelledError
|
||||||
from twisted.web.server import Request
|
from twisted.web.server import Request
|
||||||
|
|
||||||
from synapse.api.constants import LoginType
|
from synapse.api.constants import LoginType
|
||||||
@ -67,7 +68,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
|
|||||||
from synapse.storage.roommember import ProfileInfo
|
from synapse.storage.roommember import ProfileInfo
|
||||||
from synapse.types import JsonDict, Requester, UserID
|
from synapse.types import JsonDict, Requester, UserID
|
||||||
from synapse.util import stringutils as stringutils
|
from synapse.util import stringutils as stringutils
|
||||||
from synapse.util.async_helpers import maybe_awaitable
|
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
|
||||||
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
|
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
|
||||||
from synapse.util.msisdn import phone_number_to_msisdn
|
from synapse.util.msisdn import phone_number_to_msisdn
|
||||||
from synapse.util.stringutils import base62_encode
|
from synapse.util.stringutils import base62_encode
|
||||||
@ -481,7 +482,7 @@ class AuthHandler:
|
|||||||
sid = authdict["session"]
|
sid = authdict["session"]
|
||||||
|
|
||||||
# Convert the URI and method to strings.
|
# Convert the URI and method to strings.
|
||||||
uri = request.uri.decode("utf-8") # type: ignore
|
uri = request.uri.decode("utf-8")
|
||||||
method = request.method.decode("utf-8")
|
method = request.method.decode("utf-8")
|
||||||
|
|
||||||
# If there's no session ID, create a new session.
|
# If there's no session ID, create a new session.
|
||||||
@ -551,7 +552,7 @@ class AuthHandler:
|
|||||||
await self.store.set_ui_auth_clientdict(sid, clientdict)
|
await self.store.set_ui_auth_clientdict(sid, clientdict)
|
||||||
|
|
||||||
user_agent = get_request_user_agent(request)
|
user_agent = get_request_user_agent(request)
|
||||||
clientip = request.getClientIP()
|
clientip = request.getClientAddress().host
|
||||||
|
|
||||||
await self.store.add_user_agent_ip_to_ui_auth_session(
|
await self.store.add_user_agent_ip_to_ui_auth_session(
|
||||||
session.session_id, user_agent, clientip
|
session.session_id, user_agent, clientip
|
||||||
@@ -2202,7 +2203,11 @@ class PasswordAuthProvider:
 # other than None (i.e. until a callback returns a success)
 for callback in self.auth_checker_callbacks[login_type]:
     try:
-        result = await callback(username, login_type, login_dict)
+        result = await delay_cancellation(
+            callback(username, login_type, login_dict)
+        )
+    except CancelledError:
+        raise
     except Exception as e:
         logger.warning("Failed to run module API callback %s: %s", callback, e)
         continue
@@ -2263,7 +2268,9 @@ class PasswordAuthProvider:

 for callback in self.check_3pid_auth_callbacks:
     try:
-        result = await callback(medium, address, password)
+        result = await delay_cancellation(callback(medium, address, password))
+    except CancelledError:
+        raise
     except Exception as e:
         logger.warning("Failed to run module API callback %s: %s", callback, e)
         continue
@@ -2345,7 +2352,7 @@ class PasswordAuthProvider:
 """
 for callback in self.get_username_for_registration_callbacks:
     try:
-        res = await callback(uia_results, params)
+        res = await delay_cancellation(callback(uia_results, params))

         if isinstance(res, str):
             return res
@@ -2359,6 +2366,8 @@ class PasswordAuthProvider:
             callback,
             res,
         )
+    except CancelledError:
+        raise
     except Exception as e:
         logger.error(
             "Module raised an exception in get_username_for_registration: %s",
@@ -2388,7 +2397,7 @@ class PasswordAuthProvider:
 """
 for callback in self.get_displayname_for_registration_callbacks:
     try:
-        res = await callback(uia_results, params)
+        res = await delay_cancellation(callback(uia_results, params))

         if isinstance(res, str):
             return res
@@ -2402,6 +2411,8 @@ class PasswordAuthProvider:
             callback,
             res,
         )
+    except CancelledError:
+        raise
     except Exception as e:
         logger.error(
             "Module raised an exception in get_displayname_for_registration: %s",
@@ -2429,7 +2440,7 @@ class PasswordAuthProvider:
 """
 for callback in self.is_3pid_allowed_callbacks:
     try:
-        res = await callback(medium, address, registration)
+        res = await delay_cancellation(callback(medium, address, registration))

         if res is False:
             return res
@@ -2443,6 +2454,8 @@ class PasswordAuthProvider:
             callback,
             res,
         )
+    except CancelledError:
+        raise
     except Exception as e:
         logger.error("Module raised an exception in is_3pid_allowed: %s", e)
         raise SynapseError(code=500, msg="Internal Server Error")
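All of the module-callback hunks above follow one pattern: the awaited callback is wrapped in `delay_cancellation` so a cancelled request cannot interrupt module code half-way, and `CancelledError` is re-raised instead of being swallowed by the generic `except Exception` logging branch. A minimal asyncio analogue of that pattern (Synapse's real helper lives in `synapse.util.async_helpers` and is built on Twisted `Deferred`s, so everything below is an illustrative stand-in):

    import asyncio
    from typing import Awaitable, TypeVar

    T = TypeVar("T")

    async def delay_cancellation(awaitable: Awaitable[T]) -> T:
        """Run `awaitable` to completion even if the caller is cancelled,
        then surface the cancellation afterwards."""
        task = asyncio.ensure_future(awaitable)
        try:
            # shield() keeps the caller's cancellation out of `task`.
            return await asyncio.shield(task)
        except asyncio.CancelledError:
            if task.cancelled():
                raise
            # We were cancelled but the inner work is still running: let it
            # finish so the callback is never interrupted mid-flight, then
            # re-raise the cancellation to our own caller.
            await task
            raise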
@@ -164,7 +164,7 @@ class EventHandler:
     event.
 """
 redact_behaviour = (
-    EventRedactBehaviour.AS_IS if show_redacted else EventRedactBehaviour.REDACT
+    EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact
 )
 event = await self.store.get_event(
     event_id, check_room_id=room_id, redact_behaviour=redact_behaviour
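The `AS_IS`/`REDACT` to `as_is`/`redact` renames in this and the following hunks are mechanical: the members of the `EventRedactBehaviour` enum moved to lower-case names, and every call site is updated to match. In miniature (member set assumed for illustration):

    from enum import Enum, auto

    class EventRedactBehaviour(Enum):
        as_is = auto()   # previously AS_IS
        redact = auto()  # previously REDACT

    # Call sites change spelling only; behaviour is identical.
    behaviour = EventRedactBehaviour.as_is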
@@ -316,7 +316,7 @@ class FederationHandler:

 events_to_check = await self.store.get_events_as_list(
     event_ids_to_check,
-    redact_behaviour=EventRedactBehaviour.AS_IS,
+    redact_behaviour=EventRedactBehaviour.as_is,
     get_prev_content=False,
 )

@@ -1494,7 +1494,7 @@ class FederationHandler:

 events = await self.store.get_events_as_list(
     batch,
-    redact_behaviour=EventRedactBehaviour.AS_IS,
+    redact_behaviour=EventRedactBehaviour.as_is,
     allow_rejected=True,
 )
 for event in events:
@@ -860,7 +860,7 @@ class FederationEventHandler:
 evs = await self._store.get_events(
     list(state_map.values()),
     get_prev_content=False,
-    redact_behaviour=EventRedactBehaviour.AS_IS,
+    redact_behaviour=EventRedactBehaviour.as_is,
 )
 event_map.update(evs)

@@ -92,7 +92,7 @@ class IdentityHandler:
 """

 await self._3pid_validation_ratelimiter_ip.ratelimit(
-    None, (medium, request.getClientIP())
+    None, (medium, request.getClientAddress().host)
 )
 await self._3pid_validation_ratelimiter_address.ratelimit(
     None, (medium, address)
@@ -143,7 +143,7 @@ class InitialSyncHandler:
     to_key=int(now_token.receipt_key),
 )
 if self.hs.config.experimental.msc2285_enabled:
-    receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id)
+    receipt = ReceiptEventSource.filter_out_private(receipt, user_id)

 tags_by_room = await self.store.get_tags_for_user(user_id)

@@ -449,7 +449,7 @@ class InitialSyncHandler:
 if not receipts:
     return []
 if self.hs.config.experimental.msc2285_enabled:
-    receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id)
+    receipts = ReceiptEventSource.filter_out_private(receipts, user_id)
 return receipts

 presence, receipts, (messages, token) = await make_deferred_yieldable(
@@ -1409,7 +1409,7 @@ class EventCreationHandler:

 original_event = await self.store.get_event(
     event.redacts,
-    redact_behaviour=EventRedactBehaviour.AS_IS,
+    redact_behaviour=EventRedactBehaviour.as_is,
     get_prev_content=False,
     allow_rejected=False,
     allow_none=True,
@@ -1430,7 +1430,7 @@ class EventCreationHandler:
 # Validate a newly added alias or newly added alt_aliases.

 original_alias = None
-original_alt_aliases: List[str] = []
+original_alt_aliases: object = []

 original_event_id = event.unsigned.get("replaces_state")
 if original_event_id:
@@ -1458,6 +1458,7 @@ class EventCreationHandler:
 # If the old version of alt_aliases is of an unknown form,
 # completely replace it.
 if not isinstance(original_alt_aliases, (list, tuple)):
+    # TODO: check that the original_alt_aliases' entries are all strings
     original_alt_aliases = []

 # Check that each alias is currently valid.
@@ -1509,7 +1510,7 @@ class EventCreationHandler:

 original_event = await self.store.get_event(
     event.redacts,
-    redact_behaviour=EventRedactBehaviour.AS_IS,
+    redact_behaviour=EventRedactBehaviour.as_is,
     get_prev_content=False,
     allow_rejected=False,
     allow_none=True,
@@ -966,7 +966,7 @@ class OidcProvider:
     "Mapping provider does not support de-duplicating Matrix IDs"
 )

-attributes = await self._user_mapping_provider.map_user_attributes(  # type: ignore
+attributes = await self._user_mapping_provider.map_user_attributes(
     userinfo, token
 )

@@ -659,27 +659,28 @@ class PresenceHandler(BasePresenceHandler):
 )

 now = self.clock.time_msec()
-for state in self.user_to_current_state.values():
-    self.wheel_timer.insert(
-        now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
-    )
-    self.wheel_timer.insert(
-        now=now,
-        obj=state.user_id,
-        then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
-    )
-    if self.is_mine_id(state.user_id):
-        self.wheel_timer.insert(
-            now=now,
-            obj=state.user_id,
-            then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
-        )
-    else:
-        self.wheel_timer.insert(
-            now=now,
-            obj=state.user_id,
-            then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
-        )
+if self._presence_enabled:
+    for state in self.user_to_current_state.values():
+        self.wheel_timer.insert(
+            now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
+        )
+        self.wheel_timer.insert(
+            now=now,
+            obj=state.user_id,
+            then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+        )
+        if self.is_mine_id(state.user_id):
+            self.wheel_timer.insert(
+                now=now,
+                obj=state.user_id,
+                then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
+            )
+        else:
+            self.wheel_timer.insert(
+                now=now,
+                obj=state.user_id,
+                then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
+            )

 # Set of users who have presence in the `user_to_current_state` that
 # have not yet been persisted
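The re-indent above gates all start-up timer scheduling behind `self._presence_enabled`. The point of the guard is that a wheel timer only ever pops entries when presence handling runs; with presence disabled, the inserts would accumulate and never be fetched. A toy wheel timer makes the insert/fetch contract concrete (names and structure are illustrative stand-ins, not Synapse's `WheelTimer`):

    from typing import Any, List, Tuple

    class ToyWheelTimer:
        """Schedules objects to fire at a deadline; fetch() drains due entries."""

        def __init__(self) -> None:
            self._entries: List[Tuple[int, Any]] = []

        def insert(self, now: int, obj: Any, then: int) -> None:
            # `now` is unused here; the real implementation buckets by time.
            self._entries.append((then, obj))

        def fetch(self, now: int) -> List[Any]:
            due = [obj for then, obj in self._entries if then <= now]
            self._entries = [(t, o) for t, o in self._entries if t > now]
            return due

If nothing ever calls `fetch()` — the situation when presence is disabled — every `insert()` is dead weight, hence the new guard.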
@@ -804,6 +805,13 @@ class PresenceHandler(BasePresenceHandler):
 This is currently used to bump the max presence stream ID without changing any
 user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
 """
+if not self._presence_enabled:
+    # We shouldn't get here if presence is disabled, but we check anyway
+    # to ensure that we don't a) send out presence federation and b)
+    # don't add things to the wheel timer that will never be handled.
+    logger.warning("Tried to update presence states when presence is disabled")
+    return
+
 now = self.clock.time_msec()

 with Measure(self.clock, "presence_update_states"):
@@ -1229,6 +1237,10 @@ class PresenceHandler(BasePresenceHandler):
 ):
     raise SynapseError(400, "Invalid presence state")

+# If presence is disabled, no-op
+if not self.hs.config.server.use_presence:
+    return
+
 user_id = target_user.to_string()

 prev_state = await self.current_state_for_user(user_id)
synapse/handlers/push_rules.py (new file, 138 lines)
@@ -0,0 +1,138 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING, List, Optional, Union
+
+import attr
+
+from synapse.api.errors import SynapseError, UnrecognizedRequestError
+from synapse.push.baserules import BASE_RULE_IDS
+from synapse.storage.push_rule import RuleNotFoundException
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RuleSpec:
+    scope: str
+    template: str
+    rule_id: str
+    attr: Optional[str]
+
+
+class PushRulesHandler:
+    """A class to handle changes in push rules for users."""
+
+    def __init__(self, hs: "HomeServer"):
+        self._notifier = hs.get_notifier()
+        self._main_store = hs.get_datastores().main
+
+    async def set_rule_attr(
+        self, user_id: str, spec: RuleSpec, val: Union[bool, JsonDict]
+    ) -> None:
+        """Set an attribute (enabled or actions) on an existing push rule.
+
+        Notifies listeners (e.g. sync handler) of the change.
+
+        Args:
+            user_id: the user for which to modify the push rule.
+            spec: the spec of the push rule to modify.
+            val: the value to change the attribute to.
+
+        Raises:
+            RuleNotFoundException if the rule being modified doesn't exist.
+            SynapseError(400) if the value is malformed.
+            UnrecognizedRequestError if the attribute to change is unknown.
+            InvalidRuleException if we're trying to change the actions on a rule but
+                the provided actions aren't compliant with the spec.
+        """
+        if spec.attr not in ("enabled", "actions"):
+            # for the sake of potential future expansion, shouldn't report
+            # 404 in the case of an unknown request so check it corresponds to
+            # a known attribute first.
+            raise UnrecognizedRequestError()
+
+        namespaced_rule_id = f"global/{spec.template}/{spec.rule_id}"
+        rule_id = spec.rule_id
+        is_default_rule = rule_id.startswith(".")
+        if is_default_rule:
+            if namespaced_rule_id not in BASE_RULE_IDS:
+                raise RuleNotFoundException("Unknown rule %r" % (namespaced_rule_id,))
+        if spec.attr == "enabled":
+            if isinstance(val, dict) and "enabled" in val:
+                val = val["enabled"]
+            if not isinstance(val, bool):
+                # Legacy fallback
+                # This should *actually* take a dict, but many clients pass
+                # bools directly, so let's not break them.
+                raise SynapseError(400, "Value for 'enabled' must be boolean")
+            await self._main_store.set_push_rule_enabled(
+                user_id, namespaced_rule_id, val, is_default_rule
+            )
+        elif spec.attr == "actions":
+            if not isinstance(val, dict):
+                raise SynapseError(400, "Value must be a dict")
+            actions = val.get("actions")
+            if not isinstance(actions, list):
+                raise SynapseError(400, "Value for 'actions' must be dict")
+            check_actions(actions)
+            rule_id = spec.rule_id
+            is_default_rule = rule_id.startswith(".")
+            if is_default_rule:
+                if namespaced_rule_id not in BASE_RULE_IDS:
+                    raise RuleNotFoundException(
+                        "Unknown rule %r" % (namespaced_rule_id,)
+                    )
+            await self._main_store.set_push_rule_actions(
+                user_id, namespaced_rule_id, actions, is_default_rule
+            )
+        else:
+            raise UnrecognizedRequestError()
+
+        self.notify_user(user_id)
+
+    def notify_user(self, user_id: str) -> None:
+        """Notify listeners about a push rule change.
+
+        Args:
+            user_id: the user ID the change is for.
+        """
+        stream_id = self._main_store.get_max_push_rules_stream_id()
+        self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id])
+
+
+def check_actions(actions: List[Union[str, JsonDict]]) -> None:
+    """Check if the given actions are spec compliant.
+
+    Args:
+        actions: the actions to check.
+
+    Raises:
+        InvalidRuleException if the rules aren't compliant with the spec.
+    """
+    if not isinstance(actions, list):
+        raise InvalidRuleException("No actions found")
+
+    for a in actions:
+        if a in ["notify", "dont_notify", "coalesce"]:
+            pass
+        elif isinstance(a, dict) and "set_tweak" in a:
+            pass
+        else:
+            raise InvalidRuleException("Unrecognised action %s" % a)
+
+
+class InvalidRuleException(Exception):
+    pass
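A quick usage sketch for the new module's validation helper (the action values are illustrative but match what `check_actions` accepts):

    # Accepted: the three bare action strings and dict actions with "set_tweak".
    check_actions(["notify", {"set_tweak": "highlight", "value": False}])

    # Rejected: anything else raises InvalidRuleException.
    try:
        check_actions(["shout"])
    except InvalidRuleException as e:
        print(e)  # Unrecognised action shout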
@@ -14,7 +14,7 @@
 import logging
 from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

-from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
+from synapse.api.constants import ReceiptTypes
 from synapse.appservice import ApplicationService
 from synapse.streams import EventSource
 from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
@@ -112,7 +112,7 @@ class ReceiptsHandler:
 )

 if not res:
-    # res will be None if this read receipt is 'old'
+    # res will be None if this receipt is 'old'
     continue

 stream_id, max_persisted_id = res
@@ -138,7 +138,7 @@ class ReceiptsHandler:
 return True

 async def received_client_receipt(
-    self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool,
+    self, room_id: str, receipt_type: str, user_id: str, event_id: str,
     extra_content: Optional[JsonDict] = None,
 ) -> None:
     """Called when a client tells us a local user has read up to the given
@@ -149,16 +149,14 @@ class ReceiptsHandler:
     receipt_type=receipt_type,
     user_id=user_id,
     event_ids=[event_id],
-    data={"ts": int(self.clock.time_msec()), "hidden": hidden, **(extra_content or {})},
+    data={"ts": int(self.clock.time_msec()), **(extra_content or {})},
 )

 is_new = await self._handle_new_receipts([receipt])
 if not is_new:
     return

-if self.federation_sender and not (
-    self.hs.config.experimental.msc2285_enabled and hidden
-):
+if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE:
     await self.federation_sender.send_read_receipt(receipt)

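The net effect of the last hunk: the boolean `hidden` flag is gone, and whether a receipt is federated now depends only on its type. In one line (the private receipt type's wire value is assumed here to be the MSC2285 unstable identifier):

    def should_federate(receipt_type: str) -> bool:
        # ReceiptTypes.READ_PRIVATE; unstable value assumed for illustration.
        return receipt_type != "org.matrix.msc2285.read.private"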
@@ -168,46 +166,37 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
 self.config = hs.config

 @staticmethod
-def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]:
+def filter_out_private(events: List[JsonDict], user_id: str) -> List[JsonDict]:
+    """
+    This method takes in what is returned by
+    get_linearized_receipts_for_rooms() and goes through read receipts
+    filtering out m.read.private receipts if they were not sent by the
+    current user.
+    """
+
     visible_events = []

-    # filter out hidden receipts the user shouldn't see
+    # filter out private receipts the user shouldn't see
     for event in events:
         content = event.get("content", {})
         new_event = event.copy()
         new_event["content"] = {}

-        for event_id in content.keys():
-            event_content = content.get(event_id, {})
-            m_read = event_content.get(ReceiptTypes.READ, {})
+        for event_id, event_content in content.items():
+            receipt_event = {}
+            for receipt_type, receipt_content in event_content.items():
+                if receipt_type == ReceiptTypes.READ_PRIVATE:
+                    user_rr = receipt_content.get(user_id, None)
+                    if user_rr:
+                        receipt_event[ReceiptTypes.READ_PRIVATE] = {
+                            user_id: user_rr.copy()
+                        }
+                else:
+                    receipt_event[receipt_type] = receipt_content.copy()

-            # If m_read is missing copy over the original event_content as there is nothing to process here
-            if not m_read:
-                new_event["content"][event_id] = event_content.copy()
-                continue
-
-            new_users = {}
-            for rr_user_id, user_rr in m_read.items():
-                try:
-                    hidden = user_rr.get("hidden")
-                except AttributeError:
-                    # Due to https://github.com/matrix-org/synapse/issues/10376
-                    # there are cases where user_rr is a string, in those cases
-                    # we just ignore the read receipt
-                    continue
-
-                if hidden is not True or rr_user_id == user_id:
-                    new_users[rr_user_id] = user_rr.copy()
-                    # If hidden has a value replace hidden with the correct prefixed key
-                    if hidden is not None:
-                        new_users[rr_user_id].pop("hidden")
-                        new_users[rr_user_id][
-                            ReadReceiptEventFields.MSC2285_HIDDEN
-                        ] = hidden
-
-            # Set new users unless empty
-            if len(new_users.keys()) > 0:
-                new_event["content"][event_id] = {ReceiptTypes.READ: new_users}
+            # Only include the receipt event if it is non-empty.
+            if receipt_event:
+                new_event["content"][event_id] = receipt_event

         # Append new_event to visible_events unless empty
         if len(new_event["content"].keys()) > 0:
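A worked example of what `filter_out_private` now does. Given a receipt event carrying both a public and a private receipt, the private one survives only for its owner (payload shapes are illustrative; the private receipt type string is the assumed MSC2285 unstable value):

    event = {
        "type": "m.receipt",
        "room_id": "!room:example.org",
        "content": {
            "$event1:example.org": {
                "m.read": {"@alice:example.org": {"ts": 1}},
                "org.matrix.msc2285.read.private": {"@bob:example.org": {"ts": 2}},
            },
        },
    }
    # Filtering for @alice: @bob's private receipt is dropped, the public
    # m.read receipt is copied through unchanged.
    # Filtering for @bob: both receipts are kept.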
@@ -235,18 +224,19 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
 )

 if self.config.experimental.msc2285_enabled:
-    events = ReceiptEventSource.filter_out_hidden(events, user.to_string())
+    events = ReceiptEventSource.filter_out_private(events, user.to_string())

 return events, to_key

 async def get_new_events_as(
-    self, from_key: int, service: ApplicationService
+    self, from_key: int, to_key: int, service: ApplicationService
 ) -> Tuple[List[JsonDict], int]:
     """Returns a set of new read receipt events that an appservice
     may be interested in.

     Args:
         from_key: the stream position at which events should be fetched from
+        to_key: the stream position up to which events should be fetched to
         service: The appservice which may be interested

     Returns:
@@ -256,7 +246,6 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
         * The current read receipt stream token.
     """
     from_key = int(from_key)
-    to_key = self.get_current_key()

     if from_key == to_key:
         return [], to_key
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import collections.abc
 import logging
 from typing import (
     TYPE_CHECKING,
@@ -24,7 +25,6 @@ from typing import (
 )

 import attr
-from frozendict import frozendict

 from synapse.api.constants import RelationTypes
 from synapse.api.errors import SynapseError
@@ -44,8 +44,6 @@ logger = logging.getLogger(__name__)
 class _ThreadAggregation:
     # The latest event in the thread.
     latest_event: EventBase
-    # The latest edit to the latest event in the thread.
-    latest_edit: Optional[EventBase]
     # The total number of events in the thread.
     count: int
     # True if the current user has sent an event to the thread.
@@ -295,7 +293,7 @@ class RelationsHandler:

 for event_id, summary in summaries.items():
     if summary:
-        thread_count, latest_thread_event, edit = summary
+        thread_count, latest_thread_event = summary

         # Subtract off the count of any ignored users.
         for ignored_user in ignored_users:
@@ -340,7 +338,6 @@ class RelationsHandler:

 results[event_id] = _ThreadAggregation(
     latest_event=latest_thread_event,
-    latest_edit=edit,
     count=thread_count,
     # If there's a thread summary it must also exist in the
     # participated dictionary.
@@ -359,15 +356,37 @@ class RelationsHandler:
     user_id: The user requesting the bundled aggregations.

 Returns:
-    A map of event ID to the bundled aggregation for the event. Not all
-    events may have bundled aggregations in the results.
+    A map of event ID to the bundled aggregations for the event.
+
+    Not all requested events may exist in the results (if they don't have
+    bundled aggregations).
+
+    The results may include additional events which are related to the
+    requested events.
 """
-# De-duplicate events by ID to handle the same event requested multiple times.
-#
-# State events do not get bundled aggregations.
-events_by_id = {
-    event.event_id: event for event in events if not event.is_state()
-}
+# De-duplicated events by ID to handle the same event requested multiple times.
+events_by_id = {}
+# A map of event ID to the relation in that event, if there is one.
+relations_by_id: Dict[str, str] = {}
+for event in events:
+    # State events do not get bundled aggregations.
+    if event.is_state():
+        continue
+
+    relates_to = event.content.get("m.relates_to")
+    relation_type = None
+    if isinstance(relates_to, collections.abc.Mapping):
+        relation_type = relates_to.get("rel_type")
+        # An event which is a replacement (ie edit) or annotation (ie,
+        # reaction) may not have any other event related to it.
+        if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
+            continue
+
+    # The event should get bundled aggregations.
+    events_by_id[event.event_id] = event
+    # Track the event's relation information for later.
+    if isinstance(relation_type, str):
+        relations_by_id[event.event_id] = relation_type

 # event ID -> bundled aggregation in non-serialized form.
 results: Dict[str, BundledAggregations] = {}
@@ -375,16 +394,34 @@ class RelationsHandler:
 # Fetch any ignored users of the requesting user.
 ignored_users = await self._main_store.ignored_users(user_id)

+# Threads are special as the latest event of a thread might cause additional
+# events to be fetched. Thus, we check those first!
+
+# Fetch thread summaries (but only for the directly requested events).
+threads = await self.get_threads_for_events(
+    # It is not valid to start a thread on an event which itself relates to another event.
+    [eid for eid in events_by_id.keys() if eid not in relations_by_id],
+    user_id,
+    ignored_users,
+)
+for event_id, thread in threads.items():
+    results.setdefault(event_id, BundledAggregations()).thread = thread
+
+    # If the latest event in a thread is not already being fetched,
+    # add it. This ensures that the bundled aggregations for the
+    # latest thread event is correct.
+    latest_thread_event = thread.latest_event
+    if latest_thread_event and latest_thread_event.event_id not in events_by_id:
+        events_by_id[latest_thread_event.event_id] = latest_thread_event
+        # Keep relations_by_id in sync with events_by_id:
+        #
+        # We know that the latest event in a thread has a thread relation
+        # (as that is what makes it part of the thread).
+        relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
+
 # Fetch other relations per event.
 for event in events_by_id.values():
-    # Do not bundle aggregations for an event which represents an edit or an
-    # annotation. It does not make sense for them to have related events.
-    relates_to = event.content.get("m.relates_to")
-    if isinstance(relates_to, (dict, frozendict)):
-        relation_type = relates_to.get("rel_type")
-        if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
-            continue
-
+    # Fetch any annotations (ie, reactions) to bundle with this event.
     annotations = await self.get_annotations_for_event(
         event.event_id, event.room_id, ignored_users=ignored_users
     )
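The restructuring above splits the old single dict comprehension into two passes: first decide which events qualify for bundled aggregations at all, then fetch thread summaries before everything else, because a thread's latest event may itself need to be pulled into `events_by_id`. The eligibility rule in miniature (relation type strings inlined from the Matrix spec):

    ANNOTATION = "m.annotation"  # RelationTypes.ANNOTATION
    REPLACE = "m.replace"        # RelationTypes.REPLACE

    def should_bundle(relation_type):
        """An edit or a reaction never gets bundled aggregations of its own."""
        return relation_type not in (ANNOTATION, REPLACE)

    assert should_bundle(None)         # ordinary event
    assert should_bundle("m.thread")   # thread events still qualify
    assert not should_bundle(REPLACE)  # edits are skipped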
@@ -393,6 +430,7 @@ class RelationsHandler:
     event.event_id, BundledAggregations()
 ).annotations = {"chunk": annotations}

+# Fetch any references to bundle with this event.
 references, next_token = await self.get_relations_for_event(
     event.event_id,
     event,
@@ -425,10 +463,4 @@ class RelationsHandler:
 for event_id, edit in edits.items():
     results.setdefault(event_id, BundledAggregations()).replace = edit

-threads = await self.get_threads_for_events(
-    events_by_id.keys(), user_id, ignored_users
-)
-for event_id, thread in threads.items():
-    results.setdefault(event_id, BundledAggregations()).thread = thread
-
 return results
@@ -57,7 +57,7 @@ from synapse.api.filtering import Filter
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.event_auth import validate_event_for_room_version
 from synapse.events import EventBase
-from synapse.events.utils import copy_power_levels_contents
+from synapse.events.utils import copy_and_fixup_power_levels_contents
 from synapse.federation.federation_client import InvalidResponseError
 from synapse.handlers.federation import get_domains_from_state
 from synapse.handlers.relations import BundledAggregations
@@ -337,13 +337,13 @@ class RoomCreationHandler:
 # 50, but if the default PL in a room is 50 or more, then we set the
 # required PL above that.

-pl_content = dict(old_room_pl_state.content)
-users_default = int(pl_content.get("users_default", 0))
+pl_content = copy_and_fixup_power_levels_contents(old_room_pl_state.content)
+users_default: int = pl_content.get("users_default", 0)  # type: ignore[assignment]
 restricted_level = max(users_default + 1, 50)

 updated = False
 for v in ("invite", "events_default"):
-    current = int(pl_content.get(v, 0))
+    current: int = pl_content.get(v, 0)  # type: ignore[assignment]
     if current < restricted_level:
         logger.debug(
             "Setting level for %s in %s to %i (was %i)",
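`copy_and_fixup_power_levels_contents` replaces the plain `dict()` copy because real-world power-level events sometimes carry integers encoded as strings; the renamed helper deep-copies and coerces them, which is why the `int(...)` casts at the call sites could be dropped. A rough sketch of that behaviour, inferred from the call sites (the real helper also validates types):

    from typing import Any, Dict

    def copy_and_fixup(pl_content: Dict[str, Any]) -> Dict[str, Any]:
        fixed: Dict[str, Any] = {}
        for key, value in pl_content.items():
            if isinstance(value, dict):
                fixed[key] = copy_and_fixup(value)  # e.g. the "users" sub-dict
            elif isinstance(value, str):
                fixed[key] = int(value)             # "50" -> 50
            else:
                fixed[key] = value
        return fixed

    print(copy_and_fixup({"users_default": "50", "users": {"@a:hs": "100"}}))
    # {'users_default': 50, 'users': {'@a:hs': 100}}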
@@ -380,7 +380,9 @@ class RoomCreationHandler:
 "state_key": "",
 "room_id": new_room_id,
 "sender": requester.user.to_string(),
-"content": old_room_pl_state.content,
+"content": copy_and_fixup_power_levels_contents(
+    old_room_pl_state.content
+),
 },
 ratelimit=False,
 )
@@ -471,7 +473,7 @@ class RoomCreationHandler:
 # dict so we can't just copy.deepcopy it.
 initial_state[
     (EventTypes.PowerLevels, "")
-] = power_levels = copy_power_levels_contents(
+] = power_levels = copy_and_fixup_power_levels_contents(
     initial_state[(EventTypes.PowerLevels, "")]
 )

@@ -105,6 +105,7 @@ class RoomSummaryHandler:
     hs.get_clock(),
     "get_room_hierarchy",
 )
+self._msc3266_enabled = hs.config.experimental.msc3266_enabled

 async def get_room_hierarchy(
     self,
@@ -630,7 +631,7 @@ class RoomSummaryHandler:
 return False

 async def _is_remote_room_accessible(
-    self, requester: str, room_id: str, room: JsonDict
+    self, requester: Optional[str], room_id: str, room: JsonDict
 ) -> bool:
     """
     Calculate whether the room received over federation should be shown to the requester.
@@ -645,7 +646,8 @@ class RoomSummaryHandler:
 due to an invite, etc.

 Args:
-    requester: The user requesting the summary.
+    requester: The user requesting the summary. If not passed only world
+        readability is checked.
     room_id: The room ID returned over federation.
     room: The summary of the room returned over federation.

@@ -659,6 +661,8 @@ class RoomSummaryHandler:
     or room.get("world_readable") is True
 ):
     return True
+elif not requester:
+    return False

 # Check if the user is a member of any of the allowed rooms from the response.
 allowed_rooms = room.get("allowed_room_ids")
@@ -715,6 +719,10 @@ class RoomSummaryHandler:
     "room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
 }

+if self._msc3266_enabled:
+    entry["im.nheko.summary.version"] = stats["version"]
+    entry["im.nheko.summary.encryption"] = stats["encryption"]
+
 # Federation requests need to provide additional information so the
 # requested server is able to filter the response appropriately.
 if for_federation:
@@ -812,9 +820,45 @@ class RoomSummaryHandler:

     room_summary["membership"] = membership or "leave"
 else:
-    # TODO federation API, descoped from initial unstable implementation
-    # as MSC needs more maturing on that side.
-    raise SynapseError(400, "Federation is not currently supported.")
+    # Reuse the hierarchy query over federation
+    if remote_room_hosts is None:
+        raise SynapseError(400, "Missing via to query remote room")
+
+    (
+        room_entry,
+        children_room_entries,
+        inaccessible_children,
+    ) = await self._summarize_remote_room_hierarchy(
+        _RoomQueueEntry(room_id, remote_room_hosts),
+        suggested_only=True,
+    )
+
+    # The results over federation might include rooms that we, as the
+    # requesting server, are allowed to see, but the requesting user is
+    # not permitted to see.
+    #
+    # Filter the returned results to only what is accessible to the user.
+    if not room_entry or not await self._is_remote_room_accessible(
+        requester, room_entry.room_id, room_entry.room
+    ):
+        raise NotFoundError("Room not found or is not accessible")
+
+    room = dict(room_entry.room)
+    room.pop("allowed_room_ids", None)
+
+    # If there was a requester, add their membership.
+    # We keep the membership in the local membership table unless the
+    # room is purged even for remote rooms.
+    if requester:
+        (
+            membership,
+            _,
+        ) = await self._store.get_local_current_membership_for_user_in_room(
+            requester, room_id
+        )
+        room["membership"] = membership or "leave"
+
+    return room

 return room_summary
@@ -357,7 +357,7 @@ class SearchHandler:
 itertools.chain(
     # The events_before and events_after for each context.
     itertools.chain.from_iterable(
-        itertools.chain(context["events_before"], context["events_after"])  # type: ignore[arg-type]
+        itertools.chain(context["events_before"], context["events_after"])
         for context in contexts.values()
     ),
     # The returned events.
@@ -373,10 +373,10 @@ class SearchHandler:

 for context in contexts.values():
     context["events_before"] = self._event_serializer.serialize_events(
-        context["events_before"], time_now, bundle_aggregations=aggregations  # type: ignore[arg-type]
+        context["events_before"], time_now, bundle_aggregations=aggregations
     )
     context["events_after"] = self._event_serializer.serialize_events(
-        context["events_after"], time_now, bundle_aggregations=aggregations  # type: ignore[arg-type]
+        context["events_after"], time_now, bundle_aggregations=aggregations
     )

 results = [
@@ -468,7 +468,7 @@ class SsoHandler:
     auth_provider_id,
     remote_user_id,
     get_request_user_agent(request),
-    request.getClientIP(),
+    request.getClientAddress().host,
 )
 new_user = True
 elif self._sso_update_profile_information:
@@ -928,7 +928,7 @@ class SsoHandler:
     session.auth_provider_id,
     session.remote_user_id,
     get_request_user_agent(request),
-    request.getClientIP(),
+    request.getClientAddress().host,
 )

 logger.info(
@@ -1044,7 +1044,7 @@ class SyncHandler:
 last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
     user_id=sync_config.user.to_string(),
     room_id=room_id,
-    receipt_type=ReceiptTypes.READ,
+    receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
 )

 return await self.store.get_unread_event_push_actions_by_room_for_user(
@@ -256,7 +256,9 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
 def __init__(self, hs: "HomeServer"):
     super().__init__(hs)
     self.hs = hs
-    self._enabled = bool(hs.config.registration.registration_requires_token)
+    self._enabled = bool(
+        hs.config.registration.registration_requires_token
+    ) or bool(hs.config.registration.enable_registration_token_3pid_bypass)
     self.store = hs.get_datastores().main

 def is_enabled(self) -> bool:
@@ -60,7 +60,7 @@ class UserDirectoryHandler(StateDeltasHandler):
 self.clock = hs.get_clock()
 self.notifier = hs.get_notifier()
 self.is_mine_id = hs.is_mine_id
-self.update_user_directory = hs.config.server.update_user_directory
+self.update_user_directory = hs.config.worker.should_update_user_directory
 self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
 self.spam_checker = hs.get_spam_checker()
 # The current position in the current_state_delta stream
@@ -73,7 +73,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.types import JsonDict
 from synapse.util import json_decoder
-from synapse.util.async_helpers import timeout_deferred
+from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred
 from synapse.util.metrics import Measure

 if TYPE_CHECKING:
@@ -353,6 +353,13 @@ class MatrixFederationHttpClient:

     self._cooperator = Cooperator(scheduler=schedule)

+    self._sleeper = AwakenableSleeper(self.reactor)
+
+def wake_destination(self, destination: str) -> None:
+    """Called when the remote server may have come back online."""
+
+    self._sleeper.wake(destination)
+
 async def _send_request_with_optional_trailing_slash(
     self,
     request: MatrixFederationRequest,
@@ -474,6 +481,8 @@ class MatrixFederationHttpClient:
     self._store,
     backoff_on_404=backoff_on_404,
     ignore_backoff=ignore_backoff,
+    notifier=self.hs.get_notifier(),
+    replication_client=self.hs.get_replication_command_handler(),
 )

 method_bytes = request.method.encode("ascii")
@@ -664,7 +673,9 @@ class MatrixFederationHttpClient:
     delay,
 )

-await self.clock.sleep(delay)
+# Sleep for the calculated delay, or wake up immediately
+# if we get notified that the server is back up.
+await self._sleeper.sleep(request.destination, delay * 1000)
 retries_left -= 1
 else:
     raise
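The `AwakenableSleeper` introduced above lets the retry loop's backoff sleep be cut short when `wake_destination` reports that the remote server is reachable again. An asyncio analogue of the idea (Synapse's implementation is reactor-based; this stand-in only shows the wake/sleep contract):

    import asyncio
    from collections import defaultdict
    from typing import DefaultDict, Set

    class AwakenableSleeper:
        def __init__(self) -> None:
            self._waiters: DefaultDict[str, Set[asyncio.Event]] = defaultdict(set)

        def wake(self, destination: str) -> None:
            # Wake every sleeper currently waiting on this destination.
            for event in self._waiters[destination]:
                event.set()

        async def sleep(self, destination: str, delay_ms: int) -> None:
            event = asyncio.Event()
            self._waiters[destination].add(event)
            try:
                # Return early on wake(), otherwise after the full delay.
                await asyncio.wait_for(event.wait(), timeout=delay_ms / 1000)
            except asyncio.TimeoutError:
                pass
            finally:
                self._waiters[destination].discard(event)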
@@ -43,6 +43,7 @@ from typing_extensions import Protocol
 from zope.interface import implementer

 from twisted.internet import defer, interfaces
+from twisted.internet.defer import CancelledError
 from twisted.python import failure
 from twisted.web import resource
 from twisted.web.server import NOT_DONE_YET, Request
@@ -82,6 +83,14 @@ HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
 </html>
 """

+# A fictional HTTP status code for requests where the client has disconnected and we
+# successfully cancelled the request. Used only for logging purposes. Clients will never
+# observe this code unless cancellations leak across requests or we raise a
+# `CancelledError` ourselves.
+# Analogous to nginx's 499 status code:
+# https://github.com/nginx/nginx/blob/release-1.21.6/src/http/ngx_http_request.h#L128-L134
+HTTP_STATUS_REQUEST_CANCELLED = 499
+

 def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
     """Sends a JSON error response to clients."""
@@ -93,6 +102,17 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
     error_dict = exc.error_dict()

     logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg)
+elif f.check(CancelledError):
+    error_code = HTTP_STATUS_REQUEST_CANCELLED
+    error_dict = {"error": "Request cancelled", "errcode": Codes.UNKNOWN}
+
+    if not request._disconnected:
+        logger.error(
+            "Got cancellation before client disconnection from %r: %r",
+            request.request_metrics.name,
+            request,
+            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore[arg-type]
+        )
 else:
     error_code = 500
     error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN}
@@ -155,6 +175,16 @@ def return_html_error(
         request,
         exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore[arg-type]
     )
+elif f.check(CancelledError):
+    code = HTTP_STATUS_REQUEST_CANCELLED
+    msg = "Request cancelled"
+
+    if not request._disconnected:
+        logger.error(
+            "Got cancellation before client disconnection when handling request %r",
+            request,
+            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore[arg-type]
+        )
 else:
     code = HTTPStatus.INTERNAL_SERVER_ERROR
     msg = "Internal server error"
@@ -295,7 +325,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
 if isawaitable(raw_callback_return):
     callback_return = await raw_callback_return
 else:
-    callback_return = raw_callback_return  # type: ignore
+    callback_return = raw_callback_return

 return callback_return

@@ -469,7 +499,7 @@ class JsonResource(DirectServeJsonResource):
 if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)):
     callback_return = await raw_callback_return
 else:
-    callback_return = raw_callback_return  # type: ignore
+    callback_return = raw_callback_return

 return callback_return

@@ -683,6 +713,9 @@ def respond_with_json(
 Returns:
     twisted.web.server.NOT_DONE_YET if the request is still active.
 """
+# The response code must always be set, for logging purposes.
+request.setResponseCode(code)
+
 # could alternatively use request.notifyFinish() and flip a flag when
 # the Deferred fires, but since the flag is RIGHT THERE it seems like
 # a waste.
@@ -697,7 +730,6 @@ def respond_with_json(
 else:
     encoder = _encode_json_bytes

-request.setResponseCode(code)
 request.setHeader(b"Content-Type", b"application/json")
 request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")

@@ -728,13 +760,15 @@ def respond_with_json_bytes(
 Returns:
     twisted.web.server.NOT_DONE_YET if the request is still active.
 """
+# The response code must always be set, for logging purposes.
+request.setResponseCode(code)
+
 if request._disconnected:
     logger.warning(
         "Not sending response to request %s, already disconnected.", request
     )
     return None

-request.setResponseCode(code)
 request.setHeader(b"Content-Type", b"application/json")
 request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
 request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
@@ -840,6 +874,9 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N
 code: The HTTP response code.
 html_bytes: The HTML bytes to use as the response body.
 """
+# The response code must always be set, for logging purposes.
+request.setResponseCode(code)
+
 # could alternatively use request.notifyFinish() and flip a flag when
 # the Deferred fires, but since the flag is RIGHT THERE it seems like
 # a waste.
@@ -849,7 +886,6 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N
 )
 return None

-request.setResponseCode(code)
 request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
 request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))

@ -238,7 +238,7 @@ class SynapseRequest(Request):
|
|||||||
request_id,
|
request_id,
|
||||||
request=ContextRequest(
|
request=ContextRequest(
|
||||||
request_id=request_id,
|
request_id=request_id,
|
||||||
ip_address=self.getClientIP(),
|
ip_address=self.getClientAddress().host,
|
||||||
site_tag=self.synapse_site.site_tag,
|
site_tag=self.synapse_site.site_tag,
|
||||||
# The requester is going to be unknown at this point.
|
# The requester is going to be unknown at this point.
|
||||||
requester=None,
|
requester=None,
|
||||||
@ -381,7 +381,7 @@ class SynapseRequest(Request):
|
|||||||
|
|
||||||
self.synapse_site.access_logger.debug(
|
self.synapse_site.access_logger.debug(
|
||||||
"%s - %s - Received request: %s %s",
|
"%s - %s - Received request: %s %s",
|
||||||
self.getClientIP(),
|
self.getClientAddress().host,
|
||||||
self.synapse_site.site_tag,
|
self.synapse_site.site_tag,
|
||||||
self.get_method(),
|
self.get_method(),
|
||||||
self.get_redacted_uri(),
|
self.get_redacted_uri(),
|
||||||
@ -429,7 +429,7 @@ class SynapseRequest(Request):
|
|||||||
"%s - %s - {%s}"
|
"%s - %s - {%s}"
|
||||||
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
|
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
|
||||||
' %sB %s "%s %s %s" "%s" [%d dbevts]',
|
' %sB %s "%s %s %s" "%s" [%d dbevts]',
|
||||||
self.getClientIP(),
|
self.getClientAddress().host,
|
||||||
self.synapse_site.site_tag,
|
self.synapse_site.site_tag,
|
||||||
requester,
|
requester,
|
||||||
processing_time,
|
processing_time,
|
||||||
|
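The `getClientIP()` to `getClientAddress().host` changes here (and in the REST servlets further down) track Twisted's deprecation of `Request.getClientIP` in favour of `getClientAddress()`, which returns an address object such as `IPv4Address`; its `host` attribute is the textual IP that `getClientIP()` used to return. A small illustration:

from twisted.internet.address import IPv4Address

# getClientAddress() returns an IAddress provider; for TCP connections this
# is an IPv4Address/IPv6Address carrying host and port.
addr = IPv4Address("TCP", "203.0.113.7", 54321)
assert addr.host == "203.0.113.7"  # the same string getClientIP() returned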
@@ -722,6 +722,11 @@ P = ParamSpec("P")
R = TypeVar("R")


+async def _unwrap_awaitable(awaitable: Awaitable[R]) -> R:
+    """Unwraps an arbitrary awaitable by awaiting it."""
+    return await awaitable
+
+
@overload
def preserve_fn(  # type: ignore[misc]
    f: Callable[P, Awaitable[R]],
@@ -802,17 +807,20 @@ def run_in_background(  # type: ignore[misc]
        # by synchronous exceptions, so let's turn them into Failures.
        return defer.fail()

+    # `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain
+    # value. Convert it to a `Deferred`.
    if isinstance(res, typing.Coroutine):
+        # Wrap the coroutine in a `Deferred`.
        res = defer.ensureDeferred(res)
-
-    # At this point we should have a Deferred, if not then f was a synchronous
-    # function, wrap it in a Deferred for consistency.
-    if not isinstance(res, defer.Deferred):
-        # `res` is not a `Deferred` and not a `Coroutine`.
-        # There are no other types of `Awaitable`s we expect to encounter in Synapse.
-        assert not isinstance(res, Awaitable)
-
-        return defer.succeed(res)
+    elif isinstance(res, defer.Deferred):
+        pass
+    elif isinstance(res, Awaitable):
+        # `res` is probably some kind of completed awaitable, such as a `DoneAwaitable`
+        # or `Future` from `make_awaitable`.
+        res = defer.ensureDeferred(_unwrap_awaitable(res))
+    else:
+        # `res` is a plain value. Wrap it in a `Deferred`.
+        res = defer.succeed(res)

    if res.called and not res.paused:
        # The function should have maintained the logcontext, so we can
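The new `elif isinstance(res, Awaitable)` branch needs `_unwrap_awaitable` because `defer.ensureDeferred` only accepts coroutines and `Deferred`s; any other awaitable must first be awaited inside a coroutine. A runnable sketch, where `DoneAwaitable` is a stand-in for the test helper the comment mentions:

from typing import Any, Awaitable

from twisted.internet import defer


class DoneAwaitable:
    """An awaitable that completes immediately with a fixed value."""

    def __init__(self, value: Any) -> None:
        self.value = value

    def __await__(self):
        # A generator that yields nothing and returns the value: awaiting
        # this object therefore finishes at once.
        if False:
            yield
        return self.value


async def _unwrap_awaitable(awaitable: Awaitable[Any]) -> Any:
    """Unwraps an arbitrary awaitable by awaiting it."""
    return await awaitable


# ensureDeferred would reject a DoneAwaitable directly; wrapping it in a
# coroutine makes it acceptable, and the Deferred fires synchronously.
d = defer.ensureDeferred(_unwrap_awaitable(DoneAwaitable(42)))
assert d.called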
@@ -884,7 +884,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
        tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
        tags.HTTP_METHOD: request.get_method(),
        tags.HTTP_URL: request.get_redacted_uri(),
-        tags.PEER_HOST_IPV6: request.getClientIP(),
+        tags.PEER_HOST_IPV6: request.getClientAddress().host,
    }

    request_name = request.request_metrics.name
@@ -28,11 +28,11 @@ from typing import (
    Type,
    TypeVar,
    Union,
-    cast,
)

from prometheus_client import Metric
from prometheus_client.core import REGISTRY, Counter, Gauge
+from typing_extensions import ParamSpec

from twisted.internet import defer

@@ -256,24 +256,48 @@ def run_as_background_process(
    return defer.ensureDeferred(run())


-F = TypeVar("F", bound=Callable[..., Awaitable[Optional[Any]]])
+P = ParamSpec("P")


-def wrap_as_background_process(desc: str) -> Callable[[F], F]:
-    """Decorator that wraps a function that gets called as a background
-    process.
+def wrap_as_background_process(
+    desc: str,
+) -> Callable[
+    [Callable[P, Awaitable[Optional[R]]]],
+    Callable[P, "defer.Deferred[Optional[R]]"],
+]:
+    """Decorator that wraps an asynchronous function `func`, returning a synchronous
+    decorated function. Calling the decorated version runs `func` as a background
+    process, forwarding all arguments verbatim.

-    Equivalent to calling the function with `run_as_background_process`
+    That is,
+
+        @wrap_as_background_process
+        def func(*args): ...
+        func(1, 2, third=3)
+
+    is equivalent to:
+
+        def func(*args): ...
+        run_as_background_process(func, 1, 2, third=3)
+
+    The former can be convenient if `func` needs to be run as a background process in
+    multiple places.
    """

-    def wrap_as_background_process_inner(func: F) -> F:
+    def wrap_as_background_process_inner(
+        func: Callable[P, Awaitable[Optional[R]]]
+    ) -> Callable[P, "defer.Deferred[Optional[R]]"]:
        @wraps(func)
        def wrap_as_background_process_inner_2(
-            *args: Any, **kwargs: Any
+            *args: P.args, **kwargs: P.kwargs
        ) -> "defer.Deferred[Optional[R]]":
-            return run_as_background_process(desc, func, *args, **kwargs)
+            # type-ignore: mypy is confusing kwargs with the bg_start_span kwarg.
+            # Argument 4 to "run_as_background_process" has incompatible type
+            # "**P.kwargs"; expected "bool"
+            # See https://github.com/python/mypy/issues/8862
+            return run_as_background_process(desc, func, *args, **kwargs)  # type: ignore[arg-type]

-        return cast(F, wrap_as_background_process_inner_2)
+        return wrap_as_background_process_inner_2

    return wrap_as_background_process_inner

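The switch from an `F` TypeVar plus `cast` to `ParamSpec` lets mypy check call sites of the decorated function against the original signature instead of accepting anything. A toy decorator with the same typing shape (illustrative only, not part of Synapse):

from functools import wraps
from typing import Awaitable, Callable, Optional, TypeVar

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def traced(
    desc: str,
) -> Callable[
    [Callable[P, Awaitable[Optional[R]]]],
    Callable[P, Awaitable[Optional[R]]],
]:
    """Toy decorator: logs a description, forwards all arguments verbatim."""

    def inner(
        func: Callable[P, Awaitable[Optional[R]]]
    ) -> Callable[P, Awaitable[Optional[R]]]:
        @wraps(func)
        async def wrapped(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
            print(f"{desc}: calling {func.__name__}")
            return await func(*args, **kwargs)

        return wrapped

    return inner


@traced("demo")
async def add(x: int, y: int) -> int:
    return x + y

# mypy now flags e.g. add("1", 2) -- the old cast()-based version could not.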
@@ -30,6 +30,7 @@ from typing import (

import attr
import jinja2
+from typing_extensions import ParamSpec

from twisted.internet import defer
from twisted.web.resource import Resource
@@ -82,6 +83,7 @@ from synapse.handlers.auth import (
    ON_LOGGED_OUT_CALLBACK,
    AuthHandler,
)
+from synapse.handlers.push_rules import RuleSpec, check_actions
from synapse.http.client import SimpleHttpClient
from synapse.http.server import (
    DirectServeHtmlResource,
@@ -109,6 +111,7 @@ from synapse.storage.state import StateFilter
from synapse.types import (
    DomainSpecificString,
    JsonDict,
+    JsonMapping,
    Requester,
    StateMap,
    UserID,
@@ -127,6 +130,7 @@ if TYPE_CHECKING:


T = TypeVar("T")
+P = ParamSpec("P")

"""
This package defines the 'stable' API which can be used by extension modules which
@@ -151,6 +155,7 @@ __all__ = [
    "PRESENCE_ALL_USERS",
    "LoginResponse",
    "JsonDict",
+    "JsonMapping",
    "EventBase",
    "StateMap",
    "ProfileInfo",
@@ -193,6 +198,7 @@ class ModuleApi:
        self._clock: Clock = hs.get_clock()
        self._registration_handler = hs.get_registration_handler()
        self._send_email_handler = hs.get_send_email_handler()
+        self._push_rules_handler = hs.get_push_rules_handler()
        self.custom_template_dir = hs.config.server.custom_template_directory

        try:
@@ -795,9 +801,9 @@ class ModuleApi:
    def run_db_interaction(
        self,
        desc: str,
-        func: Callable[..., T],
-        *args: Any,
-        **kwargs: Any,
+        func: Callable[P, T],
+        *args: P.args,
+        **kwargs: P.kwargs,
    ) -> "defer.Deferred[T]":
        """Run a function with a database connection

@@ -813,8 +819,9 @@ class ModuleApi:
        Returns:
            Deferred[object]: result of func
        """
+        # type-ignore: See https://github.com/python/mypy/issues/8862
        return defer.ensureDeferred(
-            self._store.db_pool.runInteraction(desc, func, *args, **kwargs)
+            self._store.db_pool.runInteraction(desc, func, *args, **kwargs)  # type: ignore[arg-type]
        )

    def complete_sso_login(
@@ -1292,9 +1299,9 @@ class ModuleApi:

    async def defer_to_thread(
        self,
-        f: Callable[..., T],
-        *args: Any,
-        **kwargs: Any,
+        f: Callable[P, T],
+        *args: P.args,
+        **kwargs: P.kwargs,
    ) -> T:
        """Runs the given function in a separate thread from Synapse's thread pool.

@@ -1350,6 +1357,68 @@ class ModuleApi:
        """
        await self._store.add_user_bound_threepid(user_id, medium, address, id_server)

+    def check_push_rule_actions(
+        self, actions: List[Union[str, Dict[str, str]]]
+    ) -> None:
+        """Checks if the given push rule actions are valid according to the Matrix
+        specification.
+
+        See https://spec.matrix.org/v1.2/client-server-api/#actions for the list of valid
+        actions.
+
+        Added in Synapse v1.58.0.
+
+        Args:
+            actions: the actions to check.
+
+        Raises:
+            synapse.module_api.errors.InvalidRuleException if the actions are invalid.
+        """
+        check_actions(actions)
+
+    async def set_push_rule_action(
+        self,
+        user_id: str,
+        scope: str,
+        kind: str,
+        rule_id: str,
+        actions: List[Union[str, Dict[str, str]]],
+    ) -> None:
+        """Changes the actions of an existing push rule for the given user.
+
+        See https://spec.matrix.org/v1.2/client-server-api/#push-rules for more
+        information about push rules and their syntax.
+
+        Can only be called on the main process.
+
+        Added in Synapse v1.58.0.
+
+        Args:
+            user_id: the user for which to change the push rule's actions.
+            scope: the push rule's scope, currently only "global" is allowed.
+            kind: the push rule's kind.
+            rule_id: the push rule's identifier.
+            actions: the actions to run when the rule's conditions match.
+
+        Raises:
+            RuntimeError if this method is called on a worker or `scope` is invalid.
+            synapse.module_api.errors.RuleNotFoundException if the rule being modified
+                can't be found.
+            synapse.module_api.errors.InvalidRuleException if the actions are invalid.
+        """
+        if self.worker_app is not None:
+            raise RuntimeError("module tried to change push rule actions on a worker")
+
+        if scope != "global":
+            raise RuntimeError(
+                "invalid scope %s, only 'global' is currently allowed" % scope
+            )
+
+        spec = RuleSpec(scope, kind, rule_id, "actions")
+        await self._push_rules_handler.set_rule_attr(
+            user_id, spec, {"actions": actions}
+        )
+

class PublicRoomListManager:
    """Contains methods for adding to, removing from and querying whether a room
@@ -1419,7 +1488,7 @@ class AccountDataManager:
                f"{user_id} is not local to this homeserver; can't access account data for remote users."
            )

-    async def get_global(self, user_id: str, data_type: str) -> Optional[JsonDict]:
+    async def get_global(self, user_id: str, data_type: str) -> Optional[JsonMapping]:
        """
        Gets some global account data, of a specified type, for the specified user.

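A hedged usage sketch for the module-API surface added above: a hypothetical third-party module validating actions and then rewriting a rule. The rule identifier is illustrative; `module_api` is the `ModuleApi` instance Synapse hands to modules at startup.

from typing import Dict, List, Union

from synapse.module_api import ModuleApi


async def silence_rule(module_api: ModuleApi, user_id: str) -> None:
    actions: List[Union[str, Dict[str, str]]] = ["dont_notify"]

    # Raises synapse.module_api.errors.InvalidRuleException on bad input.
    module_api.check_push_rule_actions(actions)

    # Must run on the main process; only the "global" scope is accepted.
    await module_api.set_push_rule_action(
        user_id=user_id,
        scope="global",
        kind="override",
        rule_id=".m.rule.suppress_notices",  # illustrative rule id
        actions=actions,
    )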
@@ -20,10 +20,14 @@ from synapse.api.errors import (
    SynapseError,
)
from synapse.config._base import ConfigError
+from synapse.handlers.push_rules import InvalidRuleException
+from synapse.storage.push_rule import RuleNotFoundException

__all__ = [
    "InvalidClientCredentialsError",
    "RedirectException",
    "SynapseError",
    "ConfigError",
+    "InvalidRuleException",
+    "RuleNotFoundException",
]
@@ -228,9 +228,7 @@ class Notifier:
        # Called when there are new things to stream over replication
        self.replication_callbacks: List[Callable[[], None]] = []

-        # Called when remote servers have come back online after having been
-        # down.
-        self.remote_server_up_callbacks: List[Callable[[str], None]] = []
+        self._federation_client = hs.get_federation_http_client()

        self._third_party_rules = hs.get_third_party_event_rules()

@@ -731,3 +729,7 @@ class Notifier:
        # circular dependencies.
        if self.federation_sender:
            self.federation_sender.wake_destination(server)
+
+        # Tell the federation client about the fact the server is back up, so
+        # that any in flight requests can be immediately retried.
+        self._federation_client.wake_destination(server)
@@ -277,6 +277,21 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
        ],
        "actions": ["dont_notify"],
    },
+    # XXX: This is an experimental rule that is only enabled if msc3786_enabled
+    # is enabled, if it is not the rule gets filtered out in _load_rules() in
+    # PushRulesWorkerStore
+    {
+        "rule_id": "global/override/.org.matrix.msc3786.rule.room.server_acl",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "type",
+                "pattern": "m.room.server_acl",
+                "_cache_key": "_room_server_acl",
+            }
+        ],
+        "actions": ["dont_notify"],
+    },
]


@@ -24,7 +24,9 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int
    invites = await store.get_invited_rooms_for_local_user(user_id)
    joins = await store.get_rooms_for_user(user_id)

-    my_receipts_by_room = await store.get_receipts_for_user(user_id, ReceiptTypes.READ)
+    my_receipts_by_room = await store.get_receipts_for_user(
+        user_id, (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE)
+    )

    badge = len(invites)

@@ -15,7 +15,7 @@
"""This module implements the TCP replication protocol used by synapse to
communicate between the master process and its workers (when they're enabled).

-Further details can be found in docs/tcp_replication.rst
+Further details can be found in docs/tcp_replication.md


Structure of the module:
@@ -21,7 +21,7 @@ from twisted.internet.interfaces import IAddress, IConnector
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python.failure import Failure

-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, ReceiptTypes
from synapse.federation import send_queue
from synapse.federation.sender import FederationSender
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
@@ -401,10 +401,8 @@ class FederationSenderHandler:
            # we only want to send on receipts for our own users
            if not self._is_mine_id(receipt.user_id):
                continue
-            if (
-                receipt.data.get("hidden", False)
-                and self._hs.config.experimental.msc2285_enabled
-            ):
+            # Private read receipts never get sent over federation.
+            if receipt.receipt_type == ReceiptTypes.READ_PRIVATE:
                continue
            receipt_info = ReadReceipt(
                receipt.room_id,
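Previously a receipt was withheld from federation only when its `data` carried the MSC2285 `hidden` flag and the experimental config was on; private read receipts are now a first-class receipt type. A toy stand-in (not Synapse code) for the filter this hunk installs, assuming the unstable MSC2285 type identifier:

from dataclasses import dataclass
from typing import Callable, Iterable, Iterator

READ = "m.read"
READ_PRIVATE = "org.matrix.msc2285.read.private"  # unstable identifier at this point


@dataclass
class Receipt:
    user_id: str
    receipt_type: str
    room_id: str


def receipts_to_federate(
    receipts: Iterable[Receipt], is_mine: Callable[[str], bool]
) -> Iterator[Receipt]:
    for receipt in receipts:
        # we only want to send on receipts for our own users
        if not is_mine(receipt.user_id):
            continue
        # Private read receipts never get sent over federation.
        if receipt.receipt_type == READ_PRIVATE:
            continue
        yield receipt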
@@ -537,7 +537,7 @@ class ReplicationCommandHandler:
            # Ignore POSITION that are just our own echoes
            return

-        logger.info("Handling '%s %s'", cmd.NAME, cmd.to_line())
+        logger.debug("Handling '%s %s'", cmd.NAME, cmd.to_line())

        self._add_command_to_stream_queue(conn, cmd)

@@ -567,6 +567,11 @@ class ReplicationCommandHandler:
            # between then and now.
            missing_updates = cmd.prev_token != current_token
            while missing_updates:
+                # Note: There may very well not be any new updates, but we check to
+                # make sure. This can particularly happen for the event stream where
+                # event persisters continuously send `POSITION`. See `resource.py`
+                # for why this can happen.
+
                logger.info(
                    "Fetching replication rows for '%s' between %i and %i",
                    stream_name,
@@ -590,7 +595,7 @@ class ReplicationCommandHandler:
                [stream.parse_row(row) for row in rows],
            )

            logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token)

            # We've now caught up to position sent to us, notify handler.
            await self._replication_data_handler.on_position(
@@ -204,6 +204,15 @@ class ReplicationStreamer:
                    # turns out that e.g. account data streams share
                    # their "current token" with each other, meaning
                    # that it is *not* safe to send a POSITION.
+
+                    # Note: `last_token` may not *actually* be the
+                    # last token we sent out in a RDATA or POSITION.
+                    # This can happen if we sent out an RDATA for
+                    # position X when our current token was say X+1.
+                    # Other workers will see RDATA for X and then a
+                    # POSITION with last token of X+1, which will
+                    # cause them to check if there were any missing
+                    # updates between X and X+1.
                    logger.info(
                        "Sending position: %s -> %s",
                        stream.NAME,
@@ -882,9 +882,7 @@ class WhoamiRestServlet(RestServlet):

        response = {
            "user_id": requester.user.to_string(),
-            # MSC: https://github.com/matrix-org/matrix-doc/pull/3069
            # Entered spec in Matrix 1.2
-            "org.matrix.msc3069.is_guest": bool(requester.is_guest),
            "is_guest": bool(requester.is_guest),
        }

@@ -112,7 +112,7 @@ class AuthRestServlet(RestServlet):

            try:
                await self.auth_handler.add_oob_auth(
-                    LoginType.RECAPTCHA, authdict, request.getClientIP()
+                    LoginType.RECAPTCHA, authdict, request.getClientAddress().host
                )
            except LoginError as e:
                # Authentication failed, let user try again
@@ -132,7 +132,7 @@ class AuthRestServlet(RestServlet):

            try:
                await self.auth_handler.add_oob_auth(
-                    LoginType.TERMS, authdict, request.getClientIP()
+                    LoginType.TERMS, authdict, request.getClientAddress().host
                )
            except LoginError as e:
                # Authentication failed, let user try again
@@ -161,7 +161,9 @@ class AuthRestServlet(RestServlet):

            try:
                await self.auth_handler.add_oob_auth(
-                    LoginType.REGISTRATION_TOKEN, authdict, request.getClientIP()
+                    LoginType.REGISTRATION_TOKEN,
+                    authdict,
+                    request.getClientAddress().host,
                )
            except LoginError as e:
                html = self.registration_token_template.render(
@@ -15,8 +15,6 @@
import logging
from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple

-from twisted.web.server import Request
-
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.http.server import HttpServer
@@ -97,7 +95,7 @@ class KnockRoomAliasServlet(RestServlet):
        return 200, {"room_id": room_id}

    def on_PUT(
-        self, request: Request, room_identifier: str, txn_id: str
+        self, request: SynapseRequest, room_identifier: str, txn_id: str
    ) -> Awaitable[Tuple[int, JsonDict]]:
        set_tag("txn_id", txn_id)

@@ -69,9 +69,7 @@ class LoginRestServlet(RestServlet):
    SSO_TYPE = "m.login.sso"
    TOKEN_TYPE = "m.login.token"
    JWT_TYPE = "org.matrix.login.jwt"
-    JWT_TYPE_DEPRECATED = "m.login.jwt"
    APPSERVICE_TYPE = "m.login.application_service"
-    APPSERVICE_TYPE_UNSTABLE = "uk.half-shot.msc2778.login.application_service"
    REFRESH_TOKEN_PARAM = "refresh_token"

    def __init__(self, hs: "HomeServer"):
@@ -126,7 +124,6 @@ class LoginRestServlet(RestServlet):
        flows: List[JsonDict] = []
        if self.jwt_enabled:
            flows.append({"type": LoginRestServlet.JWT_TYPE})
-            flows.append({"type": LoginRestServlet.JWT_TYPE_DEPRECATED})

        if self.cas_enabled:
            # we advertise CAS for backwards compat, though MSC1721 renamed it
@@ -156,7 +153,6 @@ class LoginRestServlet(RestServlet):
        flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types())

        flows.append({"type": LoginRestServlet.APPSERVICE_TYPE})
-        flows.append({"type": LoginRestServlet.APPSERVICE_TYPE_UNSTABLE})

        return 200, {"flows": flows}

@@ -175,15 +171,12 @@ class LoginRestServlet(RestServlet):
        )

        try:
-            if login_submission["type"] in (
-                LoginRestServlet.APPSERVICE_TYPE,
-                LoginRestServlet.APPSERVICE_TYPE_UNSTABLE,
-            ):
+            if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE:
                appservice = self.auth.get_appservice_by_req(request)

                if appservice.is_rate_limited():
                    await self._address_ratelimiter.ratelimit(
-                        None, request.getClientIP()
+                        None, request.getClientAddress().host
                    )

                result = await self._do_appservice_login(
@@ -191,23 +184,29 @@ class LoginRestServlet(RestServlet):
                    appservice,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
-            elif self.jwt_enabled and (
-                login_submission["type"] == LoginRestServlet.JWT_TYPE
-                or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED
+            elif (
+                self.jwt_enabled
+                and login_submission["type"] == LoginRestServlet.JWT_TYPE
            ):
-                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
+                await self._address_ratelimiter.ratelimit(
+                    None, request.getClientAddress().host
+                )
                result = await self._do_jwt_login(
                    login_submission,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
            elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
-                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
+                await self._address_ratelimiter.ratelimit(
+                    None, request.getClientAddress().host
+                )
                result = await self._do_token_login(
                    login_submission,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
            else:
-                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
+                await self._address_ratelimiter.ratelimit(
+                    None, request.getClientAddress().host
+                )
                result = await self._do_other_login(
                    login_submission,
                    should_issue_refresh_token=should_issue_refresh_token,