Mirror of https://git.anonymousland.org/anonymousland/synapse.git
Synced 2025-08-15 22:20:15 -04:00

Merge remote-tracking branch 'upstream/release-v1.55'

Commit: c5c2c2e099
184 changed files with 4188 additions and 2122 deletions

.ci/scripts/test_export_data_command.sh

@@ -21,7 +21,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 
 # Run the export-data command on the sqlite test database
 python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
@@ -41,7 +41,7 @@ fi
 
 # Port the SQLite databse to postgres so we can check command works against postgres
 echo "+++ Port SQLite3 databse to postgres"
-scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 # Run the export-data command on postgres database
 python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \

.ci/scripts/test_synapse_port_db.sh

@@ -25,17 +25,19 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 
 # Create the PostgreSQL database.
 .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
+# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 # We should be able to run twice against the same database.
 echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 #####
 
@@ -46,7 +48,7 @@ echo "--- Prepare empty SQLite database"
 # we do this by deleting the sqlite db, and then doing the same again.
 rm .ci/test_db.db
 
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 
 # re-create the PostgreSQL database.
 .ci/scripts/postgres_exec.py \
@@ -54,4 +56,4 @@ scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-b
     "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against empty database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

.dockerignore

@@ -3,11 +3,9 @@
 
 # things to include
 !docker
-!scripts
 !synapse
 !MANIFEST.in
 !README.rst
 !setup.py
-!synctl
 
 **/__pycache__

.github/workflows/release-artifacts.yml (vendored, 7 changes)
@@ -31,7 +31,7 @@ jobs:
         # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
         dists='["debian:sid"]'
         if [[ $GITHUB_REF == refs/tags/* ]]; then
-            dists=$(scripts-dev/build_debian_packages --show-dists-json)
+            dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
         fi
         echo "::set-output name=distros::$dists"
       # map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
       # see https://github.com/docker/build-push-action/issues/252
       # for the cache magic here
       run: |
-        ./src/scripts-dev/build_debian_packages \
+        ./src/scripts-dev/build_debian_packages.py \
           --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
           --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
          --docker-build-arg=--progress=plain \
@@ -112,7 +112,8 @@ jobs:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       with:
         files: |
-          python-dist/*
+          Sdist/*
+          Wheel/*
           debs.tar.xz
         # if it's not already published, keep the release as a draft.
         draft: true

.github/workflows/tests.yml (vendored, 35 changes)
@@ -16,7 +16,8 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
       - run: pip install -e .
-      - run: scripts-dev/generate_sample_config --check
+      - run: scripts-dev/generate_sample_config.sh --check
+      - run: scripts-dev/config-lint.sh
 
   lint:
     runs-on: ubuntu-latest
@@ -51,7 +52,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: "pip install 'towncrier>=18.6.0rc1'"
-      - run: scripts-dev/check-newsfragment
+      - run: scripts-dev/check-newsfragment.sh
         env:
           PULL_REQUEST_NUMBER: ${{ github.event.number }}
@@ -376,7 +377,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
         env:
@@ -387,34 +388,22 @@ jobs:
   tests-done:
     if: ${{ always() }}
     needs:
+      - check-sampleconfig
       - lint
       - lint-crlf
       - lint-newsfile
       - trial
       - trial-olddeps
       - sytest
+      - export-data
       - portdb
       - complement
     runs-on: ubuntu-latest
     steps:
-      - name: Set build result
-        env:
-          NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines.
-        # we set it to an intermediate variable to avoid a pipe, which makes it
-        # hard to set $rc.
-        run: |
-          rc=0
-          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
-          while read job result ; do
-            # The newsfile lint may be skipped on non PR builds
-            if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
-              continue
-            fi
-
-            if [ "$result" != "success" ]; then
-              echo "::set-failed ::Job $job returned $result"
-              rc=1
-            fi
-          done <<< $results
-          exit $rc
+      - uses: matrix-org/done-action@v2
+        with:
+          needs: ${{ toJSON(needs) }}
+          # The newsfile lint may be skipped on non PR builds
+          skippable:
+            lint-newsfile

CHANGES.md (85 changes)
@@ -1,3 +1,88 @@
+Synapse 1.55.0rc1 (2022-03-15)
+==============================
+
+This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.
+
+Features
+--------
+
+- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028))
+- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132))
+- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135))
+- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151))
+- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212))
+
+
+Bugfixes
+--------
+
+- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
+- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
+- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
+- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
+- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
+- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
+
+
+Improved Documentation
+----------------------
+
+- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
+- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
+- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
+- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
+- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))
+
+
+Deprecations and Removals
+-------------------------
+
+- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
+- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
+- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
+
+
+Internal Changes
+----------------
+
+- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
+- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
+- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
+- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
+- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
+- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
+- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
+- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
+- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
+- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
+- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
+- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140))
+- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
+- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
+- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
+- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
+- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
+- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
+- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
+- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
+- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
+- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
+- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
+- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
+- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163))
+- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180))
+- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182))
+- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183))
+- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187))
+- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188))
+- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192))
+- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197))
+- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203))
+- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206))
+- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210))
+- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211))
+
+
 Synapse 1.54.0 (2022-03-08)
 ===========================
 

MANIFEST.in

@@ -1,4 +1,3 @@
-include synctl
 include LICENSE
 include VERSION
 include *.rst
@@ -17,7 +16,6 @@ recursive-include synapse/storage *.txt
 recursive-include synapse/storage *.md
 
 recursive-include docs *
-recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
@@ -53,5 +51,4 @@ prune contrib
 prune debian
 prune demo/etc
 prune docker
-prune snap
 prune stubs

README.rst

@@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports
 
 (to stop, you can use `./demo/stop.sh`)
 
+See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
+for more information.
+
 If you just want to start a single instance of the app and run it directly::
 
     # Create the homeserver.yaml config once

contrib/snap/snapcraft.yaml

@@ -20,7 +20,7 @@ apps:
   generate-config:
     command: generate_config
   generate-signing-key:
-    command: generate_signing_key.py
+    command: generate_signing_key
   register-new-matrix-user:
     command: register_new_matrix_user
     plugs: [network]

debian/changelog (vendored, 6 changes)
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.55.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000
+
 matrix-synapse-py3 (1.54.0) stable; urgency=medium
 
   * New synapse release 1.54.0.
 
demo/.gitignore (vendored, 11 changes)
@@ -1,7 +1,4 @@
-*.db
-*.log
-*.log.*
-*.pid
-
-/media_store.*
-/etc
+# Ignore all the temporary files from the demo servers.
+8080/
+8081/
+8082/

demo/README (deleted, 26 lines)
@@ -1,26 +0,0 @@
-DO NOT USE THESE DEMO SERVERS IN PRODUCTION
-
-Requires you to have done:
-    python setup.py develop
-
-
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
-
-To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
-and are configured in a highly insecure way. Do not use these configuration files in production.
-
-stop.sh will stop the synapse servers and the webclient.
-
-clean.sh will delete the databases and log files.
-
-To start a completely new set of servers, run:
-
-    ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
-
-
-Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
-
-
-
-Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.

demo/clean.sh

@@ -4,6 +4,9 @@ set -e
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
 
+# Ensure that the servers are stopped.
+$DIR/stop.sh
+
 PID_FILE="$DIR/servers.pid"
 
 if [ -f "$PID_FILE" ]; then

demo/start.sh

@@ -6,8 +6,6 @@ CWD=$(pwd)
 
 cd "$DIR/.." || exit
 
-mkdir -p demo/etc
-
 PYTHONPATH=$(readlink -f "$(pwd)")
 export PYTHONPATH
 
@@ -21,22 +19,26 @@ for port in 8080 8081 8082; do
     mkdir -p demo/$port
     pushd demo/$port || exit
 
-    #rm $DIR/etc/$port.config
+    # Generate the configuration for the homeserver at localhost:848x.
     python3 -m synapse.app.homeserver \
         --generate-config \
-        -H "localhost:$https_port" \
-        --config-path "$DIR/etc/$port.config" \
+        --server-name "localhost:$port" \
+        --config-path "$port.config" \
         --report-stats no
 
-    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
-        # Generate tls keys
-        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
+    if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
+        # Generate TLS keys.
+        openssl req -x509 -newkey rsa:4096 \
+            -keyout "localhost:$port.tls.key" \
+            -out "localhost:$port.tls.crt" \
+            -days 365 -nodes -subj "/O=matrix"
 
-        # Regenerate configuration
+        # Add customisations to the configuration.
         {
-            printf '\n\n# Customisation made by demo/start.sh\n'
+            printf '\n\n# Customisation made by demo/start.sh\n\n'
             echo "public_baseurl: http://localhost:$port/"
             echo 'enable_registration: true'
+            echo ''
 
             # Warning, this heredoc depends on the interaction of tabs and spaces.
             # Please don't accidentaly bork me with your fancy settings.
@@ -63,38 +65,34 @@ for port in 8080 8081 8082; do
 
             echo "${listeners}"
 
-            # Disable tls for the servers
-            printf '\n\n# Disable tls on the servers.'
+            # Disable TLS for the servers
+            printf '\n\n# Disable TLS for the servers.'
             echo '# DO NOT USE IN PRODUCTION'
             echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
             echo 'federation_verify_certificates: false'
 
-            # Set tls paths
-            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
-            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
+            # Set paths for the TLS certificates.
+            echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
+            echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
 
             # Ignore keys from the trusted keys server
             echo '# Ignore keys from the trusted keys server'
             echo 'trusted_key_servers:'
             echo ' - server_name: "matrix.org"'
             echo '   accept_keys_insecurely: true'
+            echo ''
 
-            # Reduce the blacklist
-            blacklist=$(cat <<-BLACK
-                # Set the blacklist so that it doesn't include 127.0.0.1, ::1
-                federation_ip_range_blacklist:
-                  - '10.0.0.0/8'
-                  - '172.16.0.0/12'
-                  - '192.168.0.0/16'
-                  - '100.64.0.0/10'
-                  - '169.254.0.0/16'
-                  - 'fe80::/64'
-                  - 'fc00::/7'
-                BLACK
+            # Allow the servers to communicate over localhost.
+            allow_list=$(cat <<-ALLOW_LIST
+                # Allow the servers to communicate over localhost.
+                ip_range_whitelist:
+                  - '127.0.0.1/8'
+                  - '::1/128'
+                ALLOW_LIST
             )
 
-            echo "${blacklist}"
-        } >> "$DIR/etc/$port.config"
+            echo "${allow_list}"
+        } >> "$port.config"
     fi
 
     # Check script parameters
@@ -141,19 +139,18 @@ for port in 8080 8081 8082; do
             burst_count: 1000
         RC
         )
-        echo "${ratelimiting}" >> "$DIR/etc/$port.config"
+        echo "${ratelimiting}" >> "$port.config"
     fi
 fi
 
-if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then
-    echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
-fi
-if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then
-    echo "report_stats: false" >> "$DIR/etc/$port.config"
+# Always disable reporting of stats if the option is not there.
+if ! grep -F "report_stats" -q "$port.config" ; then
+    echo "report_stats: false" >> "$port.config"
 fi
 
+# Run the homeserver in the background.
 python3 -m synapse.app.homeserver \
-    --config-path "$DIR/etc/$port.config" \
+    --config-path "$port.config" \
     -D \
 
 popd || exit

docker/Dockerfile

@@ -46,8 +46,7 @@ RUN \
   && rm -rf /var/lib/apt/lists/*
 
 # Copy just what we need to pip install
-COPY scripts /synapse/scripts/
-COPY MANIFEST.in README.rst setup.py synctl /synapse/
+COPY MANIFEST.in README.rst setup.py /synapse/
 COPY synapse/__init__.py /synapse/synapse/__init__.py
 COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py

docs/SUMMARY.md

@@ -82,6 +82,7 @@
   - [Release Cycle](development/releases.md)
   - [Git Usage](development/git.md)
   - [Testing]()
+    - [Demo scripts](development/demo.md)
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
   - [Experimental features](development/experimental_features.md)

docs/code_style.md

@@ -172,6 +172,6 @@ frobber:
 ```
 
 Note that the sample configuration is generated from the synapse code
-and is maintained by a script, `scripts-dev/generate_sample_config`.
+and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
 Making sure that the output from this script matches the desired format
 is left as an exercise for the reader!

docs/development/contributing_guide.md

@@ -458,6 +458,17 @@ Git allows you to add this signoff automatically when using the `-s`
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.
 
+### Private Sign off
+
+If you would like to provide your legal name privately to the Matrix.org
+Foundation (instead of in a public commit or comment), you can do so
+by emailing your legal name and a link to the pull request to
+[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off).
+It helps to include "sign off" or similar in the subject line. You will then
+be instructed further.
+
+Once private sign off is complete, doing so for future contributions will not
+be required.
+
 # 10. Turn feedback into better code.

docs/development/database_schema.md

@@ -158,9 +158,9 @@ same as integers.
 There are three separate aspects to this:
 
 * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
-  `scripts/synapse_port_db`. This tells the port script to cast the integer
-  value from SQLite to a boolean before writing the value to the postgres
-  database.
+  `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
+  the integer value from SQLite to a boolean before writing the value to the
+  postgres database.
 
 * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
   SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not

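As a rough illustration of the first bullet, `BOOLEAN_COLUMNS` maps table names to their boolean columns; the entries below are a sketch (the `my_new_table` entry is hypothetical, showing where a new column would be registered), not the real list:

```python
# Sketch only: each key is a table name, each value lists that table's
# boolean columns, which the port script casts from SQLite integers to
# Postgres booleans.
BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier", "contains_url"],
    # A hypothetical new table/column would be registered like this:
    "my_new_table": ["my_new_boolean_column"],
}
```
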
docs/development/demo.md (new file, 41 lines)
@@ -0,0 +1,41 @@
+# Synapse demo setup
+
+**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**
+
+Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
+
+The demo setup allows running three federation Synapse servers, with server
+names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
+
+You can access them via any Matrix client over HTTP at `localhost:8080`,
+`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
+`localhost:8481`, and `localhost:8482`.
+
+To enable the servers to communicate, self-signed SSL certificates are generated
+and the servers are configured in a highly insecure way, including:
+
+* Not checking certificates over federation.
+* Not verifying keys.
+
+The servers are configured to store their data under `demo/8080`, `demo/8081`, and
+`demo/8082`. This includes configuration, logs, SQLite databases, and media.
+
+Note that when joining a public room on a different HS via "#foo:bar.net", then
+you are (in the current impl) joining a room with room_id "foo". This means that
+it won't work if your HS already has a room with that name.
+
+## Using the demo scripts
+
+There's three main scripts with straightforward purposes:
+
+* `start.sh` will start the Synapse servers, generating any missing configuration.
+  * This accepts a single parameter `--no-rate-limit` to "disable" rate limits
+    (they actually still exist, but are very high).
+* `stop.sh` will stop the Synapse servers.
+* `clean.sh` will delete the configuration, databases, log files, etc.
+
+To start a completely new set of servers, run:
+
+```sh
+./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
+```

docs/development/room-dag-concepts.md

@@ -30,13 +30,57 @@ rather than skipping any that arrived late; whereas if you're looking at a
 historical section of timeline (i.e. `/messages`), you want to see the best
 representation of the state of the room as others were seeing it at the time.
 
+## Outliers
+
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet. They are "floating" events that we haven't
+yet correlated to the DAG.
+
+Outliers typically arise when we fetch the auth chain or state for a given
+event. When that happens, we just grab the events in the state/auth chain,
+without calculating the state at those events, or backfilling their
+`prev_events`.
+
+So, typically, we won't have the `prev_events` of an `outlier` in the database,
+(though it's entirely possible that we *might* have them for some other
+reason). Other things that make outliers different from regular events:
+
+ * We don't have state for them, so there should be no entry in
+   `event_to_state_groups` for an outlier. (In practice this isn't always
+   the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).
+
+ * We don't record entries for them in the `event_edges`,
+   `event_forward_extremeties` or `event_backward_extremities` tables.
+
+Since outliers are not tied into the DAG, they do not normally form part of the
+timeline sent down to clients via `/sync` or `/messages`; however there is an
+exception:
+
+### Out-of-band membership events
+
+A special case of outlier events are some membership events for federated rooms
+that we aren't full members of. For example:
+
+ * invites received over federation, before we join the room
+ * *rejections* for said invites
+ * knock events for rooms that we would like to join but have not yet joined.
+
+In all the above cases, we don't have the state for the room, which is why they
+are treated as outliers. They are a bit special though, in that they are
+proactively sent to clients via `/sync`.
+
 ## Forward extremity
 
-Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+Most-recent-in-time events in the DAG which are not referenced by any other
+events' `prev_events` yet. (In this definition, outliers, rejected events, and
+soft-failed events don't count.)
 
-The forward extremities of a room are used as the `prev_events` when the next event is sent.
+The forward extremities of a room (or at least, a subset of them, if there are
+more than ten) are used as the `prev_events` when the next event is sent.
+
+The "current state" of a room (ie: the state which would be used if we
+generated a new event) is, therefore, the resolution of the room states
+at each of the forward extremities.
 
 ## Backward extremity
 
@@ -44,23 +88,14 @@ The current marker of where we have backfilled up to and will generally be the
 `prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
 backfilling history.
 
-When we persist a non-outlier event, we clear it as a backward extremity and set
-all of its `prev_events` as the new backward extremities if they aren't already
-persisted in the `events` table.
-
-## Outliers
-
-We mark an event as an `outlier` when we haven't figured out the state for the
-room at that point in the DAG yet.
-
-We won't *necessarily* have the `prev_events` of an `outlier` in the database,
-but it's entirely possible that we *might*.
-
-For example, when we fetch the event auth chain or state for a given event, we
-mark all of those claimed auth events as outliers because we haven't done the
-state calculation ourself.
+Note that, unlike forward extremities, we typically don't have any backward
+extremity events themselves in the database - or, if we do, they will be "outliers" (see
+above). Either way, we don't expect to have the room state at a backward extremity.
+
+When we persist a non-outlier event, if it was previously a backward extremity,
+we clear it as a backward extremity and set all of its `prev_events` as the new
+backward extremities if they aren't already persisted as non-outliers. This
+therefore keeps the backward extremities up-to-date.
 
 ## State groups
 

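Since the forward-extremity definition above is easy to misread, here is a toy sketch (not Synapse code) that computes the forward extremities of a hand-built DAG, ignoring outliers and rejected events:

```python
# Toy DAG: each event maps to its prev_events. The forward extremities are
# the events that no other event references (event IDs are made up).
events = {
    "$a": [],
    "$b": ["$a"],
    "$c": ["$a"],
    "$d": ["$b"],
}
referenced = {prev for prevs in events.values() for prev in prevs}
print(sorted(set(events) - referenced))  # ['$c', '$d']
```
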
docs/federate.md

@@ -63,4 +63,5 @@ release of Synapse.
 
 If you want to get up and running quickly with a trio of homeservers in a
 private federation, there is a script in the `demo` directory. This is mainly
-useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/).
+useful just for development purposes. See
+[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html).

docs/modules/third_party_rules_callbacks.md

@@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c
 
 If multiple modules implement this callback, Synapse runs them all in order.
 
+### `check_can_shutdown_room`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_shutdown_room(
+    user_id: str, room_id: str,
+) -> bool:
+```
+
+Called when an admin user requests the shutdown of a room. The module must return a
+boolean indicating whether the shutdown can go through. If the callback returns `False`,
+the shutdown will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+### `check_can_deactivate_user`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_deactivate_user(
+    user_id: str, by_admin: bool,
+) -> bool:
+```
+
+Called when the deactivation of a user is requested. User deactivation can be
+performed by an admin or the user themselves, so developers are encouraged to check the
+requester when implementing this callback. The module must return a
+boolean indicating whether the deactivation can go through. If the callback returns `False`,
+the deactivation will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+The module is passed two parameters, `user_id` which is the ID of the user being deactivated, and `by_admin` which is `True` if the request is made by a server admin, and `False` otherwise.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+
 ### `on_profile_update`
 
 _First introduced in Synapse v1.54.0_

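The hunk above only shows the callback signatures; a minimal module sketch may make the registration clearer. It assumes Synapse's standard module API method `register_third_party_rules_callbacks`; the class name and the policy decisions are purely illustrative:

```python
# Illustrative only: a module registering the two new callbacks; the
# decisions below are placeholders, not Synapse defaults.
class ExampleRulesModule:
    def __init__(self, config: dict, api):
        api.register_third_party_rules_callbacks(
            check_can_shutdown_room=self.check_can_shutdown_room,
            check_can_deactivate_user=self.check_can_deactivate_user,
        )

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        # Refuse shutdown of one protected room; allow everything else.
        return room_id != "!protected:example.com"

    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
        # Only allow deactivations requested by a server admin.
        return by_admin
```
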
docs/other/running_synapse_on_single_board_computers.md

@@ -31,28 +31,29 @@ Anything that requires modifying the device list [#7721](https://github.com/matr
 Put the below in a new file at /etc/matrix-synapse/conf.d/sbc.yaml to override the defaults in homeserver.yaml.
 
 ```
-# Set to false to disable presence tracking on this homeserver.
+# Disable presence tracking, which is currently fairly resource intensive
+# More info: https://github.com/matrix-org/synapse/issues/9478
 use_presence: false
 
-# When this is enabled, the room "complexity" will be checked before a user
-# joins a new remote room. If it is above the complexity limit, the server will
-# disallow joining, or will instantly leave.
+# Set a small complexity limit, preventing users from joining large rooms
+# which may be resource-intensive to remain a part of.
+#
+# Note that this will not prevent users from joining smaller rooms that
+# eventually become complex.
 limit_remote_rooms:
-  # Uncomment to enable room complexity checking.
-  #enabled: true
+  enabled: true
   complexity: 3.0
 
 # Database configuration
 database:
+  # Use postgres for the best performance
   name: psycopg2
   args:
     user: matrix-synapse
-    # Generate a long, secure one with a password manager
+    # Generate a long, secure password using a password manager
    password: hunter2
     database: matrix-synapse
     host: localhost
-    cp_min: 5
-    cp_max: 10
 ```
 
 Currently the complexity is measured by [current_state_events / 500](https://github.com/matrix-org/synapse/blob/v1.20.1/synapse/storage/databases/main/events_worker.py#L986). You can find join times and your most complex rooms like this:

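A small sketch of the metric referenced above, together with the `complexity: 3.0` limit from the config sample; this is an illustration of the arithmetic, not Synapse's implementation:

```python
# The v1 metric from the paragraph above: complexity = current_state_events / 500.
def room_complexity(current_state_events: int) -> float:
    return current_state_events / 500

# With the sample config's limit of 3.0, joins are refused once a room
# holds more than 1500 current state events.
assert room_complexity(1500) == 3.0
```
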
docs/postgres.md

@@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the
 copy is complete, restart synapse. For instance:
 
 ```sh
-./synctl stop
+synctl stop
 cp homeserver.db homeserver.db.snapshot
-./synctl start
+synctl start
 ```
 
 Copy the old config file into a new config file:
@@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the
 PostgreSQL database configuration file `homeserver-postgres.yaml`:
 
 ```sh
-./synctl stop
+synctl stop
 mv homeserver.yaml homeserver-old-sqlite.yaml
 mv homeserver-postgres.yaml homeserver.yaml
-./synctl start
+synctl start
 ```
 
 Synapse should now be running against PostgreSQL.

docs/sample_config.yaml

@@ -1947,8 +1947,14 @@ saml2_config:
   #
   # localpart_template: Jinja2 template for the localpart of the MXID.
   #    If this is not set, the user will be prompted to choose their
-  #    own username (see 'sso_auth_account_details.html' in the 'sso'
-  #    section of this file).
+  #    own username (see the documentation for the
+  #    'sso_auth_account_details.html' template). This template can
+  #    use the 'localpart_from_email' filter.
+  #
+  # confirm_localpart: Whether to prompt the user to validate (or
+  #    change) the generated localpart (see the documentation for the
+  #    'sso_auth_account_details.html' template), instead of
+  #    registering the account right away.
   #
   # display_name_template: Jinja2 template for the display name to set
   #    on first login. If unset, no displayname will be set.
@@ -2729,3 +2735,35 @@ redis:
   # Optional password if configured on the Redis instance
   #
   #password: <secret_password>
+
+
+## Background Updates ##
+
+# Background updates are database updates that are run in the background in batches.
+# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to
+# sleep can all be configured. This is helpful to speed up or slow down the updates.
+#
+background_updates:
+    # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set
+    # a time to change the default.
+    #
+    #background_update_duration_ms: 500
+
+    # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
+    #
+    #sleep_enabled: false
+
+    # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment
+    # and set a duration to change the default.
+    #
+    #sleep_duration_ms: 300
+
+    # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and
+    # set a size to change the default.
+    #
+    #min_batch_size: 10
+
+    # The batch size to use for the first iteration of a new background update. The default is 100.
+    # Uncomment and set a size to change the default.
+    #
+    #default_batch_size: 50

docs/templates.md

@@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver'
 
 Example: `message.sender_avatar_url|mxc_to_http(32,32)`
 
+```python
+localpart_from_email(address: str) -> str
+```
+
+Returns the local part of an email address (e.g. `alice` in `alice@example.com`).
+
+Example: `user.email_address|localpart_from_email`
+
 ## Email templates

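For clarity, the new filter's behaviour can be sketched in plain Python; this is an illustration, not Synapse's implementation:

```python
# Behavioural sketch of the localpart_from_email template filter.
def localpart_from_email(address: str) -> str:
    # "alice@example.com" -> "alice"
    return address.rsplit("@", 1)[0]

assert localpart_from_email("alice@example.com") == "alice"
```
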
@@ -176,8 +183,11 @@ Below are the templates Synapse will look for when generating pages related to S
     for the brand of the IdP
   * `user_attributes`: an object containing details about the user that
     we received from the IdP. May have the following attributes:
-    * display_name: the user's display_name
-    * emails: a list of email addresses
+    * `display_name`: the user's display name
+    * `emails`: a list of email addresses
+    * `localpart`: the local part of the Matrix user ID to register,
+      if `localpart_template` is set in the mapping provider configuration (empty
+      string if not)
   The template should render a form which submits the following fields:
   * `username`: the localpart of the user's chosen user id
 * `sso_new_user_consent.html`: HTML page allowing the user to consent to the

docs/turn-howto.md

@@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse:
 
 * If you use synctl:
   ```sh
-  cd /where/you/run/synapse
-  ./synctl restart
+  # Depending on how Synapse is installed, synctl may already be on
+  # your PATH. If not, you may need to activate a virtual environment.
+  synctl restart
   ```
 * If you use systemd:
   ```sh

docs/upgrade.md

@@ -47,7 +47,7 @@ this document.
 3. Restart Synapse:
 
    ```bash
-   ./synctl restart
+   synctl restart
    ```
 
 To check whether your update was successful, you can check the running
@@ -85,6 +85,49 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
 
+# Upgrading to v1.56.0
+
+## Groups/communities feature has been deprecated
+
+The non-standard groups/communities feature in Synapse has been deprecated and will
+be disabled by default in Synapse v1.58.0.
+
+You can test disabling it by adding the following to your homeserver configuration:
+
+```yaml
+experimental_features:
+  groups_enabled: false
+```
+
+# Upgrading to v1.55.0
+
+## `synctl` script has been moved
+
+The `synctl` script
+[has been made](https://github.com/matrix-org/synapse/pull/12140) an
+[entry point](https://packaging.python.org/en/latest/specifications/entry-points/)
+and no longer exists at the root of Synapse's source tree. If you wish to use
+`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g.
+`synctl start` instead of `./synctl start` or `/path/to/synctl start`.
+
+You will need to ensure `synctl` is on your `PATH`.
+  - This is automatically the case when using
+    [Debian packages](https://packages.matrix.org/debian/) or
+    [docker images](https://hub.docker.com/r/matrixdotorg/synapse)
+    provided by Matrix.org.
+  - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added
+    to your Python installation's `bin`. This should be on your `PATH`
+    automatically, though you might need to activate a virtual environment
+    depending on how you installed Synapse.
+
+## Compatibility dropped for Mjolnir 1.3.1 and earlier
+
+Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier.
+If you use the Mjolnir module to moderate your homeserver,
+please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
+
+
 # Upgrading to v1.54.0
 
 ## Legacy structured logging configuration removal

@@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
 ```

 A new server admin user can also be created using the `register_new_matrix_user`
-command. This is a script that is located in the `scripts/` directory, or possibly
+command. This is a script that is distributed as part of synapse. It is possibly
 already on your `$PATH` depending on how Synapse was installed.

 Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
@@ -351,8 +351,11 @@ is only supported with Redis-based replication.)

 To enable this, the worker must have a HTTP replication listener configured,
 have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams. For example, to move event persistence off to a
-dedicated worker, the shared configuration would include:
+can handle multiple streams, but unless otherwise documented, each stream can only
+have a single writer.
+
+For example, to move event persistence off to a dedicated worker, the shared
+configuration would include:

 ```yaml
 instance_map:
@@ -370,8 +373,8 @@ streams and the endpoints associated with them:

 ##### The `events` stream

-The `events` stream also experimentally supports having multiple writers, where
-work is sharded between them by room ID. Note that you *must* restart all worker
+The `events` stream experimentally supports having multiple writers, where work
+is sharded between them by room ID. Note that you *must* restart all worker
 instances when adding or removing event persisters. An example `stream_writers`
 configuration with multiple writers:

@@ -384,38 +387,38 @@ stream_writers:

 ##### The `typing` stream

-The following endpoints should be routed directly to the workers configured as
-stream writers for the `typing` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `typing` stream:

     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing

 ##### The `to_device` stream

-The following endpoints should be routed directly to the workers configured as
-stream writers for the `to_device` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `to_device` stream:

     ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/

 ##### The `account_data` stream

-The following endpoints should be routed directly to the workers configured as
-stream writers for the `account_data` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `account_data` stream:

     ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags
     ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data

 ##### The `receipts` stream

-The following endpoints should be routed directly to the workers configured as
-stream writers for the `receipts` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `receipts` stream:

     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers

 ##### The `presence` stream

-The following endpoints should be routed directly to the workers configured as
-stream writers for the `presence` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `presence` stream:

     ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
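As an aside (not part of the commit), the endpoint patterns above are plain regular expressions, so a reverse-proxy routing rule can be sanity-checked directly against them; a minimal sketch:

```python
import re

# One of the patterns listed above, for the typing stream writer.
TYPING_PATTERN = re.compile(r"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing")

# Requests matching the pattern should be routed to the typing stream writer.
assert TYPING_PATTERN.match("/_matrix/client/v3/rooms/!abc:example.com/typing")
assert not TYPING_PATTERN.match("/_matrix/client/v3/sync")
```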
mypy.ini (33 changed lines)
@@ -11,7 +11,7 @@ local_partial_types = True
 no_implicit_optional = True

 files =
-  scripts-dev/sign_json,
+  scripts-dev/,
   setup.py,
   synapse/,
   tests/
@@ -23,6 +23,20 @@ files =
   # https://docs.python.org/3/library/re.html#re.X
 exclude = (?x)
   ^(
+  |scripts-dev/build_debian_packages.py
+  |scripts-dev/check_signature.py
+  |scripts-dev/definitions.py
+  |scripts-dev/federation_client.py
+  |scripts-dev/hash_history.py
+  |scripts-dev/list_url_patterns.py
+  |scripts-dev/release.py
+  |scripts-dev/tail-synapse.py
+
+  |synapse/_scripts/export_signing_key.py
+  |synapse/_scripts/move_remote_media_to_new_store.py
+  |synapse/_scripts/synapse_port_db.py
+  |synapse/_scripts/update_synapse_database.py
+
   |synapse/storage/databases/__init__.py
   |synapse/storage/databases/main/__init__.py
   |synapse/storage/databases/main/cache.py
@@ -74,17 +88,8 @@ exclude = (?x)
   |tests/push/test_http.py
   |tests/push/test_presentable_names.py
   |tests/push/test_push_rule_evaluator.py
-  |tests/rest/client/test_account.py
-  |tests/rest/client/test_filter.py
-  |tests/rest/client/test_report_event.py
-  |tests/rest/client/test_rooms.py
-  |tests/rest/client/test_third_party_rules.py
   |tests/rest/client/test_transactions.py
-  |tests/rest/client/test_typing.py
-  |tests/rest/key/v2/test_remote_key_resource.py
-  |tests/rest/media/v1/test_base.py
   |tests/rest/media/v1/test_media_storage.py
-  |tests/rest/media/v1/test_url_preview.py
   |tests/scripts/test_new_matrix_user.py
   |tests/server.py
   |tests/server_notices/test_resource_limits_server_notices.py
@@ -246,10 +251,7 @@ disallow_untyped_defs = True
 [mypy-tests.storage.test_user_directory]
 disallow_untyped_defs = True

-[mypy-tests.rest.admin.*]
-disallow_untyped_defs = True
-
-[mypy-tests.rest.client.*]
+[mypy-tests.rest.*]
 disallow_untyped_defs = True

 [mypy-tests.federation.transport.test_client]
@@ -350,3 +352,6 @@ ignore_missing_imports = True

 [mypy-zope]
 ignore_missing_imports = True
+
+[mypy-incremental.*]
+ignore_missing_imports = True
@@ -71,4 +71,4 @@ fi

 # Run the tests!
 echo "Images built; running complement"
-go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-#
-# Update/check the docs/sample_config.yaml
-
-set -e
-
-cd "$(dirname "$0")/.."
-
-SAMPLE_CONFIG="docs/sample_config.yaml"
-SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"
-
-check() {
-    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1
-}
-
-if [ "$1" == "--check" ]; then
-    diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
-        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
-        exit 1
-    }
-    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || {
-        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
-        exit 1
-    }
-else
-    ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
-    ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG"
-fi
scripts-dev/generate_sample_config.sh (new executable file, 28 lines)
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# Update/check the docs/sample_config.yaml
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+SAMPLE_CONFIG="docs/sample_config.yaml"
+SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"
+
+check() {
+    diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1
+}
+
+if [ "$1" == "--check" ]; then
+    diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || {
+        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
+        exit 1
+    }
+    diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || {
+        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
+        exit 1
+    }
+else
+    synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
+    synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG"
+fi
@@ -84,17 +84,8 @@ else
   files=(
     "synapse" "docker" "tests"
     # annoyingly, black doesn't find these so we have to list them
-    "scripts/export_signing_key"
-    "scripts/generate_config"
-    "scripts/generate_log_config"
-    "scripts/hash_password"
-    "scripts/register_new_matrix_user"
-    "scripts/synapse_port_db"
-    "scripts/update_synapse_database"
     "scripts-dev"
-    "scripts-dev/build_debian_packages"
-    "scripts-dev/sign_json"
-    "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
+    "contrib" "setup.py" "synmark" "stubs" ".ci"
  )
 fi
 fi
@@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"

 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
 echo "Running db background jobs..."
-scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG"
+synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG"

 # Create the PostgreSQL database.
 echo "Creating postgres database..."
@@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
 echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
 if [ -z "$COVERAGE" ]; then
   # No coverage needed
-  scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
+  synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
 else
   # Coverage desired
-  coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
+  coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
 fi

 # Delete schema_version, applied_schema_deltas and applied_module_schemas tables
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse._scripts.register_new_matrix_user import main
-
-if __name__ == "__main__":
-    main()
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse._scripts.review_recent_signups import main
-
-if __name__ == "__main__":
-    main()
@@ -1,45 +0,0 @@
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-
-use JSON::XS;
-use LWP::UserAgent;
-use URI::Escape;
-
-if (@ARGV < 4) {
-    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
-}
-
-my ($hs, $access_token, $room_id, $group_id) = @ARGV;
-my $ua = LWP::UserAgent->new();
-$ua->timeout(10);
-
-if ($room_id =~ /^#/) {
-    $room_id = uri_escape($room_id);
-    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
-}
-
-my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
-my $group_users = [
-    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
-    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
-];
-
-die "refusing to sync from empty room" unless (@$room_users);
-die "refusing to sync to empty group" unless (@$group_users);
-
-my $diff = {};
-foreach my $user (@$room_users) { $diff->{$user}++ }
-foreach my $user (@$group_users) { $diff->{$user}-- }
-
-foreach my $user (keys %$diff) {
-    if ($diff->{$user} == 1) {
-        warn "inviting $user";
-        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
-    }
-    elsif ($diff->{$user} == -1) {
-        warn "removing $user";
-        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
-    }
-}
setup.py (14 changed lines)
@@ -15,7 +15,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import glob
 import os
 from typing import Any, Dict

@@ -153,8 +152,20 @@ setup(
     python_requires="~=3.7",
     entry_points={
         "console_scripts": [
+            # Application
             "synapse_homeserver = synapse.app.homeserver:main",
             "synapse_worker = synapse.app.generic_worker:main",
+            "synctl = synapse._scripts.synctl:main",
+            # Scripts
+            "export_signing_key = synapse._scripts.export_signing_key:main",
+            "generate_config = synapse._scripts.generate_config:main",
+            "generate_log_config = synapse._scripts.generate_log_config:main",
+            "generate_signing_key = synapse._scripts.generate_signing_key:main",
+            "hash_password = synapse._scripts.hash_password:main",
+            "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main",
+            "synapse_port_db = synapse._scripts.synapse_port_db:main",
+            "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main",
+            "update_synapse_database = synapse._scripts.update_synapse_database:main",
         ]
     },
     classifiers=[
@@ -167,6 +178,5 @@ setup(
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
     ],
-    scripts=["synctl"] + glob.glob("scripts/*"),
     cmdclass={"test": TestCommand},
 )
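For context on the `console_scripts` mechanism used above (a generic sketch, not Synapse's code): each entry maps an executable name to a `module:function`, and installing the package generates a wrapper script on `PATH`. The package and function names below are hypothetical.

```python
# setup.py for a hypothetical package, showing the same entry-point mechanism.
from setuptools import setup

setup(
    name="example-tool",
    version="0.1",
    py_modules=["example_tool"],
    entry_points={
        # After `pip install .`, a wrapper named `example-tool` appears on
        # PATH and calls example_tool.main().
        "console_scripts": ["example-tool = example_tool:main"],
    },
)
```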
@@ -20,7 +20,7 @@ from twisted.internet import protocol
 from twisted.internet.defer import Deferred

 class RedisProtocol(protocol.Protocol):
-    def publish(self, channel: str, message: bytes): ...
+    def publish(self, channel: str, message: bytes) -> "Deferred[None]": ...
     def ping(self) -> "Deferred[None]": ...
     def set(
         self,
@@ -52,11 +52,14 @@ def lazyConnection(
     convertNumbers: bool = ...,
 ) -> RedisProtocol: ...

-class ConnectionHandler: ...
+# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies
+# most methods to it via ConnectionHandler.__getattr__.
+class ConnectionHandler(RedisProtocol):
+    def disconnect(self) -> "Deferred[None]": ...

 class RedisFactory(protocol.ReconnectingClientFactory):
     continueTrying: bool
-    handler: RedisProtocol
+    handler: ConnectionHandler
     pool: List[RedisProtocol]
     replyTimeout: Optional[int]
     def __init__(
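The comment added to the stub refers to runtime attribute proxying, which static type checkers cannot see, hence the pretend inheritance. A minimal sketch of that pattern (illustrative only, not txredisapi's real classes):

```python
class Protocol:
    def ping(self) -> str:
        return "pong"

class Handler:
    # Not a subclass of Protocol at runtime; unknown attribute lookups are
    # forwarded to a wrapped protocol instance instead.
    def __init__(self, proto: Protocol) -> None:
        self._proto = proto

    def __getattr__(self, name: str):
        return getattr(self._proto, name)

handler = Handler(Protocol())
assert handler.ping() == "pong"  # resolved via __getattr__
```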
@@ -25,6 +25,27 @@ if sys.version_info < (3, 7):
     print("Synapse requires Python 3.7 or above.")
     sys.exit(1)

+# Allow using the asyncio reactor via env var.
+if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
+    try:
+        from incremental import Version
+
+        import twisted
+
+        # We need a bugfix that is included in Twisted 21.2.0:
+        # https://twistedmatrix.com/trac/ticket/9787
+        if twisted.version < Version("Twisted", 21, 2, 0):
+            print("Using asyncio reactor requires Twisted>=21.2.0")
+            sys.exit(1)
+
+        import asyncio
+
+        from twisted.internet import asyncioreactor
+
+        asyncioreactor.install(asyncio.get_event_loop())
+    except ImportError:
+        pass
+
 # Twisted and canonicaljson will fail to import when this file is executed to
 # get the __version__ during a fresh install. That's OK and subsequent calls to
 # actually start Synapse will import these libraries fine.
@@ -47,7 +68,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.54.0"
+__version__ = "1.55.0rc1"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
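A note on how this opt-in appears intended to be used (an inference from the code above, not documented elsewhere in the commit): the variable must be set before `synapse` is first imported, since the reactor is installed as an import-time side effect.

```python
import os

# Must happen before the first `import synapse`, because the asyncio
# reactor is installed as a side effect of importing the package.
os.environ["SYNAPSE_ASYNC_IO_REACTOR"] = "1"

import synapse  # reactor installed here if Twisted >= 21.2.0 is available
```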
@@ -50,7 +50,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
     )


-if __name__ == "__main__":
+def main():
     parser = argparse.ArgumentParser()

     parser.add_argument(
@@ -85,7 +85,6 @@ if __name__ == "__main__":
         else format_plain
     )

-    keys = []
     for file in args.key_file:
         try:
             res = read_signing_keys(file)
@@ -98,3 +97,7 @@ if __name__ == "__main__":
             res = []
         for key in res:
             formatter(get_verify_key(key))
+
+
+if __name__ == "__main__":
+    main()
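All of the `_scripts` hunks below follow the same pattern: module-level code moves into a `main()` function so that `setup.py` can reference `module:main` as a console script, while the `__main__` guard keeps direct execution working. A generic sketch of the pattern (hypothetical module and option names):

```python
# tool.py: a hypothetical module usable both as `python tool.py`
# and as a console_scripts entry point ("tool = tool:main").
import argparse

def main() -> None:
    parser = argparse.ArgumentParser(description="Example entry point")
    parser.add_argument("--name", default="world")
    args = parser.parse_args()
    print(f"hello {args.name}")

if __name__ == "__main__":
    main()
```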
@@ -6,7 +6,8 @@ import sys

 from synapse.config.homeserver import HomeServerConfig

-if __name__ == "__main__":
+
+def main():
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--config-dir",
@@ -76,3 +77,7 @@ if __name__ == "__main__":
     shutil.copyfileobj(args.header_file, args.output_file)

     args.output_file.write(conf)
+
+
+if __name__ == "__main__":
+    main()
@@ -19,7 +19,8 @@ import sys

 from synapse.config.logger import DEFAULT_LOG_CONFIG

-if __name__ == "__main__":
+
+def main():
     parser = argparse.ArgumentParser()

     parser.add_argument(
@@ -42,3 +43,7 @@ if __name__ == "__main__":
     out = args.output_file
     out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file))
     out.flush()
+
+
+if __name__ == "__main__":
+    main()
@@ -19,7 +19,8 @@ from signedjson.key import generate_signing_key, write_signing_keys

 from synapse.util.stringutils import random_string

-if __name__ == "__main__":
+
+def main():
     parser = argparse.ArgumentParser()

     parser.add_argument(
@@ -34,3 +35,7 @@ if __name__ == "__main__":
     key_id = "a_" + random_string(4)
     key = (generate_signing_key(key_id),)
     write_signing_keys(args.output_file, key)
+
+
+if __name__ == "__main__":
+    main()
@@ -8,9 +8,6 @@ import unicodedata
 import bcrypt
 import yaml

-bcrypt_rounds = 12
-password_pepper = ""
-

 def prompt_for_pass():
     password = getpass.getpass("Password: ")
@@ -26,7 +23,10 @@ def prompt_for_pass():
     return password


-if __name__ == "__main__":
+def main():
+    bcrypt_rounds = 12
+    password_pepper = ""
+
     parser = argparse.ArgumentParser(
         description=(
             "Calculate the hash of a new password, so that passwords can be reset"
@@ -77,3 +77,7 @@ if __name__ == "__main__":
     ).decode("ascii")

     print(hashed)
+
+
+if __name__ == "__main__":
+    main()
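For reference, the hashing these defaults feed into looks roughly like the following (a sketch of standard `bcrypt` usage; the exact Synapse call site is not shown in this hunk, so treat the composition as an assumption):

```python
import bcrypt

password = "correct horse battery staple"
password_pepper = ""  # matches the default above
bcrypt_rounds = 12    # matches the default above

# Hash the peppered password; the salt encodes the round count.
hashed = bcrypt.hashpw(
    (password + password_pepper).encode("utf8"),
    bcrypt.gensalt(bcrypt_rounds),
).decode("ascii")
print(hashed)
```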
@@ -28,7 +28,7 @@ This can be extracted from postgres with::

 To use, pipe the above into::

-    PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
+    PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
 """

 import argparse
@@ -1146,7 +1146,7 @@ class TerminalProgress(Progress):
 ##############################################


-if __name__ == "__main__":
+def main():
     parser = argparse.ArgumentParser(
         description="A script to port an existing synapse SQLite database to"
         " a new PostgreSQL database."
@@ -1251,3 +1251,7 @@ if __name__ == "__main__":
         sys.stderr.write(end_error)

         sys.exit(5)
+
+
+if __name__ == "__main__":
+    main()
@@ -178,7 +178,9 @@ class RelationTypes:
     ANNOTATION: Final = "m.annotation"
     REPLACE: Final = "m.replace"
     REFERENCE: Final = "m.reference"
-    THREAD: Final = "io.element.thread"
+    THREAD: Final = "m.thread"
+    # TODO Remove this in Synapse >= v1.57.0.
+    UNSTABLE_THREAD: Final = "io.element.thread"


 class LimitBlockingTypes:
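To illustrate what this constant change means on the wire (a hypothetical event body, not taken from the diff; the event ID is a placeholder): a threaded reply now carries the stable `rel_type` rather than the unstable prefix.

```python
# Hypothetical event content using the stable thread relation type.
event_content = {
    "msgtype": "m.text",
    "body": "reply in thread",
    "m.relates_to": {
        "rel_type": "m.thread",         # RelationTypes.THREAD after this change
        "event_id": "$thread_root_id",  # placeholder thread-root event ID
    },
}
```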
@@ -88,7 +88,9 @@ ROOM_EVENT_FILTER_SCHEMA = {
         "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
         "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
         # MSC3440, filtering by event relations.
+        "related_by_senders": {"type": "array", "items": {"type": "string"}},
         "io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
+        "related_by_rel_types": {"type": "array", "items": {"type": "string"}},
         "io.element.relation_types": {"type": "array", "items": {"type": "string"}},
     },
 }
@@ -318,19 +320,18 @@ class Filter:
         self.labels = filter_json.get("org.matrix.labels", None)
         self.not_labels = filter_json.get("org.matrix.not_labels", [])

-        # Ideally these would be rejected at the endpoint if they were provided
-        # and not supported, but that would involve modifying the JSON schema
-        # based on the homeserver configuration.
+        self.related_by_senders = self.filter_json.get("related_by_senders", None)
+        self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
+
+        # Fallback to the unstable prefix if the stable version is not given.
         if hs.config.experimental.msc3440_enabled:
-            self.relation_senders = self.filter_json.get(
+            self.related_by_senders = self.related_by_senders or self.filter_json.get(
                 "io.element.relation_senders", None
             )
-            self.relation_types = self.filter_json.get(
-                "io.element.relation_types", None
+            self.related_by_rel_types = (
+                self.related_by_rel_types
+                or self.filter_json.get("io.element.relation_types", None)
             )
-        else:
-            self.relation_senders = None
-            self.relation_types = None

     def filters_all_types(self) -> bool:
         return "*" in self.not_types
@@ -461,7 +462,7 @@ class Filter:
         event_ids = [event.event_id for event in events if isinstance(event, EventBase)]  # type: ignore[attr-defined]
         event_ids_to_keep = set(
             await self._store.events_have_relations(
-                event_ids, self.relation_senders, self.relation_types
+                event_ids, self.related_by_senders, self.related_by_rel_types
             )
         )
@@ -474,7 +475,7 @@ class Filter:
     async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
         result = [event for event in events if self._check(event)]

-        if self.relation_senders or self.relation_types:
+        if self.related_by_senders or self.related_by_rel_types:
             return await self._check_event_relations(result)

         return result
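For illustration (not part of the commit), a client-supplied room event filter using the stable MSC3440 field names accepted after this change might look like the following; clients still speaking the unstable prefix would send `io.element.relation_senders` and `io.element.relation_types` instead.

```python
# Hypothetical filter fragment: keep only events that have relations from
# the given sender, of the given relation type.
room_event_filter = {
    "related_by_senders": ["@alice:example.com"],
    "related_by_rel_types": ["m.thread"],
}
```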
@@ -322,7 +322,8 @@ class GenericWorkerServer(HomeServer):

             presence.register_servlets(self, resource)

-            groups.register_servlets(self, resource)
+            if self.config.experimental.groups_enabled:
+                groups.register_servlets(self, resource)

             resources.update({CLIENT_API_PREFIX: resource})

@@ -417,7 +418,7 @@ class GenericWorkerServer(HomeServer):
         else:
             logger.warning("Unsupported listener type: %s", listener.type)

-        self.get_tcp_replication().start_replication(self)
+        self.get_replication_command_handler().start_replication(self)


 def start(config_options: List[str]) -> None:
@@ -273,7 +273,7 @@ class SynapseHomeServer(HomeServer):
         # If redis is enabled we connect via the replication command handler
         # in the same way as the workers (since we're effectively a client
         # rather than a server).
-        self.get_tcp_replication().start_replication(self)
+        self.get_replication_command_handler().start_replication(self)

         for listener in self.config.server.listeners:
             if listener.type == "http":
@@ -175,27 +175,14 @@ class ApplicationService:
             return namespace.exclusive
         return False

-    async def _matches_user(self, event: EventBase, store: "DataStore") -> bool:
-        if self.is_interested_in_user(event.sender):
-            return True
-
-        # also check m.room.member state key
-        if event.type == EventTypes.Member and self.is_interested_in_user(
-            event.state_key
-        ):
-            return True
-
-        does_match = await self.matches_user_in_member_list(event.room_id, store)
-        return does_match
-
     @cached(num_args=1, cache_context=True)
-    async def matches_user_in_member_list(
+    async def _matches_user_in_member_list(
         self,
         room_id: str,
         store: "DataStore",
         cache_context: _CacheContext,
     ) -> bool:
-        """Check if this service is interested a room based upon it's membership
+        """Check if this service is interested a room based upon its membership

         Args:
             room_id: The room to check.
@@ -214,47 +201,110 @@ class ApplicationService:
             return True
         return False

-    def _matches_room_id(self, event: EventBase) -> bool:
-        if hasattr(event, "room_id"):
-            return self.is_interested_in_room(event.room_id)
-        return False
-
-    async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool:
-        alias_list = await store.get_aliases_for_room(event.room_id)
+    def is_interested_in_user(
+        self,
+        user_id: str,
+    ) -> bool:
+        """
+        Returns whether the application is interested in a given user ID.
+
+        The appservice is considered to be interested in a user if either: the
+        user ID is in the appservice's user namespace, or if the user is the
+        appservice's configured sender_localpart.
+
+        Args:
+            user_id: The ID of the user to check.
+
+        Returns:
+            True if the application service is interested in the user, False if not.
+        """
+        return (
+            # User is the appservice's sender_localpart user
+            user_id == self.sender
+            # User is in the appservice's user namespace
+            or self.is_user_in_namespace(user_id)
+        )
+
+    @cached(num_args=1, cache_context=True)
+    async def is_interested_in_room(
+        self,
+        room_id: str,
+        store: "DataStore",
+        cache_context: _CacheContext,
+    ) -> bool:
+        """
+        Returns whether the application service is interested in a given room ID.
+
+        The appservice is considered to be interested in the room if either: the ID or one
+        of the aliases of the room is in the appservice's room ID or alias namespace
+        respectively, or if one of the members of the room fall into the appservice's user
+        namespace.
+
+        Args:
+            room_id: The ID of the room to check.
+            store: The homeserver's datastore class.
+
+        Returns:
+            True if the application service is interested in the room, False if not.
+        """
+        # Check if we have interest in this room ID
+        if self.is_room_id_in_namespace(room_id):
+            return True
+
+        # likewise with the room's aliases (if it has any)
+        alias_list = await store.get_aliases_for_room(room_id)
         for alias in alias_list:
-            if self.is_interested_in_alias(alias):
+            if self.is_room_alias_in_namespace(alias):
                 return True

-        return False
+        # And finally, perform an expensive check on whether any of the
+        # users in the room match the appservice's user namespace
+        return await self._matches_user_in_member_list(
+            room_id, store, on_invalidate=cache_context.invalidate
+        )

-    async def is_interested(self, event: EventBase, store: "DataStore") -> bool:
+    @cached(num_args=1, cache_context=True)
+    async def is_interested_in_event(
+        self,
+        event_id: str,
+        event: EventBase,
+        store: "DataStore",
+        cache_context: _CacheContext,
+    ) -> bool:
         """Check if this service is interested in this event.

         Args:
+            event_id: The ID of the event to check. This is purely used for simplifying the
+                caching of calls to this method.
             event: The event to check.
             store: The datastore to query.

         Returns:
-            True if this service would like to know about this event.
+            True if this service would like to know about this event, otherwise False.
         """
-        # Do cheap checks first
-        if self._matches_room_id(event):
+        # Check if we're interested in this event's sender by namespace (or if they're the
+        # sender_localpart user)
+        if self.is_interested_in_user(event.sender):
             return True

-        # This will check the namespaces first before
-        # checking the store, so should be run before _matches_aliases
-        if await self._matches_user(event, store):
+        # additionally, if this is a membership event, perform the same checks on
+        # the user it references
+        if event.type == EventTypes.Member and self.is_interested_in_user(
+            event.state_key
+        ):
             return True

-        # This will check the store, so should be run last
-        if await self._matches_aliases(event, store):
+        # This will check the datastore, so should be run last
+        if await self.is_interested_in_room(
+            event.room_id, store, on_invalidate=cache_context.invalidate
+        ):
             return True

         return False

-    @cached(num_args=1)
+    @cached(num_args=1, cache_context=True)
     async def is_interested_in_presence(
-        self, user_id: UserID, store: "DataStore"
+        self, user_id: UserID, store: "DataStore", cache_context: _CacheContext
     ) -> bool:
         """Check if this service is interested a user's presence
@@ -272,20 +322,19 @@ class ApplicationService:

         # Then find out if the appservice is interested in any of those rooms
         for room_id in room_ids:
-            if await self.matches_user_in_member_list(room_id, store):
+            if await self.is_interested_in_room(
+                room_id, store, on_invalidate=cache_context.invalidate
+            ):
                 return True
         return False

-    def is_interested_in_user(self, user_id: str) -> bool:
-        return (
-            bool(self._matches_regex(ApplicationService.NS_USERS, user_id))
-            or user_id == self.sender
-        )
+    def is_user_in_namespace(self, user_id: str) -> bool:
+        return bool(self._matches_regex(ApplicationService.NS_USERS, user_id))

-    def is_interested_in_alias(self, alias: str) -> bool:
+    def is_room_alias_in_namespace(self, alias: str) -> bool:
         return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias))

-    def is_interested_in_room(self, room_id: str) -> bool:
+    def is_room_id_in_namespace(self, room_id: str) -> bool:
         return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id))

     def is_exclusive_user(self, user_id: str) -> bool:
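A rough sketch of what the namespace checks renamed above actually do (illustrative, not the Synapse code): an appservice registration lists regexes per namespace, and an ID is "in the namespace" if any regex matches it. The regex below is hypothetical.

```python
import re

# Hypothetical user namespace from an appservice registration file.
user_namespace_regexes = [re.compile(r"@irc_.*:example\.com")]

def is_user_in_namespace(user_id: str) -> bool:
    # True if any configured namespace regex matches the user ID.
    return any(regex.match(user_id) for regex in user_namespace_regexes)

assert is_user_in_namespace("@irc_alice:example.com")
assert not is_user_in_namespace("@bob:example.com")
```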
@@ -25,7 +25,7 @@ from synapse.appservice import (
     TransactionUnusedFallbackKeys,
 )
 from synapse.events import EventBase
-from synapse.events.utils import serialize_event
+from synapse.events.utils import SerializeEventConfig, serialize_event
 from synapse.http.client import SimpleHttpClient
 from synapse.types import JsonDict, ThirdPartyInstanceID
 from synapse.util.caches.response_cache import ResponseCache
@@ -321,16 +321,18 @@ class ApplicationServiceApi(SimpleHttpClient):
             serialize_event(
                 e,
                 time_now,
-                as_client_event=True,
-                # If this is an invite or a knock membership event, and we're interested
-                # in this user, then include any stripped state alongside the event.
-                include_stripped_room_state=(
-                    e.type == EventTypes.Member
-                    and (
-                        e.membership == Membership.INVITE
-                        or e.membership == Membership.KNOCK
-                    )
-                    and service.is_interested_in_user(e.state_key)
+                config=SerializeEventConfig(
+                    as_client_event=True,
+                    # If this is an invite or a knock membership event, and we're interested
+                    # in this user, then include any stripped state alongside the event.
+                    include_stripped_room_state=(
+                        e.type == EventTypes.Member
+                        and (
+                            e.membership == Membership.INVITE
+                            or e.membership == Membership.KNOCK
+                        )
+                        and service.is_interested_in_user(e.state_key)
+                    ),
                 ),
             )
             for e in events
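The change above folds loose keyword arguments into a single config object. A generic sketch of that refactoring pattern, using `attrs` as the commit does elsewhere (the class and function below are illustrative stand-ins, not the real Synapse serializer):

```python
import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class SerializeConfig:
    as_client_event: bool = True
    include_stripped_room_state: bool = False

def serialize(event: dict, config: SerializeConfig = SerializeConfig()) -> dict:
    # A stand-in for the real serializer: the options travel together
    # in one immutable object instead of as separate keyword arguments.
    out = dict(event)
    if not config.include_stripped_room_state:
        out.pop("stripped_state", None)
    return out

print(serialize({"type": "m.room.member", "stripped_state": []}))
```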
@@ -383,7 +383,7 @@ class RootConfig:
         Build a default configuration file

         This is used when the user explicitly asks us to generate a config file
-        (eg with --generate_config).
+        (eg with --generate-config).

         Args:
             config_dir_path: The path where the config files are kept. Used to
@@ -19,6 +19,7 @@ from synapse.config import (
     api,
     appservice,
     auth,
+    background_updates,
     cache,
     captcha,
     cas,
@@ -115,6 +116,7 @@ class RootConfig:
     caches: cache.CacheConfig
     federation: federation.FederationConfig
     retention: retention.RetentionConfig
+    background_updates: background_updates.BackgroundUpdateConfig

     config_classes: List[Type["Config"]] = ...
     def __init__(self) -> None: ...
synapse/config/background_updates.py (new file, 68 lines)
@@ -0,0 +1,68 @@
+# Copyright 2022 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class BackgroundUpdateConfig(Config):
+    section = "background_updates"
+
+    def generate_config_section(self, **kwargs) -> str:
+        return """\
+        ## Background Updates ##
+
+        # Background updates are database updates that are run in the background in batches.
+        # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to
+        # sleep can all be configured. This is helpful to speed up or slow down the updates.
+        #
+        background_updates:
+            # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set
+            # a time to change the default.
+            #
+            #background_update_duration_ms: 500
+
+            # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
+            #
+            #sleep_enabled: false
+
+            # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment
+            # and set a duration to change the default.
+            #
+            #sleep_duration_ms: 300
+
+            # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and
+            # set a size to change the default.
+            #
+            #min_batch_size: 10
+
+            # The batch size to use for the first iteration of a new background update. The default is 100.
+            # Uncomment and set a size to change the default.
+            #
+            #default_batch_size: 50
+        """
+
+    def read_config(self, config, **kwargs) -> None:
+        bg_update_config = config.get("background_updates") or {}
+
+        self.update_duration_ms = bg_update_config.get(
+            "background_update_duration_ms", 100
+        )
+
+        self.sleep_enabled = bg_update_config.get("sleep_enabled", True)
+
+        self.sleep_duration_ms = bg_update_config.get("sleep_duration_ms", 1000)
+
+        self.min_batch_size = bg_update_config.get("min_batch_size", 1)
+
+        self.default_batch_size = bg_update_config.get("default_batch_size", 100)
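One small detail worth noting in `read_config` above: `config.get("background_updates") or {}` (rather than `.get("background_updates", {})`) also covers the case where the key is present but explicitly null in the YAML, since `dict.get` only falls back to its default when the key is absent. A tiny illustration:

```python
config = {"background_updates": None}  # e.g. an empty section in YAML

assert config.get("background_updates", {}) is None    # default is not used
assert (config.get("background_updates") or {}) == {}  # falls back safely
```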
@@ -74,3 +74,6 @@ class ExperimentalConfig(Config):

         # MSC3720 (Account status endpoint)
         self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
+
+        # The deprecated groups feature.
+        self.groups_enabled: bool = experimental.get("groups_enabled", True)
@@ -17,6 +17,7 @@ from .account_validity import AccountValidityConfig
 from .api import ApiConfig
 from .appservice import AppServiceConfig
 from .auth import AuthConfig
+from .background_updates import BackgroundUpdateConfig
 from .cache import CacheConfig
 from .captcha import CaptchaConfig
 from .cas import CasConfig
@@ -101,4 +102,5 @@ class HomeServerConfig(RootConfig):
         WorkerConfig,
         RedisConfig,
         ExperimentalConfig,
+        BackgroundUpdateConfig,
     ]
@@ -182,8 +182,14 @@ class OIDCConfig(Config):
       #
       #       localpart_template: Jinja2 template for the localpart of the MXID.
       #           If this is not set, the user will be prompted to choose their
-      #           own username (see 'sso_auth_account_details.html' in the 'sso'
-      #           section of this file).
+      #           own username (see the documentation for the
+      #           'sso_auth_account_details.html' template). This template can
+      #           use the 'localpart_from_email' filter.
+      #
+      #       confirm_localpart: Whether to prompt the user to validate (or
+      #           change) the generated localpart (see the documentation for the
+      #           'sso_auth_account_details.html' template), instead of
+      #           registering the account right away.
       #
       #       display_name_template: Jinja2 template for the display name to set
       #           on first login. If unset, no displayname will be set.
@@ -245,8 +245,8 @@ class SpamChecker:
         """Checks if a given event is considered "spammy" by this server.

         If the server considers an event spammy, then it will be rejected if
-        sent by a local user. If it is sent by a user on another server, then
-        users receive a blank event.
+        sent by a local user. If it is sent by a user on another server, the
+        event is soft-failed.

         Args:
             event: the event to be checked
@@ -38,6 +38,8 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
     [str, StateMap[EventBase], str], Awaitable[bool]
 ]
 ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable]
+CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
+CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
 ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
 ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
 
@@ -157,6 +159,12 @@ class ThirdPartyEventRules:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = []
         self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = []
+        self._check_can_shutdown_room_callbacks: List[
+            CHECK_CAN_SHUTDOWN_ROOM_CALLBACK
+        ] = []
+        self._check_can_deactivate_user_callbacks: List[
+            CHECK_CAN_DEACTIVATE_USER_CALLBACK
+        ] = []
         self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = []
         self._on_user_deactivation_status_changed_callbacks: List[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK

@@ -173,6 +181,8 @@ class ThirdPartyEventRules:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
+        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
         on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
         on_user_deactivation_status_changed: Optional[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK

@@ -198,6 +208,11 @@ class ThirdPartyEventRules:
         if on_new_event is not None:
             self._on_new_event_callbacks.append(on_new_event)
 
+        if check_can_shutdown_room is not None:
+            self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room)
+
+        if check_can_deactivate_user is not None:
+            self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user)
+
         if on_profile_update is not None:
             self._on_profile_update_callbacks.append(on_profile_update)
 
@@ -369,6 +384,46 @@ class ThirdPartyEventRules:
                     "Failed to run module API callback %s: %s", callback, e
                 )
 
+    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
+        """Intercept requests to shutdown a room. If `False` is returned, the
+        room must not be shut down.
+
+        Args:
+            requester: The ID of the user requesting the shutdown.
+            room_id: The ID of the room.
+        """
+        for callback in self._check_can_shutdown_room_callbacks:
+            try:
+                if await callback(user_id, room_id) is False:
+                    return False
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
+        return True
+
+    async def check_can_deactivate_user(
+        self,
+        user_id: str,
+        by_admin: bool,
+    ) -> bool:
+        """Intercept requests to deactivate a user. If `False` is returned, the
+        user should not be deactivated.
+
+        Args:
+            requester
+            user_id: The ID of the room.
+        """
+        for callback in self._check_can_deactivate_user_callbacks:
+            try:
+                if await callback(user_id, by_admin) is False:
+                    return False
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
+        return True
+
     async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
         """Given a room ID, return the state events of that room.
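Taken together, the hunks above wire two new veto points into ThirdPartyEventRules. A minimal sketch of a module registering them follows; the callback signatures come from the type aliases added above, but the module class and its policy logic are hypothetical, and it assumes the module API surface of this release:

    from synapse.module_api import ModuleApi

    class ShutdownGuard:
        def __init__(self, config: dict, api: ModuleApi):
            api.register_third_party_rules_callbacks(
                check_can_shutdown_room=self.check_can_shutdown_room,
                check_can_deactivate_user=self.check_can_deactivate_user,
            )

        async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
            # Returning False vetoes the shutdown; exceptions are logged and ignored.
            return room_id != "!protected:example.com"

        async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
            # Only permit deactivations performed by a server admin.
            return by_admin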
@@ -26,6 +26,7 @@ from typing import (
     Union,
 )
 
+import attr
 from frozendict import frozendict
 
 from synapse.api.constants import EventContentFields, EventTypes, RelationTypes

@@ -37,6 +38,7 @@ from synapse.util.frozenutils import unfreeze
 from . import EventBase
 
 if TYPE_CHECKING:
+    from synapse.server import HomeServer
     from synapse.storage.databases.main.relations import BundledAggregations
 
 
@@ -303,29 +305,37 @@ def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
     return d
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SerializeEventConfig:
+    as_client_event: bool = True
+    # Function to convert from federation format to client format
+    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
+    # ID of the user's auth token - used for namespacing of transaction IDs
+    token_id: Optional[int] = None
+    # List of event fields to include. If empty, all fields will be returned.
+    only_event_fields: Optional[List[str]] = None
+    # Some events can have stripped room state stored in the `unsigned` field.
+    # This is required for invite and knock functionality. If this option is
+    # False, that state will be removed from the event before it is returned.
+    # Otherwise, it will be kept.
+    include_stripped_room_state: bool = False
+
+
+_DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig()
+
+
 def serialize_event(
     e: Union[JsonDict, EventBase],
     time_now_ms: int,
     *,
-    as_client_event: bool = True,
-    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
-    token_id: Optional[str] = None,
-    only_event_fields: Optional[List[str]] = None,
-    include_stripped_room_state: bool = False,
+    config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
 ) -> JsonDict:
     """Serialize event for clients
 
     Args:
         e
         time_now_ms
-        as_client_event
-        event_format
-        token_id
-        only_event_fields
-        include_stripped_room_state: Some events can have stripped room state
-            stored in the `unsigned` field. This is required for invite and knock
-            functionality. If this option is False, that state will be removed from the
-            event before it is returned. Otherwise, it will be kept.
+        config: Event serialization config
 
     Returns:
         The serialized event dictionary.

@@ -348,11 +358,11 @@ def serialize_event(
 
     if "redacted_because" in e.unsigned:
         d["unsigned"]["redacted_because"] = serialize_event(
-            e.unsigned["redacted_because"], time_now_ms, event_format=event_format
+            e.unsigned["redacted_because"], time_now_ms, config=config
         )
 
-    if token_id is not None:
-        if token_id == getattr(e.internal_metadata, "token_id", None):
+    if config.token_id is not None:
+        if config.token_id == getattr(e.internal_metadata, "token_id", None):
             txn_id = getattr(e.internal_metadata, "txn_id", None)
             if txn_id is not None:
                 d["unsigned"]["transaction_id"] = txn_id

@@ -361,13 +371,14 @@ def serialize_event(
     # that are meant to provide metadata about a room to an invitee/knocker. They are
     # intended to only be included in specific circumstances, such as down sync, and
     # should not be included in any other case.
-    if not include_stripped_room_state:
+    if not config.include_stripped_room_state:
         d["unsigned"].pop("invite_room_state", None)
         d["unsigned"].pop("knock_room_state", None)
 
-    if as_client_event:
-        d = event_format(d)
+    if config.as_client_event:
+        d = config.event_format(d)
 
+    only_event_fields = config.only_event_fields
     if only_event_fields:
         if not isinstance(only_event_fields, list) or not all(
             isinstance(f, str) for f in only_event_fields

@@ -385,23 +396,26 @@ class EventClientSerializer:
     clients.
     """
 
+    def __init__(self, hs: "HomeServer"):
+        self._msc3440_enabled = hs.config.experimental.msc3440_enabled
+
     def serialize_event(
         self,
         event: Union[JsonDict, EventBase],
         time_now: int,
         *,
+        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
         bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
-        **kwargs: Any,
     ) -> JsonDict:
         """Serializes a single event.
 
         Args:
             event: The event being serialized.
             time_now: The current time in milliseconds
+            config: Event serialization config
             bundle_aggregations: Whether to include the bundled aggregations for this
                 event. Only applies to non-state events. (State events never include
                 bundled aggregations.)
-            **kwargs: Arguments to pass to `serialize_event`
 
         Returns:
             The serialized event

@@ -410,7 +424,7 @@ class EventClientSerializer:
         if not isinstance(event, EventBase):
             return event
 
-        serialized_event = serialize_event(event, time_now, **kwargs)
+        serialized_event = serialize_event(event, time_now, config=config)
 
         # Check if there are any bundled aggregations to include with the event.
         if bundle_aggregations:

@@ -419,6 +433,7 @@ class EventClientSerializer:
             self._inject_bundled_aggregations(
                 event,
                 time_now,
+                config,
                 bundle_aggregations[event.event_id],
                 serialized_event,
             )

@@ -456,6 +471,7 @@ class EventClientSerializer:
         self,
         event: EventBase,
         time_now: int,
+        config: SerializeEventConfig,
         aggregations: "BundledAggregations",
         serialized_event: JsonDict,
     ) -> None:

@@ -466,6 +482,7 @@ class EventClientSerializer:
             time_now: The current time in milliseconds
             aggregations: The bundled aggregation to serialize.
             serialized_event: The serialized event which may be modified.
+            config: Event serialization config
 
         """
         serialized_aggregations = {}

@@ -493,8 +510,8 @@ class EventClientSerializer:
             thread = aggregations.thread
 
             # Don't bundle aggregations as this could recurse forever.
-            serialized_latest_event = self.serialize_event(
-                thread.latest_event, time_now, bundle_aggregations=None
+            serialized_latest_event = serialize_event(
+                thread.latest_event, time_now, config=config
             )
             # Manually apply an edit, if one exists.
             if thread.latest_edit:

@@ -502,33 +519,53 @@ class EventClientSerializer:
                 thread.latest_event, serialized_latest_event, thread.latest_edit
             )
 
-            serialized_aggregations[RelationTypes.THREAD] = {
+            thread_summary = {
                 "latest_event": serialized_latest_event,
                 "count": thread.count,
                 "current_user_participated": thread.current_user_participated,
             }
+            serialized_aggregations[RelationTypes.THREAD] = thread_summary
+            if self._msc3440_enabled:
+                serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary
 
         # Include the bundled aggregations in the event.
         if serialized_aggregations:
-            serialized_event["unsigned"].setdefault("m.relations", {}).update(
-                serialized_aggregations
-            )
+            # There is likely already an "unsigned" field, but a filter might
+            # have stripped it off (via the event_fields option). The server is
+            # allowed to return additional fields, so add it back.
+            serialized_event.setdefault("unsigned", {}).setdefault(
+                "m.relations", {}
+            ).update(serialized_aggregations)
 
     def serialize_events(
-        self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
+        self,
+        events: Iterable[Union[JsonDict, EventBase]],
+        time_now: int,
+        *,
+        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
+        bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
     ) -> List[JsonDict]:
         """Serializes multiple events.
 
         Args:
             event
             time_now: The current time in milliseconds
-            **kwargs: Arguments to pass to `serialize_event`
+            config: Event serialization config
+            bundle_aggregations: Whether to include the bundled aggregations for this
+                event. Only applies to non-state events. (State events never include
+                bundled aggregations.)
 
         Returns:
             The list of serialized events
         """
         return [
-            self.serialize_event(event, time_now=time_now, **kwargs) for event in events
+            self.serialize_event(
+                event,
+                time_now,
+                config=config,
+                bundle_aggregations=bundle_aggregations,
+            )
+            for event in events
         ]
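The net effect of this file's changes is that the grab-bag of serialization keyword arguments collapses into one frozen SerializeEventConfig threaded through every helper. A sketch of a migrated call site; the helper function and the field filter are illustrative, not from the source:

    from synapse.events import EventBase
    from synapse.events.utils import SerializeEventConfig, serialize_event

    def serialize_for_client(event: EventBase, now_ms: int) -> dict:
        # Illustrative field filter; an empty/None value returns all fields.
        config = SerializeEventConfig(
            as_client_event=True,
            only_event_fields=["content", "type"],
        )
        return serialize_event(event, now_ms, config=config)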
@@ -1428,7 +1428,7 @@ class FederationClient(FederationBase):
 
         # Validate children_state of the room.
         children_state = room.pop("children_state", [])
-        if not isinstance(children_state, Sequence):
+        if not isinstance(children_state, list):
             raise InvalidResponseError("'room.children_state' must be a list")
         if any(not isinstance(e, dict) for e in children_state):
             raise InvalidResponseError("Invalid event in 'children_state' list")

@@ -1440,14 +1440,14 @@ class FederationClient(FederationBase):
 
         # Validate the children rooms.
         children = res.get("children", [])
-        if not isinstance(children, Sequence):
+        if not isinstance(children, list):
             raise InvalidResponseError("'children' must be a list")
         if any(not isinstance(r, dict) for r in children):
             raise InvalidResponseError("Invalid room in 'children' list")
 
         # Validate the inaccessible children.
         inaccessible_children = res.get("inaccessible_children", [])
-        if not isinstance(inaccessible_children, Sequence):
+        if not isinstance(inaccessible_children, list):
             raise InvalidResponseError("'inaccessible_children' must be a list")
         if any(not isinstance(r, str) for r in inaccessible_children):
             raise InvalidResponseError(

@@ -1630,7 +1630,7 @@ def _validate_hierarchy_event(d: JsonDict) -> None:
         raise ValueError("Invalid event: 'content' must be a dict")
 
     via = content.get("via")
-    if not isinstance(via, Sequence):
+    if not isinstance(via, list):
         raise ValueError("Invalid event: 'via' must be a list")
     if any(not isinstance(v, str) for v in via):
         raise ValueError("Invalid event: 'via' must be a list of strings")
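The Sequence-to-list tightening above is subtle but deliberate: str is itself a collections.abc.Sequence, so the old checks would have accepted a bare string from a remote server where a JSON array was required. A two-line demonstration:

    from collections.abc import Sequence

    assert isinstance("not-a-list", Sequence)   # passes the old, looser check
    assert not isinstance("not-a-list", list)   # rejected by the new check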
@@ -244,7 +244,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
 
         self.notifier.on_new_replication_data()
 
-    def send_device_messages(self, destination: str) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
         """As per FederationSender"""
         # We don't need to replicate this as it gets sent down a different
         # stream.

@@ -118,7 +118,12 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def send_device_messages(self, destination: str) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+        """Tells the sender that a new device message is ready to be sent to the
+        destination. The `immediate` flag specifies whether the messages should
+        be tried to be sent immediately, or whether it can be delayed for a
+        short while (to aid performance).
+        """
         raise NotImplementedError()
 
     @abc.abstractmethod

@@ -146,9 +151,8 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
 
 
 @attr.s
-class _PresenceQueue:
-    """A queue of destinations that need to be woken up due to new presence
-    updates.
+class _DestinationWakeupQueue:
+    """A queue of destinations that need to be woken up due to new updates.
 
     Staggers waking up of per destination queues to ensure that we don't attempt
     to start TLS connections with many hosts all at once, leading to pinned CPU.

@@ -175,7 +179,7 @@ class _PresenceQueue:
         if not self.processing:
             self._handle()
 
-    @wrap_as_background_process("_PresenceQueue.handle")
+    @wrap_as_background_process("_DestinationWakeupQueue.handle")
     async def _handle(self) -> None:
         """Background process to drain the queue."""

@@ -297,7 +301,7 @@ class FederationSender(AbstractFederationSender):
 
         self._external_cache = hs.get_external_cache()
 
-        self._presence_queue = _PresenceQueue(self, self.clock)
+        self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
 
     def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
         """Get or create a PerDestinationQueue for the given destination

@@ -614,7 +618,7 @@ class FederationSender(AbstractFederationSender):
             states, start_loop=False
         )
 
-        self._presence_queue.add_to_queue(destination)
+        self._destination_wakeup_queue.add_to_queue(destination)
 
     def build_and_send_edu(
         self,

@@ -667,7 +671,7 @@ class FederationSender(AbstractFederationSender):
         else:
             queue.send_edu(edu)
 
-    def send_device_messages(self, destination: str) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
         if destination == self.server_name:
             logger.warning("Not sending device update to ourselves")
             return

@@ -677,7 +681,11 @@ class FederationSender(AbstractFederationSender):
         ):
             return
 
-        self._get_per_destination_queue(destination).attempt_new_transaction()
+        if immediate:
+            self._get_per_destination_queue(destination).attempt_new_transaction()
+        else:
+            self._get_per_destination_queue(destination).mark_new_data()
+            self._destination_wakeup_queue.add_to_queue(destination)
 
     def wake_destination(self, destination: str) -> None:
         """Called when we want to retry sending transactions to a remote.
@@ -219,6 +219,16 @@ class PerDestinationQueue:
         self._pending_edus.append(edu)
         self.attempt_new_transaction()
 
+    def mark_new_data(self) -> None:
+        """Marks that the destination has new data to send, without starting a
+        new transaction.
+
+        If a transaction loop is already in progress then a new transcation will
+        be attempted when the current one finishes.
+        """
+
+        self._new_data_to_send = True
+
     def attempt_new_transaction(self) -> None:
         """Try to start a new transaction to this destination
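With mark_new_data in place, callers of send_device_messages can either kick off a transaction at once or merely flag the queue and let _DestinationWakeupQueue stagger the wake-ups, avoiding a storm of simultaneous TLS handshakes. A sketch of the two paths; the wrapper function is hypothetical:

    from synapse.federation.sender import FederationSender

    def notify_destination(sender: FederationSender, destination: str, urgent: bool) -> None:
        # urgent=True starts a transaction immediately; urgent=False marks the
        # per-destination queue dirty and defers the wake-up briefly.
        sender.send_device_messages(destination, immediate=urgent)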
@@ -289,7 +289,7 @@ class OpenIdUserInfo(BaseFederationServlet):
         return 200, {"sub": user_id}
 
 
-DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
+SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
     "federation": FEDERATION_SERVLET_CLASSES,
     "room_list": (PublicRoomList,),
     "group_server": GROUP_SERVER_SERVLET_CLASSES,

@@ -298,6 +298,10 @@ SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
     "openid": (OpenIdUserInfo,),
 }
 
+DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid")
+
+GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation")
+
 
 def register_servlets(
     hs: "HomeServer",

@@ -320,16 +324,19 @@ def register_servlets(
             Defaults to ``DEFAULT_SERVLET_GROUPS``.
     """
     if not servlet_groups:
-        servlet_groups = DEFAULT_SERVLET_GROUPS.keys()
+        servlet_groups = DEFAULT_SERVLET_GROUPS
+        # Only allow the groups servlets if the deprecated groups feature is enabled.
+        if hs.config.experimental.groups_enabled:
+            servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS
 
     for servlet_group in servlet_groups:
         # Skip unknown servlet groups.
-        if servlet_group not in DEFAULT_SERVLET_GROUPS:
+        if servlet_group not in SERVLET_GROUPS:
            raise RuntimeError(
                f"Attempting to register unknown federation servlet: '{servlet_group}'"
            )
 
-        for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]:
+        for servletclass in SERVLET_GROUPS[servlet_group]:
             # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
             if (
                 servletclass == FederationTimestampLookupServlet
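One consequence of splitting GROUP_SERVLET_GROUPS out of the defaults is that the deprecated groups endpoints are only mounted when the experimental flag is on; callers that name groups explicitly are unaffected. A sketch of an explicit registration, assuming the surrounding hs, resource, authenticator and ratelimiter objects already exist:

    register_servlets(
        hs,
        resource,
        authenticator,
        ratelimiter,
        servlet_groups=["federation", "openid"],  # unknown names now raise RuntimeError
    )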
@@ -63,7 +63,7 @@ class Authenticator:
 
         self.replication_client = None
         if hs.config.worker.worker_app:
-            self.replication_client = hs.get_tcp_replication()
+            self.replication_client = hs.get_replication_command_handler()
 
     # A method just so we can pass 'self' as the authenticator to the Servlets
     async def authenticate_request(
@@ -571,7 +571,7 @@ class ApplicationServicesHandler:
         room_alias_str = room_alias.to_string()
         services = self.store.get_app_services()
         alias_query_services = [
-            s for s in services if (s.is_interested_in_alias(room_alias_str))
+            s for s in services if (s.is_room_alias_in_namespace(room_alias_str))
         ]
         for alias_service in alias_query_services:
             is_known_alias = await self.appservice_api.query_alias(

@@ -660,7 +660,7 @@ class ApplicationServicesHandler:
         # inside of a list comprehension anymore.
         interested_list = []
         for s in services:
-            if await s.is_interested(event, self.store):
+            if await s.is_interested_in_event(event.event_id, event, self.store):
                 interested_list.append(s)
 
         return interested_list
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Optional
 
 from synapse.api.errors import SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import Requester, UserID, create_requester
+from synapse.types import Codes, Requester, UserID, create_requester
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -42,6 +42,7 @@ class DeactivateAccountHandler:
 
         # Flag that indicates whether the process to part users from rooms is running
         self._user_parter_running = False
+        self._third_party_rules = hs.get_third_party_event_rules()
 
         # Start the user parter loop so it can resume parting users from rooms where
         # it left off (if it has work left to do).

@@ -74,6 +75,15 @@ class DeactivateAccountHandler:
         Returns:
             True if identity server supports removing threepids, otherwise False.
         """
 
+        # Check if this user can be deactivated
+        if not await self._third_party_rules.check_can_deactivate_user(
+            user_id, by_admin
+        ):
+            raise SynapseError(
+                403, "Deactivation of this user is forbidden", Codes.FORBIDDEN
+            )
+
         # FIXME: Theoretically there is a race here wherein user resets
         # password using threepid.
 
@@ -371,7 +371,6 @@ class DeviceHandler(DeviceWorkerHandler):
                 log_kv(
                     {"reason": "User doesn't have device id.", "device_id": device_id}
                 )
-                pass
             else:
                 raise
 
@@ -414,7 +413,6 @@ class DeviceHandler(DeviceWorkerHandler):
                 # no match
                 set_tag("error", True)
                 set_tag("reason", "User doesn't have that device id.")
-                pass
             else:
                 raise
 
@@ -506,7 +504,7 @@ class DeviceHandler(DeviceWorkerHandler):
             "Sending device list update notif for %r to: %r", user_id, hosts
         )
         for host in hosts:
-            self.federation_sender.send_device_messages(host)
+            self.federation_sender.send_device_messages(host, immediate=False)
             log_kv({"message": "sent device update to host", "host": host})
 
     async def notify_user_signature_update(
@@ -121,7 +121,7 @@ class DirectoryHandler:
 
         service = requester.app_service
         if service:
-            if not service.is_interested_in_alias(room_alias_str):
+            if not service.is_room_alias_in_namespace(room_alias_str):
                 raise SynapseError(
                     400,
                     "This application service has not reserved this kind of alias.",

@@ -223,7 +223,7 @@ class DirectoryHandler:
     async def delete_appservice_association(
         self, service: ApplicationService, room_alias: RoomAlias
     ) -> None:
-        if not service.is_interested_in_alias(room_alias.to_string()):
+        if not service.is_room_alias_in_namespace(room_alias.to_string()):
             raise SynapseError(
                 400,
                 "This application service has not reserved this kind of alias",

@@ -378,7 +378,7 @@ class DirectoryHandler:
         # non-exclusive locks on the alias (or there are no interested services)
         services = self.store.get_app_services()
         interested_services = [
-            s for s in services if s.is_interested_in_alias(alias.to_string())
+            s for s in services if s.is_room_alias_in_namespace(alias.to_string())
         ]
 
         for service in interested_services:
@@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional
 from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import AuthError, SynapseError
 from synapse.events import EventBase
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.presence import format_user_presence_state
 from synapse.streams.config import PaginationConfig
 from synapse.types import JsonDict, UserID

@@ -120,7 +121,7 @@ class EventStreamHandler:
             chunks = self._event_serializer.serialize_events(
                 events,
                 time_now,
-                as_client_event=as_client_event,
+                config=SerializeEventConfig(as_client_event=as_client_event),
             )
 
             chunk = {
@@ -23,8 +23,6 @@ from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json
 from unpaddedbase64 import decode_base64
 
-from twisted.internet import defer
-
 from synapse import event_auth
 from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.api.errors import (

@@ -45,11 +43,7 @@ from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.federation.federation_client import InvalidResponseError
 from synapse.http.servlet import assert_params_in_dict
-from synapse.logging.context import (
-    make_deferred_yieldable,
-    nested_logging_context,
-    preserve_fn,
-)
+from synapse.logging.context import nested_logging_context
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,

@@ -355,56 +349,8 @@ class FederationHandler:
         if success:
             return True
 
-        # Huh, well *those* domains didn't work out. Lets try some domains
-        # from the time.
-
-        tried_domains = set(likely_domains)
-        tried_domains.add(self.server_name)
-
-        event_ids = list(extremities.keys())
-
-        logger.debug("calling resolve_state_groups in _maybe_backfill")
-        resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
-        states_list = await make_deferred_yieldable(
-            defer.gatherResults(
-                [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
-            )
-        )
-
-        # A map from event_id to state map of event_ids.
-        state_ids: Dict[str, StateMap[str]] = dict(
-            zip(event_ids, [s.state for s in states_list])
-        )
-
-        state_map = await self.store.get_events(
-            [e_id for ids in state_ids.values() for e_id in ids.values()],
-            get_prev_content=False,
-        )
-
-        # A map from event_id to state map of events.
-        state_events: Dict[str, StateMap[EventBase]] = {
-            key: {
-                k: state_map[e_id]
-                for k, e_id in state_dict.items()
-                if e_id in state_map
-            }
-            for key, state_dict in state_ids.items()
-        }
-
-        for e_id in event_ids:
-            likely_extremeties_domains = get_domains_from_state(state_events[e_id])
-
-            success = await try_backfill(
-                [
-                    dom
-                    for dom, _ in likely_extremeties_domains
-                    if dom not in tried_domains
-                ]
-            )
-            if success:
-                return True
-
-            tried_domains.update(dom for dom, _ in likely_extremeties_domains)
-
+        # TODO: we could also try servers which were previously in the room, but
+        # are no longer.
         return False
 
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast
 from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase
+from synapse.events.utils import SerializeEventConfig
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
 from synapse.handlers.receipts import ReceiptEventSource

@@ -152,10 +153,13 @@ class InitialSyncHandler:
 
         public_room_ids = await self.store.get_public_room_ids()
 
-        limit = pagin_config.limit
-        if limit is None:
+        if pagin_config.limit is not None:
+            limit = pagin_config.limit
+        else:
             limit = 10
 
+        serializer_options = SerializeEventConfig(as_client_event=as_client_event)
+
         async def handle_room(event: RoomsForUser) -> None:
             d: JsonDict = {
                 "room_id": event.room_id,

@@ -173,7 +177,7 @@ class InitialSyncHandler:
                 d["invite"] = self._event_serializer.serialize_event(
                     invite_event,
                     time_now,
-                    as_client_event=as_client_event,
+                    config=serializer_options,
                 )
 
             rooms_ret.append(d)

@@ -225,7 +229,7 @@ class InitialSyncHandler:
                         self._event_serializer.serialize_events(
                             messages,
                             time_now=time_now,
-                            as_client_event=as_client_event,
+                            config=serializer_options,
                         )
                     ),
                     "start": await start_token.to_string(self.store),

@@ -235,7 +239,7 @@ class InitialSyncHandler:
                 d["state"] = self._event_serializer.serialize_events(
                     current_state.values(),
                     time_now=time_now,
-                    as_client_event=as_client_event,
+                    config=serializer_options,
                 )
 
                 account_data_events = []
@@ -1071,6 +1071,9 @@ class EventCreationHandler:
         if relation_type == RelationTypes.ANNOTATION:
             aggregation_key = relation["key"]
 
+            if len(aggregation_key) > 500:
+                raise SynapseError(400, "Aggregation key is too long")
+
             already_exists = await self.store.has_user_annotated_event(
                 relates_to, event.type, aggregation_key, event.sender
             )

@@ -1078,7 +1081,10 @@ class EventCreationHandler:
                 raise SynapseError(400, "Can't send same reaction twice")
 
         # Don't attempt to start a thread if the parent event is a relation.
-        elif relation_type == RelationTypes.THREAD:
+        elif (
+            relation_type == RelationTypes.THREAD
+            or relation_type == RelationTypes.UNSTABLE_THREAD
+        ):
             if await self.store.event_includes_relation(relates_to):
                 raise SynapseError(
                     400, "Cannot start threads from an event with a relation"
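The new guard bounds the annotation key (normally a single emoji) at 500 characters. A sketch of the event content that now fails with a 400 instead of being stored; the parent event ID is a placeholder:

    content = {
        "m.relates_to": {
            "rel_type": "m.annotation",
            "event_id": "$parent:example.com",  # placeholder parent event
            "key": "x" * 501,                   # over the new limit -> SynapseError(400)
        }
    }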
@@ -45,6 +45,7 @@ from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
 from synapse.util import Clock, json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
 from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
+from synapse.util.templates import _localpart_from_email_filter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -1228,6 +1229,7 @@ class OidcSessionData:
 
 class UserAttributeDict(TypedDict):
     localpart: Optional[str]
+    confirm_localpart: bool
     display_name: Optional[str]
     emails: List[str]
 
@@ -1307,6 +1309,11 @@ def jinja_finalize(thing: Any) -> Any:
 
 
 env = Environment(finalize=jinja_finalize)
+env.filters.update(
+    {
+        "localpart_from_email": _localpart_from_email_filter,
+    }
+)
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)

@@ -1316,6 +1323,7 @@ class JinjaOidcMappingConfig:
     display_name_template: Optional[Template]
     email_template: Optional[Template]
     extra_attributes: Dict[str, Template]
+    confirm_localpart: bool = False
 
 
 class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):

@@ -1357,12 +1365,17 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
                     "invalid jinja template", path=["extra_attributes", key]
                 ) from e
 
+        confirm_localpart = config.get("confirm_localpart") or False
+        if not isinstance(confirm_localpart, bool):
+            raise ConfigError("must be a bool", path=["confirm_localpart"])
+
         return JinjaOidcMappingConfig(
             subject_claim=subject_claim,
             localpart_template=localpart_template,
             display_name_template=display_name_template,
             email_template=email_template,
             extra_attributes=extra_attributes,
+            confirm_localpart=confirm_localpart,
         )
 
     def get_remote_user_id(self, userinfo: UserInfo) -> str:

@@ -1398,7 +1411,10 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
             emails.append(email)
 
         return UserAttributeDict(
-            localpart=localpart, display_name=display_name, emails=emails
+            localpart=localpart,
+            display_name=display_name,
+            emails=emails,
+            confirm_localpart=self._config.confirm_localpart,
        )
 
    async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict:
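The OIDC hunks add two user-facing knobs: a localpart_from_email Jinja filter and a confirm_localpart flag that routes users to the username picker even when a localpart was mapped. A sketch of a mapping-provider config exercising both, passed as the plain dict that the YAML user_mapping_provider.config section would produce; claim names are placeholders:

    from synapse.handlers.oidc import JinjaOidcMappingProvider

    config = JinjaOidcMappingProvider.parse_config(
        {
            "localpart_template": "{{ user.email | localpart_from_email }}",
            "confirm_localpart": True,  # anything but a bool raises ConfigError
        }
    )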
@@ -22,6 +22,7 @@ from twisted.python.failure import Failure
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.room import ShutdownRoomResponse
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter

@@ -349,7 +350,7 @@ class PaginationHandler:
         """
         self._purges_in_progress_by_room.add(room_id)
         try:
-            with await self.pagination_lock.write(room_id):
+            async with self.pagination_lock.write(room_id):
                 await self.storage.purge_events.purge_history(
                     room_id, token, delete_local_events
                 )

@@ -405,7 +406,7 @@ class PaginationHandler:
             room_id: room to be purged
             force: set true to skip checking for joined users.
         """
-        with await self.pagination_lock.write(room_id):
+        async with self.pagination_lock.write(room_id):
             # first check that we have no users in this room
             if not force:
                 joined = await self.store.is_host_joined(room_id, self._server_name)

@@ -447,7 +448,7 @@ class PaginationHandler:
 
         room_token = from_token.room_key
 
-        with await self.pagination_lock.read(room_id):
+        async with self.pagination_lock.read(room_id):
             (
                 membership,
                 member_event_id,

@@ -541,13 +542,15 @@ class PaginationHandler:
 
         time_now = self.clock.time_msec()
 
+        serialize_options = SerializeEventConfig(as_client_event=as_client_event)
+
         chunk = {
             "chunk": (
                 self._event_serializer.serialize_events(
                     events,
                     time_now,
+                    config=serialize_options,
                     bundle_aggregations=aggregations,
-                    as_client_event=as_client_event,
                 )
             ),
             "start": await from_token.to_string(self.store),

@@ -556,7 +559,7 @@ class PaginationHandler:
 
         if state:
             chunk["state"] = self._event_serializer.serialize_events(
-                state, time_now, as_client_event=as_client_event
+                state, time_now, config=serialize_options
             )
 
         return chunk

@@ -612,7 +615,7 @@ class PaginationHandler:
 
         self._purges_in_progress_by_room.add(room_id)
         try:
-            with await self.pagination_lock.write(room_id):
+            async with self.pagination_lock.write(room_id):
                 self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                 self._delete_by_id[
                     delete_id
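All the pagination_lock call sites switch from "with await ..." to "async with ..." because ReadWriteLock's read and write now hand back async context managers. A sketch of the new pattern; the wrapper function is hypothetical:

    from synapse.util.async_helpers import ReadWriteLock

    async def purge_room_history(lock: ReadWriteLock, room_id: str) -> None:
        # `write` returns an async context manager in this release, hence
        # `async with` instead of the old `with await lock.write(...)`.
        async with lock.write(room_id):
            ...  # exclusive section while the purge runs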
@@ -267,7 +267,6 @@ class BasePresenceHandler(abc.ABC):
             is_syncing: Whether or not the user is now syncing
             sync_time_msec: Time in ms when the user was last syncing
         """
-        pass
 
     async def update_external_syncs_clear(self, process_id: str) -> None:
         """Marks all users that had been marked as syncing by a given process

@@ -277,7 +276,6 @@ class BasePresenceHandler(abc.ABC):
 
         This is a no-op when presence is handled by a different worker.
         """
-        pass
 
     async def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: list

@@ -424,13 +422,13 @@ class WorkerPresenceHandler(BasePresenceHandler):
 
     async def _on_shutdown(self) -> None:
         if self._presence_enabled:
-            self.hs.get_tcp_replication().send_command(
+            self.hs.get_replication_command_handler().send_command(
                 ClearUserSyncsCommand(self.instance_id)
             )
 
     def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None:
         if self._presence_enabled:
-            self.hs.get_tcp_replication().send_user_sync(
+            self.hs.get_replication_command_handler().send_user_sync(
                 self.instance_id, user_id, is_syncing, last_sync_ms
             )
 
@@ -270,7 +270,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
         # Then filter down to rooms that the AS can read
         events = []
         for room_id, event in rooms_to_events.items():
-            if not await service.matches_user_in_member_list(room_id, self.store):
+            if not await service.is_interested_in_room(room_id, self.store):
                 continue
 
             events.append(event)
@@ -1485,6 +1485,7 @@ class RoomShutdownHandler:
         self.room_member_handler = hs.get_room_member_handler()
         self._room_creation_handler = hs.get_room_creation_handler()
         self._replication = hs.get_replication_data_handler()
+        self._third_party_rules = hs.get_third_party_event_rules()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.store = hs.get_datastores().main
 
@@ -1558,6 +1559,13 @@ class RoomShutdownHandler:
         if not RoomID.is_valid(room_id):
             raise SynapseError(400, "%s is not a legal room ID" % (room_id,))
 
+        if not await self._third_party_rules.check_can_shutdown_room(
+            requester_user_id, room_id
+        ):
+            raise SynapseError(
+                403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
+            )
+
         # Action the block first (even if the room doesn't exist yet)
         if block:
             # This will work even if the room is already blocked, but that is
@@ -1736,8 +1736,8 @@ class RoomMemberMasterHandler(RoomMemberHandler):
             txn_id=txn_id,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
+            outlier=True,
         )
-        event.internal_metadata.outlier = True
         event.internal_metadata.out_of_band_membership = True
 
         result_event = await self.event_creation_handler.handle_new_client_event(
@ -295,7 +295,7 @@ class RoomSummaryHandler:
|
||||||
# inaccessible to the requesting user.
|
# inaccessible to the requesting user.
|
||||||
if room_entry:
|
if room_entry:
|
||||||
# Add the room (including the stripped m.space.child events).
|
# Add the room (including the stripped m.space.child events).
|
||||||
rooms_result.append(room_entry.as_json())
|
rooms_result.append(room_entry.as_json(for_client=True))
|
||||||
|
|
||||||
# If this room is not at the max-depth, check if there are any
|
# If this room is not at the max-depth, check if there are any
|
||||||
# children to process.
|
# children to process.
|
||||||
|
@@ -843,21 +843,32 @@ class _RoomEntry:
     # This may not include all children.
     children_state_events: Sequence[JsonDict] = ()

-    def as_json(self) -> JsonDict:
+    def as_json(self, for_client: bool = False) -> JsonDict:
         """
         Returns a JSON dictionary suitable for the room hierarchy endpoint.

         It returns the room summary including the stripped m.space.child events
         as a sub-key.
+
+        Args:
+            for_client: If true, any server-server only fields are stripped from
+                the result.
+
         """
         result = dict(self.room)

+        # Before returning to the client, remove the allowed_room_ids key, if it
+        # exists.
+        if for_client:
+            result.pop("allowed_room_ids", False)
+
         result["children_state"] = self.children_state_events
         return result


 def _has_valid_via(e: EventBase) -> bool:
     via = e.content.get("via")
-    if not via or not isinstance(via, Sequence):
+    if not via or not isinstance(via, list):
         return False
     for v in via:
         if not isinstance(v, str):
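The for_client flag exists because as_json serves two audiences: the federation hierarchy response legitimately carries allowed_room_ids (so the peer server can apply access checks), while clients must not see it. A rough usage sketch, assuming _RoomEntry is constructed with room_id and room as its attrs fields suggest (the field values are invented for the example):

entry = _RoomEntry(
    room_id="!space:example.org",
    room={"room_id": "!space:example.org", "allowed_room_ids": ["!a:example.org"]},
)

federation_view = entry.as_json()             # keeps allowed_room_ids
client_view = entry.as_json(for_client=True)  # allowed_room_ids stripped
assert "allowed_room_ids" not in client_view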
@@ -132,6 +132,7 @@ class UserAttributes:
     # if `None`, the mapper has not picked a userid, and the user should be prompted to
     # enter one.
     localpart: Optional[str]
+    confirm_localpart: bool = False
     display_name: Optional[str] = None
     emails: Collection[str] = attr.Factory(list)

@@ -561,9 +562,10 @@ class SsoHandler:
         # Must provide either attributes or session, not both
         assert (attributes is not None) != (session is not None)

-        if (attributes and attributes.localpart is None) or (
-            session and session.chosen_localpart is None
-        ):
+        if (
+            attributes
+            and (attributes.localpart is None or attributes.confirm_localpart is True)
+        ) or (session and session.chosen_localpart is None):
             return b"/_synapse/client/pick_username/account_details"
         elif self._consent_at_registration and not (
             session and session.terms_accepted_version
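confirm_localpart lets an SSO mapping provider suggest a localpart while still routing the user through the account-details picker to confirm it, per the new branch above. A minimal sketch of a mapping provider using it; the map_user_attributes shape follows the usual mapping-provider convention and the attribute name comes from this diff, but the exact contract should be checked against the release documentation:

class ConfirmingMappingProvider:
    """Sketch: suggest a localpart from the IdP but ask the user to confirm."""

    async def map_user_attributes(self, userinfo, token, failures=0):
        return {
            "localpart": userinfo["preferred_username"],
            "display_name": userinfo.get("name"),
            "emails": [],
            "confirm_localpart": True,  # forces the account-details page
        }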
@@ -160,8 +160,9 @@ class FollowerTypingHandler:
         """Should be called whenever we receive updates for typing stream."""

         if self._latest_room_serial > token:
-            # The master has gone backwards. To prevent inconsistent data, just
-            # clear everything.
+            # The typing worker has gone backwards (e.g. it may have restarted).
+            # To prevent inconsistent data, just clear everything.
+            logger.info("Typing handler stream went backwards; resetting")
             self._reset()

         # Set the latest serial token to whatever the server gave us.
@@ -486,9 +487,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
             if handler._room_serials[room_id] <= from_key:
                 continue

-            if not await service.matches_user_in_member_list(
-                room_id, self._main_store
-            ):
+            if not await service.is_interested_in_room(room_id, self._main_store):
                 continue

             events.append(self._make_event_for(room_id))
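matches_user_in_member_list only asked whether a current room member falls in the appservice's user namespace; is_interested_in_room is a broader notion of interest. A self-contained sketch of the idea (not Synapse's implementation): a room is interesting if its ID, one of its aliases, or one of its members matches the appservice's declared regex namespaces.

import re
from typing import Iterable

def is_interested_in_room(
    room_id: str,
    aliases: Iterable[str],
    members: Iterable[str],
    room_ns: Iterable[str],
    alias_ns: Iterable[str],
    user_ns: Iterable[str],
) -> bool:
    # Interested if any declared namespace pattern matches the room itself,
    # one of its aliases, or one of its current members.
    return (
        any(re.fullmatch(p, room_id) for p in room_ns)
        or any(re.fullmatch(p, a) for p in alias_ns for a in aliases)
        or any(re.fullmatch(p, u) for p in user_ns for u in members)
    )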
@@ -120,7 +120,6 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC):
         """Called when response has finished streaming and the parser should
         return the final result (or error).
         """
-        pass


 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -601,7 +600,6 @@ class MatrixFederationHttpClient:
                     response.code,
                     response_phrase,
                 )
-                pass
             else:
                 logger.info(
                     "{%s} [%s] Got response headers: %d %s",
@@ -233,7 +233,6 @@ class HttpServer(Protocol):
            servlet_classname (str): The name of the handler to be used in prometheus
                and opentracing logs.
        """
-        pass


 class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
@@ -29,7 +29,6 @@ import warnings
 from types import TracebackType
 from typing import (
     TYPE_CHECKING,
-    Any,
     Awaitable,
     Callable,
     Optional,
@@ -41,7 +40,7 @@ from typing import (
 )

 import attr
-from typing_extensions import Literal
+from typing_extensions import Literal, ParamSpec

 from twisted.internet import defer, threads
 from twisted.python.threadpool import ThreadPool
@@ -719,32 +718,33 @@ def nested_logging_context(suffix: str) -> LoggingContext:
     )


+P = ParamSpec("P")
 R = TypeVar("R")


 @overload
 def preserve_fn(  # type: ignore[misc]
-    f: Callable[..., Awaitable[R]],
-) -> Callable[..., "defer.Deferred[R]"]:
+    f: Callable[P, Awaitable[R]],
+) -> Callable[P, "defer.Deferred[R]"]:
     # The `type: ignore[misc]` above suppresses
     # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
     ...


 @overload
-def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]:
+def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]:
     ...


 def preserve_fn(
     f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
+        Callable[P, R],
+        Callable[P, Awaitable[R]],
     ]
-) -> Callable[..., "defer.Deferred[R]"]:
+) -> Callable[P, "defer.Deferred[R]"]:
     """Function decorator which wraps the function with run_in_background"""

-    def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]":
+    def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]":
         return run_in_background(f, *args, **kwargs)

     return g
@@ -752,7 +752,7 @@ def preserve_fn(

 @overload
 def run_in_background(  # type: ignore[misc]
-    f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any
+    f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     # The `type: ignore[misc]` above suppresses
     # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
@@ -761,18 +761,22 @@ def run_in_background(  # type: ignore[misc]

 @overload
 def run_in_background(
-    f: Callable[..., R], *args: Any, **kwargs: Any
+    f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     ...


-def run_in_background(
+def run_in_background(  # type: ignore[misc]
+    # The `type: ignore[misc]` above suppresses
+    # "Overloaded function implementation does not accept all possible arguments of signature 1"
+    # "Overloaded function implementation does not accept all possible arguments of signature 2"
+    # which seems like a bug in mypy.
     f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
+        Callable[P, R],
+        Callable[P, Awaitable[R]],
     ],
-    *args: Any,
-    **kwargs: Any,
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "defer.Deferred[R]":
     """Calls a function, ensuring that the current context is restored after
     return from the function, and that the sentinel context is set once the
@@ -872,7 +876,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:


 def defer_to_thread(
-    reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any
+    reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     """
     Calls the function `f` using a thread from the reactor's default threadpool and
@@ -908,9 +912,9 @@ def defer_to_thread(
 def defer_to_threadpool(
     reactor: "ISynapseReactor",
     threadpool: ThreadPool,
-    f: Callable[..., R],
-    *args: Any,
-    **kwargs: Any,
+    f: Callable[P, R],
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "defer.Deferred[R]":
     """
     A wrapper for twisted.internet.threads.deferToThreadpool, which handles
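The Callable[..., R] to Callable[P, R] changes in this file all serve one goal: preserve_fn, run_in_background, defer_to_thread and friends now keep the wrapped function's parameter list visible to the type checker instead of erasing it to "...". A self-contained sketch of the pattern (not Synapse code):

from typing import Callable, TypeVar
from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def log_calls(f: Callable[P, R]) -> Callable[P, R]:
    # ParamSpec lets the wrapper forward *args/**kwargs while the wrapped
    # signature stays checkable at every call site.
    def g(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {f.__name__}")
        return f(*args, **kwargs)
    return g

@log_calls
def add(x: int, y: int) -> int:
    return x + y

add(1, 2)        # OK
# add(1, "two")  # accepted under Callable[..., R]; rejected under ParamSpec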
@@ -54,6 +54,8 @@ from synapse.events.spamcheck import (
     USER_MAY_SEND_3PID_INVITE_CALLBACK,
 )
 from synapse.events.third_party_rules import (
+    CHECK_CAN_DEACTIVATE_USER_CALLBACK,
+    CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
     CHECK_EVENT_ALLOWED_CALLBACK,
     CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
     CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
@@ -283,6 +285,8 @@ class ModuleApi:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
+        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
         on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
         on_user_deactivation_status_changed: Optional[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@@ -298,6 +302,8 @@ class ModuleApi:
             check_threepid_can_be_invited=check_threepid_can_be_invited,
             check_visibility_can_be_modified=check_visibility_can_be_modified,
             on_new_event=on_new_event,
+            check_can_shutdown_room=check_can_shutdown_room,
+            check_can_deactivate_user=check_can_deactivate_user,
             on_profile_update=on_profile_update,
             on_user_deactivation_status_changed=on_user_deactivation_status_changed,
         )
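register_third_party_rules_callbacks gains two hooks here. A sketch of a module registering one of them; the callback names come from this diff, while the (user_id, by_admin) parameters of check_can_deactivate_user are an assumption to verify against the module API documentation:

class DeactivationPolicyModule:
    """Sketch: only let server admins deactivate accounts."""

    def __init__(self, config: dict, api):
        api.register_third_party_rules_callbacks(
            check_can_deactivate_user=self.check_can_deactivate_user,
        )

    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
        # Returning False blocks the deactivation request.
        return by_admin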
@@ -169,7 +169,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "content.msgtype",
                 "pattern": "m.notice",
-                "_id": "_suppress_notices",
+                "_cache_key": "_suppress_notices",
             }
         ],
         "actions": ["dont_notify"],
@@ -183,13 +183,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.member",
-                "_id": "_member",
+                "_cache_key": "_member",
             },
             {
                 "kind": "event_match",
                 "key": "content.membership",
                 "pattern": "invite",
-                "_id": "_invite_member",
+                "_cache_key": "_invite_member",
             },
             {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
         ],
@@ -212,7 +212,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.member",
-                "_id": "_member",
+                "_cache_key": "_member",
             }
         ],
         "actions": ["dont_notify"],
|
||||||
"kind": "event_match",
|
"kind": "event_match",
|
||||||
"key": "content.body",
|
"key": "content.body",
|
||||||
"pattern": "@room",
|
"pattern": "@room",
|
||||||
"_id": "_roomnotif_content",
|
"_cache_key": "_roomnotif_content",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"kind": "sender_notification_permission",
|
"kind": "sender_notification_permission",
|
||||||
"key": "room",
|
"key": "room",
|
||||||
"_id": "_roomnotif_pl",
|
"_cache_key": "_roomnotif_pl",
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
"actions": ["notify", {"set_tweak": "highlight", "value": True}],
|
"actions": ["notify", {"set_tweak": "highlight", "value": True}],
|
||||||
|
@@ -254,13 +254,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.tombstone",
-                "_id": "_tombstone",
+                "_cache_key": "_tombstone",
             },
             {
                 "kind": "event_match",
                 "key": "state_key",
                 "pattern": "",
-                "_id": "_tombstone_statekey",
+                "_cache_key": "_tombstone_statekey",
             },
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": True}],
@@ -272,7 +272,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.reaction",
-                "_id": "_reaction",
+                "_cache_key": "_reaction",
             }
         ],
         "actions": ["dont_notify"],
@@ -288,7 +288,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.call.invite",
-                "_id": "_call",
+                "_cache_key": "_call",
             }
         ],
         "actions": [
@@ -302,12 +302,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
     {
         "rule_id": "global/underride/.m.rule.room_one_to_one",
         "conditions": [
-            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
+            {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
             {
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.message",
-                "_id": "_message",
+                "_cache_key": "_message",
             },
         ],
         "actions": [
@@ -321,12 +321,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
     {
         "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one",
         "conditions": [
-            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
+            {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
             {
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.encrypted",
-                "_id": "_encrypted",
+                "_cache_key": "_encrypted",
             },
         ],
         "actions": [
@@ -342,7 +342,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.message",
-                "_id": "_message",
+                "_cache_key": "_message",
             }
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@@ -356,7 +356,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.encrypted",
-                "_id": "_encrypted",
+                "_cache_key": "_encrypted",
             }
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@@ -368,19 +368,19 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "im.vector.modular.widgets",
-                "_id": "_type_modular_widgets",
+                "_cache_key": "_type_modular_widgets",
             },
             {
                 "kind": "event_match",
                 "key": "content.type",
                 "pattern": "jitsi",
-                "_id": "_content_type_jitsi",
+                "_cache_key": "_content_type_jitsi",
             },
             {
                 "kind": "event_match",
                 "key": "state_key",
                 "pattern": "*",
-                "_id": "_is_state_event",
+                "_cache_key": "_is_state_event",
             },
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@@ -274,17 +274,17 @@ def _condition_checker(
     cache: Dict[str, bool],
 ) -> bool:
     for cond in conditions:
-        _id = cond.get("_id", None)
-        if _id:
-            res = cache.get(_id, None)
+        _cache_key = cond.get("_cache_key", None)
+        if _cache_key:
+            res = cache.get(_cache_key, None)
             if res is False:
                 return False
             elif res is True:
                 continue

         res = evaluator.matches(cond, uid, display_name)
-        if _id:
-            cache[_id] = bool(res)
+        if _cache_key:
+            cache[_cache_key] = bool(res)

         if not res:
             return False
@@ -40,7 +40,7 @@ def format_push_rules_for_user(

             # Remove internal stuff.
             for c in r["conditions"]:
-                c.pop("_id", None)
+                c.pop("_cache_key", None)

                 pattern_type = c.pop("pattern_type", None)
                 if pattern_type == "user_id":
Some files were not shown because too many files have changed in this diff.