Mirror of https://git.anonymousland.org/anonymousland/synapse.git
Synced 2024-10-01 11:49:51 -04:00

Merge remote-tracking branch 'upstream/release-v1.55'

This commit is contained in: commit c5c2c2e099
@@ -21,7 +21,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 
 # Run the export-data command on the sqlite test database
 python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
@@ -41,7 +41,7 @@ fi
 
 # Port the SQLite database to postgres so we can check command works against postgres
 echo "+++ Port SQLite3 database to postgres"
-scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 # Run the export-data command on postgres database
 python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
@@ -25,17 +25,19 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 
 # Create the PostgreSQL database.
 .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
+# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 # We should be able to run twice against the same database.
 echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 
 #####
 
@@ -46,7 +48,7 @@ echo "--- Prepare empty SQLite database"
 # we do this by deleting the sqlite db, and then doing the same again.
 rm .ci/test_db.db
 
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 
 # re-create the PostgreSQL database.
 .ci/scripts/postgres_exec.py \
@@ -54,4 +56,4 @@ scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-b
     "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against empty database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
@@ -3,11 +3,9 @@

 # things to include
 !docker
-!scripts
 !synapse
 !MANIFEST.in
 !README.rst
 !setup.py
-!synctl
 
 **/__pycache__
7 .github/workflows/release-artifacts.yml vendored

@@ -31,7 +31,7 @@ jobs:
           # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
           dists='["debian:sid"]'
           if [[ $GITHUB_REF == refs/tags/* ]]; then
-            dists=$(scripts-dev/build_debian_packages --show-dists-json)
+            dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
           fi
           echo "::set-output name=distros::$dists"
       # map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
       # see https://github.com/docker/build-push-action/issues/252
       # for the cache magic here
       run: |
-        ./src/scripts-dev/build_debian_packages \
+        ./src/scripts-dev/build_debian_packages.py \
          --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
          --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
          --docker-build-arg=--progress=plain \
@@ -112,7 +112,8 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           files: |
-            python-dist/*
+            Sdist/*
+            Wheel/*
             debs.tar.xz
           # if it's not already published, keep the release as a draft.
           draft: true
35 .github/workflows/tests.yml vendored

@@ -16,7 +16,8 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
+      - run: pip install -e .
-      - run: scripts-dev/generate_sample_config --check
+      - run: scripts-dev/generate_sample_config.sh --check
       - run: scripts-dev/config-lint.sh
 
   lint:
     runs-on: ubuntu-latest
@@ -51,7 +52,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: "pip install 'towncrier>=18.6.0rc1'"
-      - run: scripts-dev/check-newsfragment
+      - run: scripts-dev/check-newsfragment.sh
         env:
           PULL_REQUEST_NUMBER: ${{ github.event.number }}
 
@@ -376,7 +377,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
         env:
@@ -387,34 +388,22 @@ jobs:
   tests-done:
     if: ${{ always() }}
     needs:
       - check-sampleconfig
       - lint
      - lint-crlf
      - lint-newsfile
      - trial
      - trial-olddeps
      - sytest
      - export-data
      - portdb
      - complement
     runs-on: ubuntu-latest
     steps:
-      - name: Set build result
-        env:
-          NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines.
-        # we set it to an intermediate variable to avoid a pipe, which makes it
-        # hard to set $rc.
-        run: |
-          rc=0
-          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
-          while read job result ; do
-            # The newsfile lint may be skipped on non PR builds
-            if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
-              continue
-            fi
-            if [ "$result" != "success" ]; then
-              echo "::set-failed ::Job $job returned $result"
-              rc=1
-            fi
-          done <<< $results
-          exit $rc
+      - uses: matrix-org/done-action@v2
+        with:
+          needs: ${{ toJSON(needs) }}
+
+          # The newsfile lint may be skipped on non PR builds
+          skippable: |
+            lint-newsfile
85 CHANGES.md

@@ -1,3 +1,88 @@
+Synapse 1.55.0rc1 (2022-03-15)
+==============================
+
+This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.
+
+
+Features
+--------
+
+- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028))
+- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132))
+- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135))
+- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151))
+- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212))
+
+
+Bugfixes
+--------
+
+- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
+- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
+- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
+- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
+- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
+- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
+
+
+Improved Documentation
+----------------------
+
+- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
+- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
+- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
+- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
+- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))
+
+
+Deprecations and Removals
+-------------------------
+
+- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
+- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
+- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
+
+
+Internal Changes
+----------------
+
+- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
+- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
+- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
+- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
+- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
+- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
+- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
+- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
+- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
+- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
+- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
+- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140))
+- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
+- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
+- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
+- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
+- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
+- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
+- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
+- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
+- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
+- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
+- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
+- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
+- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163))
+- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180))
+- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182))
+- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183))
+- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187))
+- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188))
+- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192))
+- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197))
+- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203))
+- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206))
+- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210))
+- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211))
+
+
 Synapse 1.54.0 (2022-03-08)
 ===========================
 
@@ -1,4 +1,3 @@
-include synctl
 include LICENSE
 include VERSION
 include *.rst
@@ -17,7 +16,6 @@ recursive-include synapse/storage *.txt
 recursive-include synapse/storage *.md
 
 recursive-include docs *
-recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
@@ -53,5 +51,4 @@ prune contrib
 prune debian
 prune demo/etc
 prune docker
-prune snap
 prune stubs
@@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports
 
 (to stop, you can use `./demo/stop.sh`)
 
+See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
+for more information.
+
 If you just want to start a single instance of the app and run it directly::
 
     # Create the homeserver.yaml config once
@@ -20,7 +20,7 @@ apps:
     generate-config:
         command: generate_config
     generate-signing-key:
-        command: generate_signing_key.py
+        command: generate_signing_key
     register-new-matrix-user:
         command: register_new_matrix_user
         plugs: [network]
6 debian/changelog vendored

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.55.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000
+
 matrix-synapse-py3 (1.54.0) stable; urgency=medium
 
   * New synapse release 1.54.0.
11 demo/.gitignore vendored

@@ -1,7 +1,4 @@
-*.db
-*.log
-*.log.*
-*.pid
-
-/media_store.*
-/etc
+# Ignore all the temporary files from the demo servers.
+8080/
+8081/
+8082/
26 demo/README

@@ -1,26 +0,0 @@
-DO NOT USE THESE DEMO SERVERS IN PRODUCTION
-
-Requires you to have done:
-    python setup.py develop
-
-
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
-
-To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
-and are configured in a highly insecure way. Do not use these configuration files in production.
-
-stop.sh will stop the synapse servers and the webclient.
-
-clean.sh will delete the databases and log files.
-
-To start a completely new set of servers, run:
-
-    ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
-
-
-Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
-
-
-
-Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.
@@ -4,6 +4,9 @@ set -e
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
 
+# Ensure that the servers are stopped.
+$DIR/stop.sh
+
 PID_FILE="$DIR/servers.pid"
 
 if [ -f "$PID_FILE" ]; then
@@ -6,8 +6,6 @@ CWD=$(pwd)
 
 cd "$DIR/.." || exit
 
-mkdir -p demo/etc
-
 PYTHONPATH=$(readlink -f "$(pwd)")
 export PYTHONPATH
 
@@ -21,22 +19,26 @@ for port in 8080 8081 8082; do
     mkdir -p demo/$port
     pushd demo/$port || exit
 
-    #rm $DIR/etc/$port.config
+    # Generate the configuration for the homeserver at localhost:848x.
     python3 -m synapse.app.homeserver \
         --generate-config \
-        -H "localhost:$https_port" \
-        --config-path "$DIR/etc/$port.config" \
+        --server-name "localhost:$port" \
+        --config-path "$port.config" \
         --report-stats no
 
-    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
-        # Generate tls keys
-        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
+    if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
+        # Generate TLS keys.
+        openssl req -x509 -newkey rsa:4096 \
+            -keyout "localhost:$port.tls.key" \
+            -out "localhost:$port.tls.crt" \
+            -days 365 -nodes -subj "/O=matrix"
 
-        # Regenerate configuration
+        # Add customisations to the configuration.
         {
-            printf '\n\n# Customisation made by demo/start.sh\n'
+            printf '\n\n# Customisation made by demo/start.sh\n\n'
+            echo "public_baseurl: http://localhost:$port/"
             echo 'enable_registration: true'
             echo ''
 
             # Warning, this heredoc depends on the interaction of tabs and spaces.
             # Please don't accidentally bork me with your fancy settings.
@@ -63,38 +65,34 @@ for port in 8080 8081 8082; do
 
             echo "${listeners}"
 
-            # Disable tls for the servers
-            printf '\n\n# Disable tls on the servers.'
+            # Disable TLS for the servers
+            printf '\n\n# Disable TLS for the servers.'
             echo '# DO NOT USE IN PRODUCTION'
             echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
             echo 'federation_verify_certificates: false'
 
-            # Set tls paths
-            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
-            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
+            # Set paths for the TLS certificates.
+            echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
+            echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
 
-            # Ignore keys from the trusted keys server
             echo '# Ignore keys from the trusted keys server'
             echo 'trusted_key_servers:'
             echo '  - server_name: "matrix.org"'
             echo '    accept_keys_insecurely: true'
             echo ''
 
-            # Reduce the blacklist
-            blacklist=$(cat <<-BLACK
-            # Set the blacklist so that it doesn't include 127.0.0.1, ::1
-            federation_ip_range_blacklist:
-                - '10.0.0.0/8'
-                - '172.16.0.0/12'
-                - '192.168.0.0/16'
-                - '100.64.0.0/10'
-                - '169.254.0.0/16'
-                - 'fe80::/64'
-                - 'fc00::/7'
-            BLACK
+            # Allow the servers to communicate over localhost.
+            allow_list=$(cat <<-ALLOW_LIST
+            # Allow the servers to communicate over localhost.
+            ip_range_whitelist:
+               - '127.0.0.1/8'
+               - '::1/128'
+            ALLOW_LIST
             )
 
-            echo "${blacklist}"
-        } >> "$DIR/etc/$port.config"
+            echo "${allow_list}"
+        } >> "$port.config"
     fi
 
     # Check script parameters
@@ -141,19 +139,18 @@ for port in 8080 8081 8082; do
                 burst_count: 1000
         RC
         )
-        echo "${ratelimiting}" >> "$DIR/etc/$port.config"
+        echo "${ratelimiting}" >> "$port.config"
         fi
     fi
 
-    if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then
-        echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
-    fi
-    if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then
-        echo "report_stats: false" >> "$DIR/etc/$port.config"
+    # Always disable reporting of stats if the option is not there.
+    if ! grep -F "report_stats" -q "$port.config" ; then
+        echo "report_stats: false" >> "$port.config"
     fi
 
+    # Run the homeserver in the background.
     python3 -m synapse.app.homeserver \
-        --config-path "$DIR/etc/$port.config" \
+        --config-path "$port.config" \
         -D \
 
     popd || exit
@@ -46,8 +46,7 @@ RUN \
  && rm -rf /var/lib/apt/lists/*
 
 # Copy just what we need to pip install
-COPY scripts /synapse/scripts/
-COPY MANIFEST.in README.rst setup.py synctl /synapse/
+COPY MANIFEST.in README.rst setup.py /synapse/
 COPY synapse/__init__.py /synapse/synapse/__init__.py
 COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
@@ -82,6 +82,7 @@
     - [Release Cycle](development/releases.md)
     - [Git Usage](development/git.md)
     - [Testing]()
+    - [Demo scripts](development/demo.md)
     - [OpenTracing](opentracing.md)
     - [Database Schemas](development/database_schema.md)
     - [Experimental features](development/experimental_features.md)
@@ -172,6 +172,6 @@ frobber:
 ```
 
 Note that the sample configuration is generated from the synapse code
-and is maintained by a script, `scripts-dev/generate_sample_config`.
+and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
 Making sure that the output from this script matches the desired format
 is left as an exercise for the reader!
@@ -458,6 +458,17 @@ Git allows you to add this signoff automatically when using the `-s`
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.
 
+### Private Sign off
+
+If you would like to provide your legal name privately to the Matrix.org
+Foundation (instead of in a public commit or comment), you can do so
+by emailing your legal name and a link to the pull request to
+[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off).
+It helps to include "sign off" or similar in the subject line. You will then
+be instructed further.
+
+Once private sign off is complete, doing so for future contributions will not
+be required.
+
 # 10. Turn feedback into better code.
@@ -158,9 +158,9 @@ same as integers.
 There are three separate aspects to this:
 
  * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
-   `scripts/synapse_port_db`. This tells the port script to cast the integer
-   value from SQLite to a boolean before writing the value to the postgres
-   database.
+   `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
+   the integer value from SQLite to a boolean before writing the value to the
+   postgres database.
 
  * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
    SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
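To make the `BOOLEAN_COLUMNS` convention above concrete, here is a minimal sketch of the mapping and the cast it drives. The table and column names below are illustrative only; the real list in `synapse/_scripts/synapse_port_db.py` is much longer.

```python
# Illustrative sketch: the real BOOLEAN_COLUMNS mapping in
# synapse/_scripts/synapse_port_db.py contains many more entries.
BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier", "contains_url"],
    "rooms": ["is_public"],
}


def maybe_cast_to_bool(table: str, column: str, value: int):
    # SQLite stores booleans as 0/1 integers; cast them to real booleans
    # for columns declared BOOLEAN on the postgres side.
    if column in BOOLEAN_COLUMNS.get(table, []):
        return bool(value)
    return value
```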
41 docs/development/demo.md Normal file

@@ -0,0 +1,41 @@
+# Synapse demo setup
+
+**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**
+
+Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
+
+The demo setup allows running three federated Synapse servers, with server
+names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
+
+You can access them via any Matrix client over HTTP at `localhost:8080`,
+`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
+`localhost:8481`, and `localhost:8482`.
+
+To enable the servers to communicate, self-signed SSL certificates are generated
+and the servers are configured in a highly insecure way, including:
+
+* Not checking certificates over federation.
+* Not verifying keys.
+
+The servers are configured to store their data under `demo/8080`, `demo/8081`, and
+`demo/8082`. This includes configuration, logs, SQLite databases, and media.
+
+Note that when joining a public room on a different HS via "#foo:bar.net", then
+you are (in the current implementation) joining a room with room_id "foo". This
+means that it won't work if your HS already has a room with that name.
+
+## Using the demo scripts
+
+There are three main scripts with straightforward purposes:
+
+* `start.sh` will start the Synapse servers, generating any missing configuration.
+  * This accepts a single parameter `--no-rate-limit` to "disable" rate limits
+    (they actually still exist, but are very high).
+* `stop.sh` will stop the Synapse servers.
+* `clean.sh` will delete the configuration, databases, log files, etc.
+
+To start a completely new set of servers, run:
+
+```sh
+./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
+```
@@ -30,13 +30,57 @@ rather than skipping any that arrived late; whereas if you're looking at a
 historical section of timeline (i.e. `/messages`), you want to see the best
 representation of the state of the room as others were seeing it at the time.
 
+## Outliers
+
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet. They are "floating" events that we haven't
+yet correlated to the DAG.
+
+Outliers typically arise when we fetch the auth chain or state for a given
+event. When that happens, we just grab the events in the state/auth chain,
+without calculating the state at those events, or backfilling their
+`prev_events`.
+
+So, typically, we won't have the `prev_events` of an `outlier` in the database,
+(though it's entirely possible that we *might* have them for some other
+reason). Other things that make outliers different from regular events:
+
+ * We don't have state for them, so there should be no entry in
+   `event_to_state_groups` for an outlier. (In practice this isn't always
+   the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).
+
+ * We don't record entries for them in the `event_edges`,
+   `event_forward_extremeties` or `event_backward_extremities` tables.
+
+Since outliers are not tied into the DAG, they do not normally form part of the
+timeline sent down to clients via `/sync` or `/messages`; however there is an
+exception:
+
+### Out-of-band membership events
+
+A special case of outlier events are some membership events for federated rooms
+that we aren't full members of. For example:
+
+ * invites received over federation, before we join the room
+ * *rejections* for said invites
+ * knock events for rooms that we would like to join but have not yet joined.
+
+In all the above cases, we don't have the state for the room, which is why they
+are treated as outliers. They are a bit special though, in that they are
+proactively sent to clients via `/sync`.
+
 ## Forward extremity
 
-Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+Most-recent-in-time events in the DAG which are not referenced by any other
+events' `prev_events` yet. (In this definition, outliers, rejected events, and
+soft-failed events don't count.)
 
-The forward extremities of a room are used as the `prev_events` when the next event is sent.
+The forward extremities of a room (or at least, a subset of them, if there are
+more than ten) are used as the `prev_events` when the next event is sent.
+
+The "current state" of a room (ie: the state which would be used if we
+generated a new event) is, therefore, the resolution of the room states
+at each of the forward extremities.
 
 ## Backward extremity
 
@@ -44,23 +88,14 @@ The current marker of where we have backfilled up to and will generally be the
 `prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
 backfilling history.
 
-When we persist a non-outlier event, we clear it as a backward extremity and set
-all of its `prev_events` as the new backward extremities if they aren't already
-persisted in the `events` table.
-
-
-## Outliers
-
-We mark an event as an `outlier` when we haven't figured out the state for the
-room at that point in the DAG yet.
-
-We won't *necessarily* have the `prev_events` of an `outlier` in the database,
-but it's entirely possible that we *might*.
-
-For example, when we fetch the event auth chain or state for a given event, we
-mark all of those claimed auth events as outliers because we haven't done the
-state calculation ourself.
+Note that, unlike forward extremities, we typically don't have any backward
+extremity events themselves in the database - or, if we do, they will be "outliers" (see
+above). Either way, we don't expect to have the room state at a backward extremity.
+
+When we persist a non-outlier event, if it was previously a backward extremity,
+we clear it as a backward extremity and set all of its `prev_events` as the new
+backward extremities if they aren't already persisted as non-outliers. This
+therefore keeps the backward extremities up-to-date.
 
 ## State groups
|
||||
|
||||
If you want to get up and running quickly with a trio of homeservers in a
|
||||
private federation, there is a script in the `demo` directory. This is mainly
|
||||
useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/).
|
||||
useful just for development purposes. See
|
||||
[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html).
|
||||
|
@@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c
 
 If multiple modules implement this callback, Synapse runs them all in order.
 
+### `check_can_shutdown_room`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_shutdown_room(
+    user_id: str, room_id: str,
+) -> bool:
+```
+
+Called when an admin user requests the shutdown of a room. The module must return a
+boolean indicating whether the shutdown can go through. If the callback returns `False`,
+the shutdown will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+### `check_can_deactivate_user`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_deactivate_user(
+    user_id: str, by_admin: bool,
+) -> bool:
+```
+
+Called when the deactivation of a user is requested. User deactivation can be
+performed by an admin or the user themselves, so developers are encouraged to check the
+requester when implementing this callback. The module must return a
+boolean indicating whether the deactivation can go through. If the callback returns `False`,
+the deactivation will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+The module is passed two parameters, `user_id` which is the ID of the user being deactivated, and `by_admin` which is `True` if the request is made by a server admin, and `False` otherwise.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+
 ### `on_profile_update`
 
 _First introduced in Synapse v1.54.0_
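To illustrate the two callbacks documented above, here is a minimal module sketch. The `register_third_party_rules_callbacks` call is the module API's registration mechanism for these callbacks; the class name and the example policies are invented for this sketch.

```python
# A minimal module sketch wiring up the two callbacks documented above.
# The class name and the example policies are invented; only the
# register_third_party_rules_callbacks() call is part of the module API.
class ExampleModerationModule:
    def __init__(self, config: dict, api):
        self._api = api
        api.register_third_party_rules_callbacks(
            check_can_shutdown_room=self.check_can_shutdown_room,
            check_can_deactivate_user=self.check_can_deactivate_user,
        )

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        # Example policy: never allow a designated "control" room to be
        # shut down; allow everything else.
        return room_id != "!control:example.org"

    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
        # Example policy: only allow deactivation when requested by a
        # server admin, not by users themselves.
        return by_admin
```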
@@ -31,28 +31,29 @@ Anything that requires modifying the device list [#7721](https://github.com/matr
 
 Put the below in a new file at /etc/matrix-synapse/conf.d/sbc.yaml to override the defaults in homeserver.yaml.
 
 ```
-# Set to false to disable presence tracking on this homeserver.
+# Disable presence tracking, which is currently fairly resource intensive
+# More info: https://github.com/matrix-org/synapse/issues/9478
 use_presence: false
 
-# When this is enabled, the room "complexity" will be checked before a user
-# joins a new remote room. If it is above the complexity limit, the server will
-# disallow joining, or will instantly leave.
+# Set a small complexity limit, preventing users from joining large rooms
+# which may be resource-intensive to remain a part of.
+#
+# Note that this will not prevent users from joining smaller rooms that
+# eventually become complex.
 limit_remote_rooms:
-  # Uncomment to enable room complexity checking.
-  #enabled: true
+  enabled: true
   complexity: 3.0
 
 # Database configuration
 database:
   # Use postgres for the best performance
   name: psycopg2
   args:
     user: matrix-synapse
-    # Generate a long, secure one with a password manager
+    # Generate a long, secure password using a password manager
     password: hunter2
     database: matrix-synapse
     host: localhost
     cp_min: 5
     cp_max: 10
 ```
 
 Currently the complexity is measured by [current_state_events / 500](https://github.com/matrix-org/synapse/blob/v1.20.1/synapse/storage/databases/main/events_worker.py#L986). You can find join times and your most complex rooms like this:
@@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the
 copy is complete, restart synapse. For instance:
 
 ```sh
-./synctl stop
+synctl stop
 cp homeserver.db homeserver.db.snapshot
-./synctl start
+synctl start
 ```
 
 Copy the old config file into a new config file:
@@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the
 PostgreSQL database configuration file `homeserver-postgres.yaml`:
 
 ```sh
-./synctl stop
+synctl stop
 mv homeserver.yaml homeserver-old-sqlite.yaml
 mv homeserver-postgres.yaml homeserver.yaml
-./synctl start
+synctl start
 ```
 
 Synapse should now be running against PostgreSQL.
@@ -1947,8 +1947,14 @@ saml2_config:
 #
 # localpart_template: Jinja2 template for the localpart of the MXID.
 #    If this is not set, the user will be prompted to choose their
-#    own username (see 'sso_auth_account_details.html' in the 'sso'
-#    section of this file).
+#    own username (see the documentation for the
+#    'sso_auth_account_details.html' template). This template can
+#    use the 'localpart_from_email' filter.
+#
+# confirm_localpart: Whether to prompt the user to validate (or
+#    change) the generated localpart (see the documentation for the
+#    'sso_auth_account_details.html' template), instead of
+#    registering the account right away.
 #
 # display_name_template: Jinja2 template for the display name to set
 #    on first login. If unset, no displayname will be set.
@@ -2729,3 +2735,35 @@ redis:
   # Optional password if configured on the Redis instance
   #
   #password: <secret_password>
+
+
+## Background Updates ##
+
+# Background updates are database updates that are run in the background in batches.
+# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to
+# sleep can all be configured. This is helpful to speed up or slow down the updates.
+#
+background_updates:
+    # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set
+    # a time to change the default.
+    #
+    #background_update_duration_ms: 500
+
+    # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
+    #
+    #sleep_enabled: false
+
+    # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment
+    # and set a duration to change the default.
+    #
+    #sleep_duration_ms: 300
+
+    # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and
+    # set a size to change the default.
+    #
+    #min_batch_size: 10
+
+    # The batch size to use for the first iteration of a new background update. The default is 100.
+    # Uncomment and set a size to change the default.
+    #
+    #default_batch_size: 50
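As a rough sketch of how the knobs above interact, the loop below runs batches, adapts the batch size toward the target duration, and optionally sleeps between batches. This is schematic only and is not Synapse's actual implementation.

```python
# Schematic sketch of the background-update settings above; not
# Synapse's actual implementation.
import time


def run_background_update(step, duration_ms=100, sleep_enabled=True,
                          sleep_duration_ms=1000, min_batch_size=1,
                          default_batch_size=100):
    batch_size = default_batch_size
    while True:
        start = time.monotonic()
        # `step` processes up to `batch_size` items and returns True
        # once the whole update is complete.
        if step(max(batch_size, min_batch_size)):
            return
        # Scale the batch size so the next batch takes roughly
        # duration_ms, never dropping below min_batch_size.
        elapsed_ms = max((time.monotonic() - start) * 1000, 0.001)
        batch_size = max(int(batch_size * duration_ms / elapsed_ms), min_batch_size)
        if sleep_enabled:
            time.sleep(sleep_duration_ms / 1000)
```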
@@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver'
 
 Example: `message.sender_avatar_url|mxc_to_http(32,32)`
 
+```python
+localpart_from_email(address: str) -> str
+```
+
+Returns the local part of an email address (e.g. `alice` in `alice@example.com`).
+
+Example: `user.email_address|localpart_from_email`
+
 ## Email templates
 
@@ -176,8 +183,11 @@ Below are the templates Synapse will look for when generating pages related to S
   for the brand of the IdP
 * `user_attributes`: an object containing details about the user that
   we received from the IdP. May have the following attributes:
-    * display_name: the user's display_name
-    * emails: a list of email addresses
+    * `display_name`: the user's display name
+    * `emails`: a list of email addresses
+    * `localpart`: the local part of the Matrix user ID to register,
+      if `localpart_template` is set in the mapping provider configuration (empty
+      string if not)
   The template should render a form which submits the following fields:
   * `username`: the localpart of the user's chosen user id
 * `sso_new_user_consent.html`: HTML page allowing the user to consent to the
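A plausible implementation of the `localpart_from_email` filter added above is a one-liner. The sketch below is illustrative; the implementation actually shipped in Synapse may differ in edge cases.

```python
# Illustrative sketch of the filter's behaviour; the shipped
# implementation may handle edge cases differently.
def localpart_from_email(address: str) -> str:
    # Treat everything before the last '@' as the local part.
    return address.rsplit("@", 1)[0]


assert localpart_from_email("alice@example.com") == "alice"
```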
@@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse:
 
   * If you use synctl:
     ```sh
-    cd /where/you/run/synapse
-    ./synctl restart
+    # Depending on how Synapse is installed, synctl may already be on
+    # your PATH. If not, you may need to activate a virtual environment.
+    synctl restart
     ```
   * If you use systemd:
     ```sh
@@ -47,7 +47,7 @@ this document.
 3. Restart Synapse:
 
    ```bash
-   ./synctl restart
+   synctl restart
    ```
 
 To check whether your update was successful, you can check the running
@@ -85,6 +85,49 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
 
+# Upgrading to v1.56.0
+
+## Groups/communities feature has been deprecated
+
+The non-standard groups/communities feature in Synapse has been deprecated and will
+be disabled by default in Synapse v1.58.0.
+
+You can test disabling it by adding the following to your homeserver configuration:
+
+```yaml
+experimental_features:
+  groups_enabled: false
+```
+
+# Upgrading to v1.55.0
+
+## `synctl` script has been moved
+
+The `synctl` script
+[has been made](https://github.com/matrix-org/synapse/pull/12140) an
+[entry point](https://packaging.python.org/en/latest/specifications/entry-points/)
+and no longer exists at the root of Synapse's source tree. If you wish to use
+`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g.
+`synctl start` instead of `./synctl start` or `/path/to/synctl start`.
+
+You will need to ensure `synctl` is on your `PATH`.
+  - This is automatically the case when using
+    [Debian packages](https://packages.matrix.org/debian/) or
+    [docker images](https://hub.docker.com/r/matrixdotorg/synapse)
+    provided by Matrix.org.
+  - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added
+    to your Python installation's `bin`. This should be on your `PATH`
+    automatically, though you might need to activate a virtual environment
+    depending on how you installed Synapse.
+
+
+## Compatibility dropped for Mjolnir 1.3.1 and earlier
+
+Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier.
+If you use the Mjolnir module to moderate your homeserver,
+please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
+
+
 # Upgrading to v1.54.0
 
 ## Legacy structured logging configuration removal
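For readers unfamiliar with entry points, the mechanism the `synctl` note above refers to boils down to a `console_scripts` declaration in the packaging metadata. The sketch below is illustrative; the exact declaration in Synapse's own `setup.py` may differ.

```python
# Illustrative console_scripts sketch; Synapse's actual setup.py may
# declare this differently.
from setuptools import setup

setup(
    name="matrix-synapse",
    entry_points={
        "console_scripts": [
            # Installing the package creates a `synctl` executable in the
            # environment's bin/ directory, which is how it lands on PATH.
            "synctl = synapse._scripts.synctl:main",
        ],
    },
)
```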
@@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
 ```
 
 A new server admin user can also be created using the `register_new_matrix_user`
-command. This is a script that is located in the `scripts/` directory, or possibly
+command. This is a script that is distributed as part of synapse. It is possibly
 already on your `$PATH` depending on how Synapse was installed.
 
 Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
@@ -351,8 +351,11 @@ is only supported with Redis-based replication.)
 
 To enable this, the worker must have a HTTP replication listener configured,
 have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams. For example, to move event persistence off to a
-dedicated worker, the shared configuration would include:
+can handle multiple streams, but unless otherwise documented, each stream can only
+have a single writer.
+
+For example, to move event persistence off to a dedicated worker, the shared
+configuration would include:
 
 ```yaml
 instance_map:
@@ -370,8 +373,8 @@ streams and the endpoints associated with them:
 
 ##### The `events` stream
 
-The `events` stream also experimentally supports having multiple writers, where
-work is sharded between them by room ID. Note that you *must* restart all worker
+The `events` stream experimentally supports having multiple writers, where work
+is sharded between them by room ID. Note that you *must* restart all worker
 instances when adding or removing event persisters. An example `stream_writers`
 configuration with multiple writers:
 
@@ -384,38 +387,38 @@ stream_writers:
 
 ##### The `typing` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `typing` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `typing` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing
 
 ##### The `to_device` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `to_device` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `to_device` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/
 
 ##### The `account_data` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `account_data` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `account_data` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags
     ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data
 
 ##### The `receipts` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `receipts` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `receipts` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers
 
 ##### The `presence` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `presence` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `presence` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
33 mypy.ini

@@ -11,7 +11,7 @@ local_partial_types = True
 no_implicit_optional = True
 
 files =
-  scripts-dev/sign_json,
+  scripts-dev/,
   setup.py,
   synapse/,
   tests/
@@ -23,6 +23,20 @@ files =
 # https://docs.python.org/3/library/re.html#re.X
 exclude = (?x)
   ^(
+  |scripts-dev/build_debian_packages.py
+  |scripts-dev/check_signature.py
+  |scripts-dev/definitions.py
+  |scripts-dev/federation_client.py
+  |scripts-dev/hash_history.py
+  |scripts-dev/list_url_patterns.py
+  |scripts-dev/release.py
+  |scripts-dev/tail-synapse.py
+
+  |synapse/_scripts/export_signing_key.py
+  |synapse/_scripts/move_remote_media_to_new_store.py
+  |synapse/_scripts/synapse_port_db.py
+  |synapse/_scripts/update_synapse_database.py
+
   |synapse/storage/databases/__init__.py
   |synapse/storage/databases/main/__init__.py
   |synapse/storage/databases/main/cache.py
@@ -74,17 +88,8 @@ exclude = (?x)
   |tests/push/test_http.py
   |tests/push/test_presentable_names.py
   |tests/push/test_push_rule_evaluator.py
-  |tests/rest/client/test_account.py
-  |tests/rest/client/test_filter.py
-  |tests/rest/client/test_report_event.py
-  |tests/rest/client/test_rooms.py
-  |tests/rest/client/test_third_party_rules.py
-  |tests/rest/client/test_transactions.py
-  |tests/rest/client/test_typing.py
-  |tests/rest/key/v2/test_remote_key_resource.py
-  |tests/rest/media/v1/test_base.py
-  |tests/rest/media/v1/test_media_storage.py
-  |tests/rest/media/v1/test_url_preview.py
   |tests/scripts/test_new_matrix_user.py
   |tests/server.py
   |tests/server_notices/test_resource_limits_server_notices.py
@@ -246,10 +251,7 @@ disallow_untyped_defs = True
 [mypy-tests.storage.test_user_directory]
 disallow_untyped_defs = True
 
-[mypy-tests.rest.admin.*]
-disallow_untyped_defs = True
-
-[mypy-tests.rest.client.*]
+[mypy-tests.rest.*]
 disallow_untyped_defs = True
 
 [mypy-tests.federation.transport.test_client]
@@ -350,3 +352,6 @@ ignore_missing_imports = True
 
 [mypy-zope]
 ignore_missing_imports = True
+
+[mypy-incremental.*]
+ignore_missing_imports = True
@@ -71,4 +71,4 @@ fi
 
 # Run the tests!
 echo "Images built; running complement"
-go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-#
-# Update/check the docs/sample_config.yaml
-
-set -e
-
-cd "$(dirname "$0")/.."
-
-SAMPLE_CONFIG="docs/sample_config.yaml"
-SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"
-
-check() {
-    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1
-}
-
-if [ "$1" == "--check" ]; then
-    diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
-        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
-        exit 1
-    }
-    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || {
-        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
-        exit 1
-    }
-else
-    ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
-    ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG"
-fi
28 scripts-dev/generate_sample_config.sh Executable file

@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# Update/check the docs/sample_config.yaml
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+SAMPLE_CONFIG="docs/sample_config.yaml"
+SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"
+
+check() {
+    diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1
+}
+
+if [ "$1" == "--check" ]; then
+    diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || {
+        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
+        exit 1
+    }
+    diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || {
+        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
+        exit 1
+    }
+else
+    synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
+    synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG"
+fi
@@ -84,17 +84,8 @@ else
     files=(
         "synapse" "docker" "tests"
         # annoyingly, black doesn't find these so we have to list them
-        "scripts/export_signing_key"
-        "scripts/generate_config"
-        "scripts/generate_log_config"
-        "scripts/hash_password"
-        "scripts/register_new_matrix_user"
-        "scripts/synapse_port_db"
-        "scripts/update_synapse_database"
         "scripts-dev"
-        "scripts-dev/build_debian_packages"
-        "scripts-dev/sign_json"
-        "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
+        "contrib" "setup.py" "synmark" "stubs" ".ci"
     )
 fi
 fi
@@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
 echo "Running db background jobs..."
-scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG"
+synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG"
 
 # Create the PostgreSQL database.
 echo "Creating postgres database..."
@@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
 
 echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
 if [ -z "$COVERAGE" ]; then
     # No coverage needed
-    scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
+    synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
 else
     # Coverage desired
-    coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
+    coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
 fi
 
 # Delete schema_version, applied_schema_deltas and applied_module_schemas tables
@ -1,19 +0,0 @@
#!/usr/bin/env python
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse._scripts.register_new_matrix_user import main

if __name__ == "__main__":
    main()
@ -1,19 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse._scripts.review_recent_signups import main

if __name__ == "__main__":
    main()
@ -1,45 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;

use JSON::XS;
use LWP::UserAgent;
use URI::Escape;

if (@ARGV < 4) {
    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
}

my ($hs, $access_token, $room_id, $group_id) = @ARGV;
my $ua = LWP::UserAgent->new();
$ua->timeout(10);

if ($room_id =~ /^#/) {
    $room_id = uri_escape($room_id);
    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
}

my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
my $group_users = [
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
];

die "refusing to sync from empty room" unless (@$room_users);
die "refusing to sync to empty group" unless (@$group_users);

my $diff = {};
foreach my $user (@$room_users) { $diff->{$user}++ }
foreach my $user (@$group_users) { $diff->{$user}-- }

foreach my $user (keys %$diff) {
    if ($diff->{$user} == 1) {
        warn "inviting $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
    elsif ($diff->{$user} == -1) {
        warn "removing $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
}
14 setup.py
@ -15,7 +15,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from typing import Any, Dict

@ -153,8 +152,20 @@ setup(
    python_requires="~=3.7",
    entry_points={
        "console_scripts": [
            # Application
            "synapse_homeserver = synapse.app.homeserver:main",
            "synapse_worker = synapse.app.generic_worker:main",
            "synctl = synapse._scripts.synctl:main",
            # Scripts
            "export_signing_key = synapse._scripts.export_signing_key:main",
            "generate_config = synapse._scripts.generate_config:main",
            "generate_log_config = synapse._scripts.generate_log_config:main",
            "generate_signing_key = synapse._scripts.generate_signing_key:main",
            "hash_password = synapse._scripts.hash_password:main",
            "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main",
            "synapse_port_db = synapse._scripts.synapse_port_db:main",
            "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main",
            "update_synapse_database = synapse._scripts.update_synapse_database:main",
        ]
    },
    classifiers=[
@ -167,6 +178,5 @@ setup(
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    scripts=["synctl"] + glob.glob("scripts/*"),
    cmdclass={"test": TestCommand},
)
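For context on the entry_points change above: with console_scripts, pip generates a small wrapper executable for each "name = module:function" pair, which is why the standalone files under scripts/ can be deleted. A sketch of what the generated synapse_port_db wrapper effectively does (the exact file pip writes varies by platform):

    #!/usr/bin/env python
    # Rough equivalent of the wrapper pip generates for
    # "synapse_port_db = synapse._scripts.synapse_port_db:main".
    import sys

    from synapse._scripts.synapse_port_db import main

    if __name__ == "__main__":
        sys.exit(main())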
@ -20,7 +20,7 @@ from twisted.internet import protocol
from twisted.internet.defer import Deferred

class RedisProtocol(protocol.Protocol):
    def publish(self, channel: str, message: bytes): ...
    def publish(self, channel: str, message: bytes) -> "Deferred[None]": ...
    def ping(self) -> "Deferred[None]": ...
    def set(
        self,
@ -52,11 +52,14 @@ def lazyConnection(
    convertNumbers: bool = ...,
) -> RedisProtocol: ...

class ConnectionHandler: ...
# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies
# most methods to it via ConnectionHandler.__getattr__.
class ConnectionHandler(RedisProtocol):
    def disconnect(self) -> "Deferred[None]": ...

class RedisFactory(protocol.ReconnectingClientFactory):
    continueTrying: bool
    handler: RedisProtocol
    handler: ConnectionHandler
    pool: List[RedisProtocol]
    replyTimeout: Optional[int]
    def __init__(
@ -25,6 +25,27 @@ if sys.version_info < (3, 7):
    print("Synapse requires Python 3.7 or above.")
    sys.exit(1)

# Allow using the asyncio reactor via env var.
if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
    try:
        from incremental import Version

        import twisted

        # We need a bugfix that is included in Twisted 21.2.0:
        # https://twistedmatrix.com/trac/ticket/9787
        if twisted.version < Version("Twisted", 21, 2, 0):
            print("Using asyncio reactor requires Twisted>=21.2.0")
            sys.exit(1)

        import asyncio

        from twisted.internet import asyncioreactor

        asyncioreactor.install(asyncio.get_event_loop())
    except ImportError:
        pass

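A side note on the opt-in above: once asyncioreactor.install(...) has run, all Twisted I/O is driven by the standard asyncio event loop. A minimal standalone sketch of the same install-before-import pattern (not part of this diff):

    import asyncio

    from twisted.internet import asyncioreactor

    # Install must happen before anything imports twisted.internet.reactor,
    # otherwise the default reactor wins.
    asyncioreactor.install(asyncio.get_event_loop())

    from twisted.internet import reactor  # now backed by asyncio

    print(type(reactor))  # AsyncioSelectorReactor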
# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
# actually start Synapse will import these libraries fine.
@ -47,7 +68,7 @@ try:
except ImportError:
    pass

__version__ = "1.54.0"
__version__ = "1.55.0rc1"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
@ -50,7 +50,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
    )


if __name__ == "__main__":
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
@ -85,7 +85,6 @@ if __name__ == "__main__":
        else format_plain
    )

    keys = []
    for file in args.key_file:
        try:
            res = read_signing_keys(file)
@ -98,3 +97,7 @@ if __name__ == "__main__":
            res = []
        for key in res:
            formatter(get_verify_key(key))


if __name__ == "__main__":
    main()
@ -6,7 +6,8 @@ import sys

from synapse.config.homeserver import HomeServerConfig

if __name__ == "__main__":

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-dir",
@ -76,3 +77,7 @@ if __name__ == "__main__":
    shutil.copyfileobj(args.header_file, args.output_file)

    args.output_file.write(conf)


if __name__ == "__main__":
    main()
@ -19,7 +19,8 @@ import sys

from synapse.config.logger import DEFAULT_LOG_CONFIG

if __name__ == "__main__":

def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
@ -42,3 +43,7 @@ if __name__ == "__main__":
    out = args.output_file
    out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file))
    out.flush()


if __name__ == "__main__":
    main()
@ -19,7 +19,8 @@ from signedjson.key import generate_signing_key, write_signing_keys

from synapse.util.stringutils import random_string

if __name__ == "__main__":

def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
@ -34,3 +35,7 @@ if __name__ == "__main__":
    key_id = "a_" + random_string(4)
    key = (generate_signing_key(key_id),)
    write_signing_keys(args.output_file, key)


if __name__ == "__main__":
    main()
@ -8,9 +8,6 @@ import unicodedata
import bcrypt
import yaml

bcrypt_rounds = 12
password_pepper = ""


def prompt_for_pass():
    password = getpass.getpass("Password: ")
@ -26,7 +23,10 @@ def prompt_for_pass():
    return password


if __name__ == "__main__":
def main():
    bcrypt_rounds = 12
    password_pepper = ""

    parser = argparse.ArgumentParser(
        description=(
            "Calculate the hash of a new password, so that passwords can be reset"
@ -77,3 +77,7 @@ if __name__ == "__main__":
    ).decode("ascii")

    print(hashed)


if __name__ == "__main__":
    main()
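The hashing step that main() ultimately performs can be sketched in isolation, assuming the bcrypt package (the pepper value and password here are illustrative):

    import unicodedata

    import bcrypt

    password_pepper = ""  # illustrative; the real script reads this from config
    password = "correct horse battery staple"  # example input

    # NFKC-normalise, append the pepper, then bcrypt-hash with 12 rounds,
    # mirroring what hash_password does with its arguments.
    normalised = unicodedata.normalize("NFKC", password)
    hashed = bcrypt.hashpw(
        (normalised + password_pepper).encode("utf-8"),
        bcrypt.gensalt(rounds=12),
    ).decode("ascii")
    print(hashed)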
@ -28,7 +28,7 @@ This can be extracted from postgres with::

To use, pipe the above into::

    PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
    PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""

import argparse
@ -1146,7 +1146,7 @@ class TerminalProgress(Progress):
##############################################


if __name__ == "__main__":
def main():
    parser = argparse.ArgumentParser(
        description="A script to port an existing synapse SQLite database to"
        " a new PostgreSQL database."
@ -1251,3 +1251,7 @@ if __name__ == "__main__":
        sys.stderr.write(end_error)

        sys.exit(5)


if __name__ == "__main__":
    main()
@ -178,7 +178,9 @@ class RelationTypes:
    ANNOTATION: Final = "m.annotation"
    REPLACE: Final = "m.replace"
    REFERENCE: Final = "m.reference"
    THREAD: Final = "io.element.thread"
    THREAD: Final = "m.thread"
    # TODO Remove this in Synapse >= v1.57.0.
    UNSTABLE_THREAD: Final = "io.element.thread"


class LimitBlockingTypes:
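For reference, the stable identifier shows up in event content like this (a sketch of a threaded reply; the event ID is invented):

    # m.relates_to block of a threaded reply using the stable rel_type.
    threaded_reply_content = {
        "msgtype": "m.text",
        "body": "replying in a thread",
        "m.relates_to": {
            "rel_type": "m.thread",            # previously "io.element.thread"
            "event_id": "$thread_root_event",  # invented thread-root event ID
        },
    }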
@ -88,7 +88,9 @@ ROOM_EVENT_FILTER_SCHEMA = {
        "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
        "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
        # MSC3440, filtering by event relations.
        "related_by_senders": {"type": "array", "items": {"type": "string"}},
        "io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
        "related_by_rel_types": {"type": "array", "items": {"type": "string"}},
        "io.element.relation_types": {"type": "array", "items": {"type": "string"}},
    },
}
@ -318,19 +320,18 @@ class Filter:
        self.labels = filter_json.get("org.matrix.labels", None)
        self.not_labels = filter_json.get("org.matrix.not_labels", [])

        # Ideally these would be rejected at the endpoint if they were provided
        # and not supported, but that would involve modifying the JSON schema
        # based on the homeserver configuration.
        self.related_by_senders = self.filter_json.get("related_by_senders", None)
        self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)

        # Fallback to the unstable prefix if the stable version is not given.
        if hs.config.experimental.msc3440_enabled:
            self.relation_senders = self.filter_json.get(
            self.related_by_senders = self.related_by_senders or self.filter_json.get(
                "io.element.relation_senders", None
            )
            self.relation_types = self.filter_json.get(
                "io.element.relation_types", None
            self.related_by_rel_types = (
                self.related_by_rel_types
                or self.filter_json.get("io.element.relation_types", None)
            )
        else:
            self.relation_senders = None
            self.relation_types = None

    def filters_all_types(self) -> bool:
        return "*" in self.not_types
@ -461,7 +462,7 @@ class Filter:
        event_ids = [event.event_id for event in events if isinstance(event, EventBase)]  # type: ignore[attr-defined]
        event_ids_to_keep = set(
            await self._store.events_have_relations(
                event_ids, self.relation_senders, self.relation_types
                event_ids, self.related_by_senders, self.related_by_rel_types
            )
        )

@ -474,7 +475,7 @@ class Filter:
    async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
        result = [event for event in events if self._check(event)]

        if self.relation_senders or self.relation_types:
        if self.related_by_senders or self.related_by_rel_types:
            return await self._check_event_relations(result)

        return result
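To illustrate the stable filter fields accepted by the schema above, a sketch of a room event filter (IDs invented):

    # Keep only message events that have a relation from this sender,
    # where that relation is a thread.
    room_event_filter = {
        "types": ["m.room.message"],
        "related_by_senders": ["@alice:example.org"],
        "related_by_rel_types": ["m.thread"],
    }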
@ -322,7 +322,8 @@ class GenericWorkerServer(HomeServer):

                    presence.register_servlets(self, resource)

                    groups.register_servlets(self, resource)
                    if self.config.experimental.groups_enabled:
                        groups.register_servlets(self, resource)

                    resources.update({CLIENT_API_PREFIX: resource})

@ -417,7 +418,7 @@ class GenericWorkerServer(HomeServer):
            else:
                logger.warning("Unsupported listener type: %s", listener.type)

        self.get_tcp_replication().start_replication(self)
        self.get_replication_command_handler().start_replication(self)


def start(config_options: List[str]) -> None:
@ -273,7 +273,7 @@ class SynapseHomeServer(HomeServer):
            # If redis is enabled we connect via the replication command handler
            # in the same way as the workers (since we're effectively a client
            # rather than a server).
            self.get_tcp_replication().start_replication(self)
            self.get_replication_command_handler().start_replication(self)

        for listener in self.config.server.listeners:
            if listener.type == "http":
@ -175,27 +175,14 @@ class ApplicationService:
                return namespace.exclusive
        return False

    async def _matches_user(self, event: EventBase, store: "DataStore") -> bool:
        if self.is_interested_in_user(event.sender):
            return True

        # also check m.room.member state key
        if event.type == EventTypes.Member and self.is_interested_in_user(
            event.state_key
        ):
            return True

        does_match = await self.matches_user_in_member_list(event.room_id, store)
        return does_match

    @cached(num_args=1, cache_context=True)
    async def matches_user_in_member_list(
    async def _matches_user_in_member_list(
        self,
        room_id: str,
        store: "DataStore",
        cache_context: _CacheContext,
    ) -> bool:
        """Check if this service is interested a room based upon it's membership
        """Check if this service is interested in a room based upon its membership

        Args:
            room_id: The room to check.
@ -214,47 +201,110 @@ class ApplicationService:
                return True
        return False

    def _matches_room_id(self, event: EventBase) -> bool:
        if hasattr(event, "room_id"):
            return self.is_interested_in_room(event.room_id)
        return False
    def is_interested_in_user(
        self,
        user_id: str,
    ) -> bool:
        """
        Returns whether the application is interested in a given user ID.

    async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool:
        alias_list = await store.get_aliases_for_room(event.room_id)
        The appservice is considered to be interested in a user if either: the
        user ID is in the appservice's user namespace, or if the user is the
        appservice's configured sender_localpart.

        Args:
            user_id: The ID of the user to check.

        Returns:
            True if the application service is interested in the user, False if not.
        """
        return (
            # User is the appservice's sender_localpart user
            user_id == self.sender
            # User is in the appservice's user namespace
            or self.is_user_in_namespace(user_id)
        )

    @cached(num_args=1, cache_context=True)
    async def is_interested_in_room(
        self,
        room_id: str,
        store: "DataStore",
        cache_context: _CacheContext,
    ) -> bool:
        """
        Returns whether the application service is interested in a given room ID.

        The appservice is considered to be interested in the room if either: the ID or one
        of the aliases of the room is in the appservice's room ID or alias namespace
        respectively, or if one of the members of the room falls into the appservice's user
        namespace.

        Args:
            room_id: The ID of the room to check.
            store: The homeserver's datastore class.

        Returns:
            True if the application service is interested in the room, False if not.
        """
        # Check if we have interest in this room ID
        if self.is_room_id_in_namespace(room_id):
            return True

        # likewise with the room's aliases (if it has any)
        alias_list = await store.get_aliases_for_room(room_id)
        for alias in alias_list:
            if self.is_interested_in_alias(alias):
            if self.is_room_alias_in_namespace(alias):
                return True

        return False
        # And finally, perform an expensive check on whether any of the
        # users in the room match the appservice's user namespace
        return await self._matches_user_in_member_list(
            room_id, store, on_invalidate=cache_context.invalidate
        )

    async def is_interested(self, event: EventBase, store: "DataStore") -> bool:
    @cached(num_args=1, cache_context=True)
    async def is_interested_in_event(
        self,
        event_id: str,
        event: EventBase,
        store: "DataStore",
        cache_context: _CacheContext,
    ) -> bool:
        """Check if this service is interested in this event.

        Args:
            event_id: The ID of the event to check. This is purely used for simplifying the
                caching of calls to this method.
            event: The event to check.
            store: The datastore to query.

        Returns:
            True if this service would like to know about this event.
            True if this service would like to know about this event, otherwise False.
        """
        # Do cheap checks first
        if self._matches_room_id(event):
        # Check if we're interested in this event's sender by namespace (or if they're the
        # sender_localpart user)
        if self.is_interested_in_user(event.sender):
            return True

        # This will check the namespaces first before
        # checking the store, so should be run before _matches_aliases
        if await self._matches_user(event, store):
        # additionally, if this is a membership event, perform the same checks on
        # the user it references
        if event.type == EventTypes.Member and self.is_interested_in_user(
            event.state_key
        ):
            return True

        # This will check the store, so should be run last
        if await self._matches_aliases(event, store):
        # This will check the datastore, so should be run last
        if await self.is_interested_in_room(
            event.room_id, store, on_invalidate=cache_context.invalidate
        ):
            return True

        return False

    @cached(num_args=1)
    @cached(num_args=1, cache_context=True)
    async def is_interested_in_presence(
        self, user_id: UserID, store: "DataStore"
        self, user_id: UserID, store: "DataStore", cache_context: _CacheContext
    ) -> bool:
        """Check if this service is interested in a user's presence

@ -272,20 +322,19 @@ class ApplicationService:

        # Then find out if the appservice is interested in any of those rooms
        for room_id in room_ids:
            if await self.matches_user_in_member_list(room_id, store):
            if await self.is_interested_in_room(
                room_id, store, on_invalidate=cache_context.invalidate
            ):
                return True
        return False

    def is_interested_in_user(self, user_id: str) -> bool:
        return (
            bool(self._matches_regex(ApplicationService.NS_USERS, user_id))
            or user_id == self.sender
        )
    def is_user_in_namespace(self, user_id: str) -> bool:
        return bool(self._matches_regex(ApplicationService.NS_USERS, user_id))

    def is_interested_in_alias(self, alias: str) -> bool:
    def is_room_alias_in_namespace(self, alias: str) -> bool:
        return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias))

    def is_interested_in_room(self, room_id: str) -> bool:
    def is_room_id_in_namespace(self, room_id: str) -> bool:
        return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id))

    def is_exclusive_user(self, user_id: str) -> bool:
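To make the renamed predicates concrete: an appservice registration declares regex namespaces, and is_user_in_namespace / is_room_alias_in_namespace / is_room_id_in_namespace reduce to regex matches against them. A simplified sketch (the real class compiles its patterns from the registration file):

    import re

    # Simplified namespaces from a hypothetical appservice registration.
    namespaces = {
        "users": [re.compile(r"@irc_.*:example\.org")],
        "aliases": [re.compile(r"#irc_.*:example\.org")],
        "rooms": [],
    }

    def is_user_in_namespace(user_id: str) -> bool:
        # True when any user-namespace regex matches the full user ID.
        return any(p.fullmatch(user_id) for p in namespaces["users"])

    print(is_user_in_namespace("@irc_alice:example.org"))  # True
    print(is_user_in_namespace("@bob:example.org"))        # False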
@ -25,7 +25,7 @@ from synapse.appservice import (
    TransactionUnusedFallbackKeys,
)
from synapse.events import EventBase
from synapse.events.utils import serialize_event
from synapse.events.utils import SerializeEventConfig, serialize_event
from synapse.http.client import SimpleHttpClient
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.caches.response_cache import ResponseCache
@ -321,16 +321,18 @@ class ApplicationServiceApi(SimpleHttpClient):
            serialize_event(
                e,
                time_now,
                as_client_event=True,
                # If this is an invite or a knock membership event, and we're interested
                # in this user, then include any stripped state alongside the event.
                include_stripped_room_state=(
                    e.type == EventTypes.Member
                    and (
                        e.membership == Membership.INVITE
                        or e.membership == Membership.KNOCK
                    )
                    and service.is_interested_in_user(e.state_key)
                config=SerializeEventConfig(
                    as_client_event=True,
                    # If this is an invite or a knock membership event, and we're interested
                    # in this user, then include any stripped state alongside the event.
                    include_stripped_room_state=(
                        e.type == EventTypes.Member
                        and (
                            e.membership == Membership.INVITE
                            or e.membership == Membership.KNOCK
                        )
                        and service.is_interested_in_user(e.state_key)
                    ),
                ),
            )
            for e in events
@ -383,7 +383,7 @@ class RootConfig:
        Build a default configuration file

        This is used when the user explicitly asks us to generate a config file
        (eg with --generate_config).
        (eg with --generate-config).

        Args:
            config_dir_path: The path where the config files are kept. Used to
@ -19,6 +19,7 @@ from synapse.config import (
    api,
    appservice,
    auth,
    background_updates,
    cache,
    captcha,
    cas,
@ -115,6 +116,7 @@ class RootConfig:
    caches: cache.CacheConfig
    federation: federation.FederationConfig
    retention: retention.RetentionConfig
    background_updates: background_updates.BackgroundUpdateConfig

    config_classes: List[Type["Config"]] = ...
    def __init__(self) -> None: ...
68 synapse/config/background_updates.py Normal file
@ -0,0 +1,68 @@
# Copyright 2022 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class BackgroundUpdateConfig(Config):
    section = "background_updates"

    def generate_config_section(self, **kwargs) -> str:
        return """\
        ## Background Updates ##

        # Background updates are database updates that are run in the background in batches.
        # The duration, minimum batch size, default batch size, whether to sleep between
        # batches, and (if so) how long to sleep can all be configured. This is helpful
        # to speed up or slow down the updates.
        #
        background_updates:
            # How long in milliseconds to run a batch of background updates for. Defaults
            # to 100. Uncomment and set a time to change the default.
            #
            #background_update_duration_ms: 500

            # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
            #
            #sleep_enabled: false

            # If sleeping between updates, how long in milliseconds to sleep for. Defaults
            # to 1000. Uncomment and set a duration to change the default.
            #
            #sleep_duration_ms: 300

            # Minimum size a batch of background updates can be. Must be greater than 0.
            # Defaults to 1. Uncomment and set a size to change the default.
            #
            #min_batch_size: 10

            # The batch size to use for the first iteration of a new background update.
            # The default is 100. Uncomment and set a size to change the default.
            #
            #default_batch_size: 50
        """

    def read_config(self, config, **kwargs) -> None:
        bg_update_config = config.get("background_updates") or {}

        self.update_duration_ms = bg_update_config.get(
            "background_update_duration_ms", 100
        )

        self.sleep_enabled = bg_update_config.get("sleep_enabled", True)

        self.sleep_duration_ms = bg_update_config.get("sleep_duration_ms", 1000)

        self.min_batch_size = bg_update_config.get("min_batch_size", 1)

        self.default_batch_size = bg_update_config.get("default_batch_size", 100)
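As a usage sketch, here is the dict read_config would receive once a homeserver.yaml containing the sample section above is parsed (values illustrative):

    config = {
        "background_updates": {
            "background_update_duration_ms": 500,
            "sleep_enabled": False,
            "min_batch_size": 10,
        }
    }

    bg = config.get("background_updates") or {}
    assert bg.get("background_update_duration_ms", 100) == 500
    assert bg.get("sleep_duration_ms", 1000) == 1000  # unset keys fall back to defaults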
@ -74,3 +74,6 @@ class ExperimentalConfig(Config):

        # MSC3720 (Account status endpoint)
        self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)

        # The deprecated groups feature.
        self.groups_enabled: bool = experimental.get("groups_enabled", True)
@ -17,6 +17,7 @@ from .account_validity import AccountValidityConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .auth import AuthConfig
from .background_updates import BackgroundUpdateConfig
from .cache import CacheConfig
from .captcha import CaptchaConfig
from .cas import CasConfig
@ -101,4 +102,5 @@ class HomeServerConfig(RootConfig):
        WorkerConfig,
        RedisConfig,
        ExperimentalConfig,
        BackgroundUpdateConfig,
    ]
@ -182,8 +182,14 @@ class OIDCConfig(Config):
        #
        #       localpart_template: Jinja2 template for the localpart of the MXID.
        #           If this is not set, the user will be prompted to choose their
        #           own username (see 'sso_auth_account_details.html' in the 'sso'
        #           section of this file).
        #           own username (see the documentation for the
        #           'sso_auth_account_details.html' template). This template can
        #           use the 'localpart_from_email' filter.
        #
        #       confirm_localpart: Whether to prompt the user to validate (or
        #           change) the generated localpart (see the documentation for the
        #           'sso_auth_account_details.html' template), instead of
        #           registering the account right away.
        #
        #       display_name_template: Jinja2 template for the display name to set
        #           on first login. If unset, no displayname will be set.
@ -245,8 +245,8 @@ class SpamChecker:
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if
        sent by a local user. If it is sent by a user on another server, then
        users receive a blank event.
        sent by a local user. If it is sent by a user on another server, the
        event is soft-failed.

        Args:
            event: the event to be checked
@ -38,6 +38,8 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
    [str, StateMap[EventBase], str], Awaitable[bool]
]
ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable]
CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]

@ -157,6 +159,12 @@ class ThirdPartyEventRules:
            CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
        ] = []
        self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = []
        self._check_can_shutdown_room_callbacks: List[
            CHECK_CAN_SHUTDOWN_ROOM_CALLBACK
        ] = []
        self._check_can_deactivate_user_callbacks: List[
            CHECK_CAN_DEACTIVATE_USER_CALLBACK
        ] = []
        self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = []
        self._on_user_deactivation_status_changed_callbacks: List[
            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@ -173,6 +181,8 @@ class ThirdPartyEventRules:
            CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
        ] = None,
        on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
        on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
        on_user_deactivation_status_changed: Optional[
            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@ -198,6 +208,11 @@ class ThirdPartyEventRules:
        if on_new_event is not None:
            self._on_new_event_callbacks.append(on_new_event)

        if check_can_shutdown_room is not None:
            self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room)

        if check_can_deactivate_user is not None:
            self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user)
        if on_profile_update is not None:
            self._on_profile_update_callbacks.append(on_profile_update)

@ -369,6 +384,46 @@ class ThirdPartyEventRules:
                    "Failed to run module API callback %s: %s", callback, e
                )

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        """Intercept requests to shutdown a room. If `False` is returned, the
        room must not be shut down.

        Args:
            user_id: The ID of the user requesting the shutdown.
            room_id: The ID of the room.
        """
        for callback in self._check_can_shutdown_room_callbacks:
            try:
                if await callback(user_id, room_id) is False:
                    return False
            except Exception as e:
                logger.exception(
                    "Failed to run module API callback %s: %s", callback, e
                )
        return True

    async def check_can_deactivate_user(
        self,
        user_id: str,
        by_admin: bool,
    ) -> bool:
        """Intercept requests to deactivate a user. If `False` is returned, the
        user should not be deactivated.

        Args:
            user_id: The ID of the user to be deactivated.
            by_admin: Whether the deactivation was requested by a server admin.
        """
        for callback in self._check_can_deactivate_user_callbacks:
            try:
                if await callback(user_id, by_admin) is False:
                    return False
            except Exception as e:
                logger.exception(
                    "Failed to run module API callback %s: %s", callback, e
                )
        return True

    async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
        """Given a room ID, return the state events of that room.

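A sketch of how a module might register the two new callbacks through the module API (the module class and policies are invented for illustration):

    # Hypothetical Synapse module wiring up the new third-party rules callbacks.
    class ExampleRulesModule:
        def __init__(self, config: dict, api):
            api.register_third_party_rules_callbacks(
                check_can_shutdown_room=self.check_can_shutdown_room,
                check_can_deactivate_user=self.check_can_deactivate_user,
            )

        async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
            # Example policy: only one admin account may shut rooms down.
            return user_id == "@admin:example.org"

        async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
            # Example policy: deactivation must go through an admin.
            return by_admin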
@ -26,6 +26,7 @@ from typing import (
    Union,
)

import attr
from frozendict import frozendict

from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
@ -37,6 +38,7 @@ from synapse.util.frozenutils import unfreeze
from . import EventBase

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.storage.databases.main.relations import BundledAggregations


@ -303,29 +305,37 @@ def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
    return d


@attr.s(slots=True, frozen=True, auto_attribs=True)
class SerializeEventConfig:
    as_client_event: bool = True
    # Function to convert from federation format to client format
    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
    # ID of the user's auth token - used for namespacing of transaction IDs
    token_id: Optional[int] = None
    # List of event fields to include. If empty, all fields will be returned.
    only_event_fields: Optional[List[str]] = None
    # Some events can have stripped room state stored in the `unsigned` field.
    # This is required for invite and knock functionality. If this option is
    # False, that state will be removed from the event before it is returned.
    # Otherwise, it will be kept.
    include_stripped_room_state: bool = False


_DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig()


def serialize_event(
    e: Union[JsonDict, EventBase],
    time_now_ms: int,
    *,
    as_client_event: bool = True,
    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
    token_id: Optional[str] = None,
    only_event_fields: Optional[List[str]] = None,
    include_stripped_room_state: bool = False,
    config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
) -> JsonDict:
    """Serialize event for clients

    Args:
        e
        time_now_ms
        as_client_event
        event_format
        token_id
        only_event_fields
        include_stripped_room_state: Some events can have stripped room state
            stored in the `unsigned` field. This is required for invite and knock
            functionality. If this option is False, that state will be removed from the
            event before it is returned. Otherwise, it will be kept.
        config: Event serialization config

    Returns:
        The serialized event dictionary.
@ -348,11 +358,11 @@ def serialize_event(

    if "redacted_because" in e.unsigned:
        d["unsigned"]["redacted_because"] = serialize_event(
            e.unsigned["redacted_because"], time_now_ms, event_format=event_format
            e.unsigned["redacted_because"], time_now_ms, config=config
        )

    if token_id is not None:
        if token_id == getattr(e.internal_metadata, "token_id", None):
    if config.token_id is not None:
        if config.token_id == getattr(e.internal_metadata, "token_id", None):
            txn_id = getattr(e.internal_metadata, "txn_id", None)
            if txn_id is not None:
                d["unsigned"]["transaction_id"] = txn_id
@ -361,13 +371,14 @@ def serialize_event(
    # that are meant to provide metadata about a room to an invitee/knocker. They are
    # intended to only be included in specific circumstances, such as down sync, and
    # should not be included in any other case.
    if not include_stripped_room_state:
    if not config.include_stripped_room_state:
        d["unsigned"].pop("invite_room_state", None)
        d["unsigned"].pop("knock_room_state", None)

    if as_client_event:
        d = event_format(d)
    if config.as_client_event:
        d = config.event_format(d)

    only_event_fields = config.only_event_fields
    if only_event_fields:
        if not isinstance(only_event_fields, list) or not all(
            isinstance(f, str) for f in only_event_fields
@ -385,23 +396,26 @@ class EventClientSerializer:
    clients.
    """

    def __init__(self, hs: "HomeServer"):
        self._msc3440_enabled = hs.config.experimental.msc3440_enabled

    def serialize_event(
        self,
        event: Union[JsonDict, EventBase],
        time_now: int,
        *,
        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
        bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
        **kwargs: Any,
    ) -> JsonDict:
        """Serializes a single event.

        Args:
            event: The event being serialized.
            time_now: The current time in milliseconds
            config: Event serialization config
            bundle_aggregations: Whether to include the bundled aggregations for this
                event. Only applies to non-state events. (State events never include
                bundled aggregations.)
            **kwargs: Arguments to pass to `serialize_event`

        Returns:
            The serialized event
@ -410,7 +424,7 @@ class EventClientSerializer:
        if not isinstance(event, EventBase):
            return event

        serialized_event = serialize_event(event, time_now, **kwargs)
        serialized_event = serialize_event(event, time_now, config=config)

        # Check if there are any bundled aggregations to include with the event.
        if bundle_aggregations:
@ -419,6 +433,7 @@ class EventClientSerializer:
            self._inject_bundled_aggregations(
                event,
                time_now,
                config,
                bundle_aggregations[event.event_id],
                serialized_event,
            )
@ -456,6 +471,7 @@ class EventClientSerializer:
        self,
        event: EventBase,
        time_now: int,
        config: SerializeEventConfig,
        aggregations: "BundledAggregations",
        serialized_event: JsonDict,
    ) -> None:
@ -466,6 +482,7 @@ class EventClientSerializer:
            time_now: The current time in milliseconds
            aggregations: The bundled aggregation to serialize.
            serialized_event: The serialized event which may be modified.
            config: Event serialization config

        """
        serialized_aggregations = {}
@ -493,8 +510,8 @@ class EventClientSerializer:
        thread = aggregations.thread

        # Don't bundle aggregations as this could recurse forever.
        serialized_latest_event = self.serialize_event(
            thread.latest_event, time_now, bundle_aggregations=None
        serialized_latest_event = serialize_event(
            thread.latest_event, time_now, config=config
        )
        # Manually apply an edit, if one exists.
        if thread.latest_edit:
@ -502,33 +519,53 @@ class EventClientSerializer:
                thread.latest_event, serialized_latest_event, thread.latest_edit
            )

        serialized_aggregations[RelationTypes.THREAD] = {
        thread_summary = {
            "latest_event": serialized_latest_event,
            "count": thread.count,
            "current_user_participated": thread.current_user_participated,
        }
        serialized_aggregations[RelationTypes.THREAD] = thread_summary
        if self._msc3440_enabled:
            serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary

        # Include the bundled aggregations in the event.
        if serialized_aggregations:
            serialized_event["unsigned"].setdefault("m.relations", {}).update(
                serialized_aggregations
            )
            # There is likely already an "unsigned" field, but a filter might
            # have stripped it off (via the event_fields option). The server is
            # allowed to return additional fields, so add it back.
            serialized_event.setdefault("unsigned", {}).setdefault(
                "m.relations", {}
            ).update(serialized_aggregations)

    def serialize_events(
        self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
        self,
        events: Iterable[Union[JsonDict, EventBase]],
        time_now: int,
        *,
        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
        bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
    ) -> List[JsonDict]:
        """Serializes multiple events.

        Args:
            event
            time_now: The current time in milliseconds
            **kwargs: Arguments to pass to `serialize_event`
            config: Event serialization config
            bundle_aggregations: Whether to include the bundled aggregations for this
                event. Only applies to non-state events. (State events never include
                bundled aggregations.)

        Returns:
            The list of serialized events
        """
        return [
            self.serialize_event(event, time_now=time_now, **kwargs) for event in events
            self.serialize_event(
                event,
                time_now,
                config=config,
                bundle_aggregations=bundle_aggregations,
            )
            for event in events
        ]

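A quick usage sketch of the new-style API (wrapped in a helper so it is self-contained; the event argument is an EventBase obtained elsewhere):

    import time

    from synapse.events.utils import SerializeEventConfig, serialize_event

    def serialize_for_client(event) -> dict:
        # Serialize an EventBase for a client, without stripped room state.
        config = SerializeEventConfig(
            as_client_event=True,
            include_stripped_room_state=False,
        )
        return serialize_event(event, int(time.time() * 1000), config=config)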
@ -1428,7 +1428,7 @@ class FederationClient(FederationBase):

        # Validate children_state of the room.
        children_state = room.pop("children_state", [])
        if not isinstance(children_state, Sequence):
        if not isinstance(children_state, list):
            raise InvalidResponseError("'room.children_state' must be a list")
        if any(not isinstance(e, dict) for e in children_state):
            raise InvalidResponseError("Invalid event in 'children_state' list")
@ -1440,14 +1440,14 @@ class FederationClient(FederationBase):

        # Validate the children rooms.
        children = res.get("children", [])
        if not isinstance(children, Sequence):
        if not isinstance(children, list):
            raise InvalidResponseError("'children' must be a list")
        if any(not isinstance(r, dict) for r in children):
            raise InvalidResponseError("Invalid room in 'children' list")

        # Validate the inaccessible children.
        inaccessible_children = res.get("inaccessible_children", [])
        if not isinstance(inaccessible_children, Sequence):
        if not isinstance(inaccessible_children, list):
            raise InvalidResponseError("'inaccessible_children' must be a list")
        if any(not isinstance(r, str) for r in inaccessible_children):
            raise InvalidResponseError(
@ -1630,7 +1630,7 @@ def _validate_hierarchy_event(d: JsonDict) -> None:
        raise ValueError("Invalid event: 'content' must be a dict")

    via = content.get("via")
    if not isinstance(via, Sequence):
    if not isinstance(via, list):
        raise ValueError("Invalid event: 'via' must be a list")
    if any(not isinstance(v, str) for v in via):
        raise ValueError("Invalid event: 'via' must be a list of strings")
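Worth spelling out why Sequence is tightened to list here: str itself satisfies isinstance(x, Sequence), so the old check let a bare string through where a list was required. A minimal demonstration:

    from collections.abc import Sequence

    via = "not-a-list"                # malformed federation response value
    print(isinstance(via, Sequence))  # True: the old check passes, wrongly
    print(isinstance(via, list))      # False: the new check rejects it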
@ -244,7 +244,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):

        self.notifier.on_new_replication_data()

    def send_device_messages(self, destination: str) -> None:
    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.
@ -118,7 +118,12 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
    def send_device_messages(self, destination: str) -> None:
    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
        """Tells the sender that a new device message is ready to be sent to the
        destination. The `immediate` flag specifies whether an attempt should be
        made to send the messages immediately, or whether sending can be delayed
        for a short while (to aid performance).
        """
        raise NotImplementedError()

    @abc.abstractmethod
@ -146,9 +151,8 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):


@attr.s
class _PresenceQueue:
    """A queue of destinations that need to be woken up due to new presence
    updates.
class _DestinationWakeupQueue:
    """A queue of destinations that need to be woken up due to new updates.

    Staggers waking up of per destination queues to ensure that we don't attempt
    to start TLS connections with many hosts all at once, leading to pinned CPU.
@ -175,7 +179,7 @@ class _PresenceQueue:
        if not self.processing:
            self._handle()

    @wrap_as_background_process("_PresenceQueue.handle")
    @wrap_as_background_process("_DestinationWakeupQueue.handle")
    async def _handle(self) -> None:
        """Background process to drain the queue."""

@ -297,7 +301,7 @@ class FederationSender(AbstractFederationSender):

        self._external_cache = hs.get_external_cache()

        self._presence_queue = _PresenceQueue(self, self.clock)
        self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)

    def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
        """Get or create a PerDestinationQueue for the given destination
@ -614,7 +618,7 @@ class FederationSender(AbstractFederationSender):
            states, start_loop=False
        )

        self._presence_queue.add_to_queue(destination)
        self._destination_wakeup_queue.add_to_queue(destination)

    def build_and_send_edu(
        self,
@ -667,7 +671,7 @@ class FederationSender(AbstractFederationSender):
        else:
            queue.send_edu(edu)

    def send_device_messages(self, destination: str) -> None:
    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
        if destination == self.server_name:
            logger.warning("Not sending device update to ourselves")
            return
@ -677,7 +681,11 @@ class FederationSender(AbstractFederationSender):
        ):
            return

        self._get_per_destination_queue(destination).attempt_new_transaction()
        if immediate:
            self._get_per_destination_queue(destination).attempt_new_transaction()
        else:
            self._get_per_destination_queue(destination).mark_new_data()
            self._destination_wakeup_queue.add_to_queue(destination)

    def wake_destination(self, destination: str) -> None:
        """Called when we want to retry sending transactions to a remote.
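The staggering idea behind _DestinationWakeupQueue can be sketched in isolation: drain a queue of destinations with a short pause between wakeups so that hundreds of TLS handshakes do not start at once (the interval and destinations are illustrative):

    import asyncio
    from collections import OrderedDict

    WAKEUP_INTERVAL_SECONDS = 0.05  # illustrative stagger between destinations

    async def drain(queue: "OrderedDict[str, None]") -> None:
        # Wake one destination at a time, sleeping between each so that
        # connection setup cost is spread out instead of pinning the CPU.
        while queue:
            destination, _ = queue.popitem(last=False)
            print(f"waking per-destination queue for {destination}")
            await asyncio.sleep(WAKEUP_INTERVAL_SECONDS)

    asyncio.run(drain(OrderedDict.fromkeys(["matrix.org", "example.com"])))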
@ -219,6 +219,16 @@ class PerDestinationQueue:
        self._pending_edus.append(edu)
        self.attempt_new_transaction()

    def mark_new_data(self) -> None:
        """Marks that the destination has new data to send, without starting a
        new transaction.

        If a transaction loop is already in progress then a new transaction will
        be attempted when the current one finishes.
        """

        self._new_data_to_send = True

    def attempt_new_transaction(self) -> None:
        """Try to start a new transaction to this destination

@ -289,7 +289,7 @@ class OpenIdUserInfo(BaseFederationServlet):
        return 200, {"sub": user_id}


DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
    "federation": FEDERATION_SERVLET_CLASSES,
    "room_list": (PublicRoomList,),
    "group_server": GROUP_SERVER_SERVLET_CLASSES,
@ -298,6 +298,10 @@ DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
    "openid": (OpenIdUserInfo,),
}

DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid")

GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation")


def register_servlets(
    hs: "HomeServer",
@ -320,16 +324,19 @@ def register_servlets(
        Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    if not servlet_groups:
        servlet_groups = DEFAULT_SERVLET_GROUPS.keys()
        servlet_groups = DEFAULT_SERVLET_GROUPS
        # Only allow the groups servlets if the deprecated groups feature is enabled.
        if hs.config.experimental.groups_enabled:
            servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS

    for servlet_group in servlet_groups:
        # Reject unknown servlet groups.
        if servlet_group not in DEFAULT_SERVLET_GROUPS:
        if servlet_group not in SERVLET_GROUPS:
            raise RuntimeError(
                f"Attempting to register unknown federation servlet: '{servlet_group}'"
            )

        for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]:
        for servletclass in SERVLET_GROUPS[servlet_group]:
            # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
            if (
                servletclass == FederationTimestampLookupServlet
@ -63,7 +63,7 @@ class Authenticator:

        self.replication_client = None
        if hs.config.worker.worker_app:
            self.replication_client = hs.get_tcp_replication()
            self.replication_client = hs.get_replication_command_handler()

    # A method just so we can pass 'self' as the authenticator to the Servlets
    async def authenticate_request(
@ -571,7 +571,7 @@ class ApplicationServicesHandler:
        room_alias_str = room_alias.to_string()
        services = self.store.get_app_services()
        alias_query_services = [
            s for s in services if (s.is_interested_in_alias(room_alias_str))
            s for s in services if (s.is_room_alias_in_namespace(room_alias_str))
        ]
        for alias_service in alias_query_services:
            is_known_alias = await self.appservice_api.query_alias(
@ -660,7 +660,7 @@ class ApplicationServicesHandler:
        # inside of a list comprehension anymore.
        interested_list = []
        for s in services:
            if await s.is_interested(event, self.store):
            if await s.is_interested_in_event(event.event_id, event, self.store):
                interested_list.append(s)

        return interested_list
@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Optional

from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Requester, UserID, create_requester
from synapse.types import Codes, Requester, UserID, create_requester

if TYPE_CHECKING:
    from synapse.server import HomeServer
@ -42,6 +42,7 @@ class DeactivateAccountHandler:

        # Flag that indicates whether the process to part users from rooms is running
        self._user_parter_running = False
        self._third_party_rules = hs.get_third_party_event_rules()

        # Start the user parter loop so it can resume parting users from rooms where
        # it left off (if it has work left to do).
@ -74,6 +75,15 @@ class DeactivateAccountHandler:
        Returns:
            True if identity server supports removing threepids, otherwise False.
        """

        # Check if this user can be deactivated
        if not await self._third_party_rules.check_can_deactivate_user(
            user_id, by_admin
        ):
            raise SynapseError(
                403, "Deactivation of this user is forbidden", Codes.FORBIDDEN
            )

        # FIXME: Theoretically there is a race here wherein user resets
        # password using threepid.

@ -371,7 +371,6 @@ class DeviceHandler(DeviceWorkerHandler):
                    log_kv(
                        {"reason": "User doesn't have device id.", "device_id": device_id}
                    )
                    pass
                else:
                    raise

@ -414,7 +413,6 @@ class DeviceHandler(DeviceWorkerHandler):
                    # no match
                    set_tag("error", True)
                    set_tag("reason", "User doesn't have that device id.")
                    pass
                else:
                    raise

@ -506,7 +504,7 @@ class DeviceHandler(DeviceWorkerHandler):
            "Sending device list update notif for %r to: %r", user_id, hosts
        )
        for host in hosts:
            self.federation_sender.send_device_messages(host)
            self.federation_sender.send_device_messages(host, immediate=False)
            log_kv({"message": "sent device update to host", "host": host})

    async def notify_user_signature_update(
@ -121,7 +121,7 @@ class DirectoryHandler:

        service = requester.app_service
        if service:
            if not service.is_interested_in_alias(room_alias_str):
            if not service.is_room_alias_in_namespace(room_alias_str):
                raise SynapseError(
                    400,
                    "This application service has not reserved this kind of alias.",
@ -223,7 +223,7 @@ class DirectoryHandler:
    async def delete_appservice_association(
        self, service: ApplicationService, room_alias: RoomAlias
    ) -> None:
        if not service.is_interested_in_alias(room_alias.to_string()):
        if not service.is_room_alias_in_namespace(room_alias.to_string()):
            raise SynapseError(
                400,
                "This application service has not reserved this kind of alias",
@ -378,7 +378,7 @@ class DirectoryHandler:
        # non-exclusive locks on the alias (or there are no interested services)
        services = self.store.get_app_services()
        interested_services = [
            s for s in services if s.is_interested_in_alias(alias.to_string())
            s for s in services if s.is_room_alias_in_namespace(alias.to_string())
        ]

        for service in interested_services:
@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional

from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.presence import format_user_presence_state
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID
@ -120,7 +121,7 @@ class EventStreamHandler:
            chunks = self._event_serializer.serialize_events(
                events,
                time_now,
                as_client_event=as_client_event,
                config=SerializeEventConfig(as_client_event=as_client_event),
            )

            chunk = {
@ -23,8 +23,6 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

from twisted.internet import defer

from synapse import event_auth
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import (
@ -45,11 +43,7 @@ from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
    make_deferred_yieldable,
    nested_logging_context,
    preserve_fn,
)
from synapse.logging.context import nested_logging_context
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.federation import (
    ReplicationCleanRoomRestServlet,
@ -355,56 +349,8 @@ class FederationHandler:
        if success:
            return True

        # Huh, well *those* domains didn't work out. Lets try some domains
        # from the time.

        tried_domains = set(likely_domains)
        tried_domains.add(self.server_name)

        event_ids = list(extremities.keys())

        logger.debug("calling resolve_state_groups in _maybe_backfill")
        resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
        states_list = await make_deferred_yieldable(
            defer.gatherResults(
                [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
            )
        )

        # A map from event_id to state map of event_ids.
        state_ids: Dict[str, StateMap[str]] = dict(
            zip(event_ids, [s.state for s in states_list])
        )

        state_map = await self.store.get_events(
            [e_id for ids in state_ids.values() for e_id in ids.values()],
            get_prev_content=False,
        )

        # A map from event_id to state map of events.
        state_events: Dict[str, StateMap[EventBase]] = {
            key: {
                k: state_map[e_id]
                for k, e_id in state_dict.items()
                if e_id in state_map
            }
            for key, state_dict in state_ids.items()
        }

        for e_id in event_ids:
            likely_extremeties_domains = get_domains_from_state(state_events[e_id])

            success = await try_backfill(
                [
                    dom
                    for dom, _ in likely_extremeties_domains
                    if dom not in tried_domains
                ]
            )
            if success:
                return True

            tried_domains.update(dom for dom, _ in likely_extremeties_domains)
        # TODO: we could also try servers which were previously in the room, but
        # are no longer.

        return False

@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast
from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig
from synapse.events.validator import EventValidator
from synapse.handlers.presence import format_user_presence_state
from synapse.handlers.receipts import ReceiptEventSource
@ -152,10 +153,13 @@ class InitialSyncHandler:

        public_room_ids = await self.store.get_public_room_ids()

        limit = pagin_config.limit
        if limit is None:
        if pagin_config.limit is not None:
            limit = pagin_config.limit
        else:
            limit = 10

        serializer_options = SerializeEventConfig(as_client_event=as_client_event)

        async def handle_room(event: RoomsForUser) -> None:
            d: JsonDict = {
                "room_id": event.room_id,
@ -173,7 +177,7 @@ class InitialSyncHandler:
                d["invite"] = self._event_serializer.serialize_event(
                    invite_event,
                    time_now,
                    as_client_event=as_client_event,
                    config=serializer_options,
                )

            rooms_ret.append(d)
@ -225,7 +229,7 @@ class InitialSyncHandler:
                        self._event_serializer.serialize_events(
                            messages,
                            time_now=time_now,
                            as_client_event=as_client_event,
                            config=serializer_options,
                        )
                    ),
                    "start": await start_token.to_string(self.store),
@ -235,7 +239,7 @@ class InitialSyncHandler:
                d["state"] = self._event_serializer.serialize_events(
                    current_state.values(),
                    time_now=time_now,
                    as_client_event=as_client_event,
                    config=serializer_options,
                )

            account_data_events = []
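These hunks (and the matching ones in events.py and pagination.py) replace a growing list of per-call keyword arguments with a single SerializeEventConfig built once per request. A minimal sketch of the parameter-object pattern, using an illustrative stand-in class rather than Synapse's real serializer:

from dataclasses import dataclass

@dataclass(frozen=True)
class SerializeConfig:
    # Illustrative stand-in for synapse.events.utils.SerializeEventConfig.
    as_client_event: bool = True

def serialize_event(event: dict, time_now: int, config: SerializeConfig) -> dict:
    out = dict(event)
    out["age"] = time_now - out.pop("origin_server_ts", time_now)
    if config.as_client_event:
        out.pop("internal", None)  # drop server-internal fields for clients
    return out

# Build the options once, then reuse them at every call site.
opts = SerializeConfig(as_client_event=True)
ev = {"type": "m.room.message", "internal": {"txn_id": "abc"}}
print(serialize_event(ev, 1_000, opts))  # internal field stripped
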
@ -1071,6 +1071,9 @@ class EventCreationHandler:
        if relation_type == RelationTypes.ANNOTATION:
            aggregation_key = relation["key"]

            if len(aggregation_key) > 500:
                raise SynapseError(400, "Aggregation key is too long")

            already_exists = await self.store.has_user_annotated_event(
                relates_to, event.type, aggregation_key, event.sender
            )
@ -1078,7 +1081,10 @@ class EventCreationHandler:
                raise SynapseError(400, "Can't send same reaction twice")

        # Don't attempt to start a thread if the parent event is a relation.
        elif relation_type == RelationTypes.THREAD:
        elif (
            relation_type == RelationTypes.THREAD
            or relation_type == RelationTypes.UNSTABLE_THREAD
        ):
            if await self.store.event_includes_relation(relates_to):
                raise SynapseError(
                    400, "Cannot start threads from an event with a relation"
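For reference, the relations being validated here look like this on the wire; the event IDs are illustrative, and "io.element.thread" is, as I understand it, the unstable identifier that RelationTypes.UNSTABLE_THREAD refers to while MSC3440 was still in flight:

# An annotation (reaction); the handler now rejects keys longer than 500 chars.
annotation_content = {
    "m.relates_to": {
        "rel_type": "m.annotation",
        "event_id": "$parent_event_id",  # illustrative
        "key": "👍",
    }
}

# A threaded reply; the thread root must not itself be a relation.
thread_content = {
    "msgtype": "m.text",
    "body": "a reply in a thread",
    "m.relates_to": {
        "rel_type": "m.thread",  # or the unstable "io.element.thread"
        "event_id": "$thread_root_event_id",  # illustrative
    },
}
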
@ -45,6 +45,7 @@ from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
from synapse.util import Clock, json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.templates import _localpart_from_email_filter

if TYPE_CHECKING:
    from synapse.server import HomeServer
@ -1228,6 +1229,7 @@ class OidcSessionData:

class UserAttributeDict(TypedDict):
    localpart: Optional[str]
    confirm_localpart: bool
    display_name: Optional[str]
    emails: List[str]

@ -1307,6 +1309,11 @@ def jinja_finalize(thing: Any) -> Any:


env = Environment(finalize=jinja_finalize)
env.filters.update(
    {
        "localpart_from_email": _localpart_from_email_filter,
    }
)


@attr.s(slots=True, frozen=True, auto_attribs=True)
@ -1316,6 +1323,7 @@ class JinjaOidcMappingConfig:
    display_name_template: Optional[Template]
    email_template: Optional[Template]
    extra_attributes: Dict[str, Template]
    confirm_localpart: bool = False


class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
@ -1357,12 +1365,17 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
                "invalid jinja template", path=["extra_attributes", key]
            ) from e

        confirm_localpart = config.get("confirm_localpart") or False
        if not isinstance(confirm_localpart, bool):
            raise ConfigError("must be a bool", path=["confirm_localpart"])

        return JinjaOidcMappingConfig(
            subject_claim=subject_claim,
            localpart_template=localpart_template,
            display_name_template=display_name_template,
            email_template=email_template,
            extra_attributes=extra_attributes,
            confirm_localpart=confirm_localpart,
        )

    def get_remote_user_id(self, userinfo: UserInfo) -> str:
@ -1398,7 +1411,10 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
            emails.append(email)

        return UserAttributeDict(
            localpart=localpart, display_name=display_name, emails=emails
            localpart=localpart,
            display_name=display_name,
            emails=emails,
            confirm_localpart=self._config.confirm_localpart,
        )

    async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict:

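With confirm_localpart enabled the mapping provider stops committing to a localpart, and the SSO flow falls through to the account-details picker (see the sso.py hunks below). A sketch of the new option passing through parse_config; the call form and config dict are illustrative, and in a real deployment the dict would come from the user_mapping_provider.config section of an OIDC provider's configuration:

config = JinjaOidcMappingProvider.parse_config(
    {
        "localpart_template": "{{ user.preferred_username }}",
        "confirm_localpart": True,  # anything non-bool raises ConfigError
    }
)
assert config.confirm_localpart is True
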
@ -22,6 +22,7 @@ from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomResponse
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.state import StateFilter
@ -349,7 +350,7 @@ class PaginationHandler:
        """
        self._purges_in_progress_by_room.add(room_id)
        try:
            with await self.pagination_lock.write(room_id):
            async with self.pagination_lock.write(room_id):
                await self.storage.purge_events.purge_history(
                    room_id, token, delete_local_events
                )
@ -405,7 +406,7 @@ class PaginationHandler:
            room_id: room to be purged
            force: set true to skip checking for joined users.
        """
        with await self.pagination_lock.write(room_id):
        async with self.pagination_lock.write(room_id):
            # first check that we have no users in this room
            if not force:
                joined = await self.store.is_host_joined(room_id, self._server_name)
@ -447,7 +448,7 @@ class PaginationHandler:

        room_token = from_token.room_key

        with await self.pagination_lock.read(room_id):
        async with self.pagination_lock.read(room_id):
            (
                membership,
                member_event_id,
@ -541,13 +542,15 @@ class PaginationHandler:

        time_now = self.clock.time_msec()

        serialize_options = SerializeEventConfig(as_client_event=as_client_event)

        chunk = {
            "chunk": (
                self._event_serializer.serialize_events(
                    events,
                    time_now,
                    config=serialize_options,
                    bundle_aggregations=aggregations,
                    as_client_event=as_client_event,
                )
            ),
            "start": await from_token.to_string(self.store),
@ -556,7 +559,7 @@ class PaginationHandler:

        if state:
            chunk["state"] = self._event_serializer.serialize_events(
                state, time_now, as_client_event=as_client_event
                state, time_now, config=serialize_options
            )

        return chunk
@ -612,7 +615,7 @@ class PaginationHandler:

        self._purges_in_progress_by_room.add(room_id)
        try:
            with await self.pagination_lock.write(room_id):
            async with self.pagination_lock.write(room_id):
                self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                self._delete_by_id[
                    delete_id
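The lock's read and write methods now hand back async context managers instead of awaitables that resolve to plain context managers, hence "with await ..." becoming "async with ...". A minimal self-contained sketch of the pattern (this is not Synapse's actual ReadWriteLock):

import asyncio
from contextlib import asynccontextmanager

class SimpleAsyncLock:
    def __init__(self) -> None:
        self._lock = asyncio.Lock()

    @asynccontextmanager
    async def write(self, key: str):
        # Acquired on entry, released on exit, even if the body raises.
        async with self._lock:
            yield

async def main() -> None:
    lock = SimpleAsyncLock()
    async with lock.write("!room:example.org"):
        ...  # critical section

asyncio.run(main())
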
@ -267,7 +267,6 @@ class BasePresenceHandler(abc.ABC):
            is_syncing: Whether or not the user is now syncing
            sync_time_msec: Time in ms when the user was last syncing
        """
        pass

    async def update_external_syncs_clear(self, process_id: str) -> None:
        """Marks all users that had been marked as syncing by a given process
@ -277,7 +276,6 @@ class BasePresenceHandler(abc.ABC):

        This is a no-op when presence is handled by a different worker.
        """
        pass

    async def process_replication_rows(
        self, stream_name: str, instance_name: str, token: int, rows: list
@ -424,13 +422,13 @@ class WorkerPresenceHandler(BasePresenceHandler):

    async def _on_shutdown(self) -> None:
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_command(
            self.hs.get_replication_command_handler().send_command(
                ClearUserSyncsCommand(self.instance_id)
            )

    def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None:
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_user_sync(
            self.hs.get_replication_command_handler().send_user_sync(
                self.instance_id, user_id, is_syncing, last_sync_ms
            )

@ -270,7 +270,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
        # Then filter down to rooms that the AS can read
        events = []
        for room_id, event in rooms_to_events.items():
            if not await service.matches_user_in_member_list(room_id, self.store):
            if not await service.is_interested_in_room(room_id, self.store):
                continue

            events.append(event)
@ -1485,6 +1485,7 @@ class RoomShutdownHandler:
        self.room_member_handler = hs.get_room_member_handler()
        self._room_creation_handler = hs.get_room_creation_handler()
        self._replication = hs.get_replication_data_handler()
        self._third_party_rules = hs.get_third_party_event_rules()
        self.event_creation_handler = hs.get_event_creation_handler()
        self.store = hs.get_datastores().main

@ -1558,6 +1559,13 @@ class RoomShutdownHandler:
        if not RoomID.is_valid(room_id):
            raise SynapseError(400, "%s is not a legal room ID" % (room_id,))

        if not await self._third_party_rules.check_can_shutdown_room(
            requester_user_id, room_id
        ):
            raise SynapseError(
                403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
            )

        # Action the block first (even if the room doesn't exist yet)
        if block:
            # This will work even if the room is already blocked, but that is
@ -1736,8 +1736,8 @@ class RoomMemberMasterHandler(RoomMemberHandler):
            txn_id=txn_id,
            prev_event_ids=prev_event_ids,
            auth_event_ids=auth_event_ids,
            outlier=True,
        )
        event.internal_metadata.outlier = True
        event.internal_metadata.out_of_band_membership = True

        result_event = await self.event_creation_handler.handle_new_client_event(
@ -295,7 +295,7 @@ class RoomSummaryHandler:
                # inaccessible to the requesting user.
                if room_entry:
                    # Add the room (including the stripped m.space.child events).
                    rooms_result.append(room_entry.as_json())
                    rooms_result.append(room_entry.as_json(for_client=True))

                    # If this room is not at the max-depth, check if there are any
                    # children to process.
@ -843,21 +843,32 @@ class _RoomEntry:
    # This may not include all children.
    children_state_events: Sequence[JsonDict] = ()

    def as_json(self) -> JsonDict:
    def as_json(self, for_client: bool = False) -> JsonDict:
        """
        Returns a JSON dictionary suitable for the room hierarchy endpoint.

        It returns the room summary including the stripped m.space.child events
        as a sub-key.

        Args:
            for_client: If true, any server-server only fields are stripped from
                the result.

        """
        result = dict(self.room)

        # Before returning to the client, remove the allowed_room_ids key, if it
        # exists.
        if for_client:
            result.pop("allowed_room_ids", False)

        result["children_state"] = self.children_state_events
        return result


def _has_valid_via(e: EventBase) -> bool:
    via = e.content.get("via")
    if not via or not isinstance(via, Sequence):
    if not via or not isinstance(via, list):
        return False
    for v in via:
        if not isinstance(v, str):
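The Sequence -> list tightening matters because str is itself a Sequence and every element of a string is a one-character str, so a malformed via such as "matrix.org" would previously have passed both checks. A quick illustration:

from collections.abc import Sequence

via = "matrix.org"  # malformed: should be a list of server names

print(isinstance(via, Sequence))             # True  -- strings are sequences
print(all(isinstance(v, str) for v in via))  # True  -- each character is a str
print(isinstance(via, list))                 # False -- the tightened check rejects it
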
@ -132,6 +132,7 @@ class UserAttributes:
    # if `None`, the mapper has not picked a userid, and the user should be prompted to
    # enter one.
    localpart: Optional[str]
    confirm_localpart: bool = False
    display_name: Optional[str] = None
    emails: Collection[str] = attr.Factory(list)

@ -561,9 +562,10 @@ class SsoHandler:
        # Must provide either attributes or session, not both
        assert (attributes is not None) != (session is not None)

        if (attributes and attributes.localpart is None) or (
            session and session.chosen_localpart is None
        ):
        if (
            attributes
            and (attributes.localpart is None or attributes.confirm_localpart is True)
        ) or (session and session.chosen_localpart is None):
            return b"/_synapse/client/pick_username/account_details"
        elif self._consent_at_registration and not (
            session and session.terms_accepted_version
@ -160,8 +160,9 @@ class FollowerTypingHandler:
        """Should be called whenever we receive updates for typing stream."""

        if self._latest_room_serial > token:
            # The master has gone backwards. To prevent inconsistent data, just
            # clear everything.
            # The typing worker has gone backwards (e.g. it may have restarted).
            # To prevent inconsistent data, just clear everything.
            logger.info("Typing handler stream went backwards; resetting")
            self._reset()

        # Set the latest serial token to whatever the server gave us.
@ -486,9 +487,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
            if handler._room_serials[room_id] <= from_key:
                continue

            if not await service.matches_user_in_member_list(
                room_id, self._main_store
            ):
            if not await service.is_interested_in_room(room_id, self._main_store):
                continue

            events.append(self._make_event_for(room_id))
@ -120,7 +120,6 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC):
        """Called when response has finished streaming and the parser should
        return the final result (or error).
        """
        pass


@attr.s(slots=True, frozen=True, auto_attribs=True)
@ -601,7 +600,6 @@ class MatrixFederationHttpClient:
                    response.code,
                    response_phrase,
                )
                pass
            else:
                logger.info(
                    "{%s} [%s] Got response headers: %d %s",
@ -233,7 +233,6 @@ class HttpServer(Protocol):
            servlet_classname (str): The name of the handler to be used in prometheus
                and opentracing logs.
        """
        pass


class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
@ -29,7 +29,6 @@ import warnings
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Optional,
@ -41,7 +40,7 @@ from typing import (
)

import attr
from typing_extensions import Literal
from typing_extensions import Literal, ParamSpec

from twisted.internet import defer, threads
from twisted.python.threadpool import ThreadPool
@ -719,32 +718,33 @@ def nested_logging_context(suffix: str) -> LoggingContext:
    )


P = ParamSpec("P")
R = TypeVar("R")


@overload
def preserve_fn(  # type: ignore[misc]
    f: Callable[..., Awaitable[R]],
) -> Callable[..., "defer.Deferred[R]"]:
    f: Callable[P, Awaitable[R]],
) -> Callable[P, "defer.Deferred[R]"]:
    # The `type: ignore[misc]` above suppresses
    # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
    ...


@overload
def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]:
def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]:
    ...


def preserve_fn(
    f: Union[
        Callable[..., R],
        Callable[..., Awaitable[R]],
        Callable[P, R],
        Callable[P, Awaitable[R]],
    ]
) -> Callable[..., "defer.Deferred[R]"]:
) -> Callable[P, "defer.Deferred[R]"]:
    """Function decorator which wraps the function with run_in_background"""

    def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]":
    def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]":
        return run_in_background(f, *args, **kwargs)

    return g
@ -752,7 +752,7 @@ def preserve_fn(

@overload
def run_in_background(  # type: ignore[misc]
    f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any
    f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
    # The `type: ignore[misc]` above suppresses
    # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
@ -761,18 +761,22 @@ def run_in_background(  # type: ignore[misc]

@overload
def run_in_background(
    f: Callable[..., R], *args: Any, **kwargs: Any
    f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
    ...


def run_in_background(
def run_in_background(  # type: ignore[misc]
    # The `type: ignore[misc]` above suppresses
    # "Overloaded function implementation does not accept all possible arguments of signature 1"
    # "Overloaded function implementation does not accept all possible arguments of signature 2"
    # which seems like a bug in mypy.
    f: Union[
        Callable[..., R],
        Callable[..., Awaitable[R]],
        Callable[P, R],
        Callable[P, Awaitable[R]],
    ],
    *args: Any,
    **kwargs: Any,
    *args: P.args,
    **kwargs: P.kwargs,
) -> "defer.Deferred[R]":
    """Calls a function, ensuring that the current context is restored after
    return from the function, and that the sentinel context is set once the
@ -872,7 +876,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:


def defer_to_thread(
    reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any
    reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
    """
    Calls the function `f` using a thread from the reactor's default threadpool and
@ -908,9 +912,9 @@ def defer_to_thread(
def defer_to_threadpool(
    reactor: "ISynapseReactor",
    threadpool: ThreadPool,
    f: Callable[..., R],
    *args: Any,
    **kwargs: Any,
    f: Callable[P, R],
    *args: P.args,
    **kwargs: P.kwargs,
) -> "defer.Deferred[R]":
    """
    A wrapper for twisted.internet.threads.deferToThreadpool, which handles
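Replacing bare Callable[..., R] with a ParamSpec lets mypy check the arguments of wrapped calls, not just the return type. A minimal self-contained sketch of the technique:

from typing import Callable, TypeVar
from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def log_calls(f: Callable[P, R]) -> Callable[P, R]:
    # The wrapper advertises f's exact parameter types, so bad call
    # sites are flagged by mypy instead of disappearing into `...`.
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {f.__name__}")
        return f(*args, **kwargs)
    return wrapper

@log_calls
def add(x: int, y: int) -> int:
    return x + y

add(1, 2)      # ok
# add(1, "2")  # mypy: argument 2 has incompatible type "str"
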
@ -54,6 +54,8 @@ from synapse.events.spamcheck import (
    USER_MAY_SEND_3PID_INVITE_CALLBACK,
)
from synapse.events.third_party_rules import (
    CHECK_CAN_DEACTIVATE_USER_CALLBACK,
    CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
    CHECK_EVENT_ALLOWED_CALLBACK,
    CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
    CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
@ -283,6 +285,8 @@ class ModuleApi:
            CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
        ] = None,
        on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
        on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
        on_user_deactivation_status_changed: Optional[
            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@ -298,6 +302,8 @@ class ModuleApi:
            check_threepid_can_be_invited=check_threepid_can_be_invited,
            check_visibility_can_be_modified=check_visibility_can_be_modified,
            on_new_event=on_new_event,
            check_can_shutdown_room=check_can_shutdown_room,
            check_can_deactivate_user=check_can_deactivate_user,
            on_profile_update=on_profile_update,
            on_user_deactivation_status_changed=on_user_deactivation_status_changed,
        )
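Together with the RoomShutdownHandler hunk above, modules can now veto room shutdowns as well as deactivations. A minimal sketch mirroring the earlier example (the module and its policy are illustrative):

class ShutdownPolicyModule:
    def __init__(self, config: dict, api):
        api.register_third_party_rules_callbacks(
            check_can_shutdown_room=self.check_can_shutdown_room,
        )

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        # Returning False causes a 403 FORBIDDEN in RoomShutdownHandler.
        return not room_id.endswith(":protected.example.org")  # illustrative rule
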
@ -169,7 +169,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "content.msgtype",
                "pattern": "m.notice",
                "_id": "_suppress_notices",
                "_cache_key": "_suppress_notices",
            }
        ],
        "actions": ["dont_notify"],
@ -183,13 +183,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.member",
                "_id": "_member",
                "_cache_key": "_member",
            },
            {
                "kind": "event_match",
                "key": "content.membership",
                "pattern": "invite",
                "_id": "_invite_member",
                "_cache_key": "_invite_member",
            },
            {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
        ],
@ -212,7 +212,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.member",
                "_id": "_member",
                "_cache_key": "_member",
            }
        ],
        "actions": ["dont_notify"],
@ -237,12 +237,12 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "content.body",
                "pattern": "@room",
                "_id": "_roomnotif_content",
                "_cache_key": "_roomnotif_content",
            },
            {
                "kind": "sender_notification_permission",
                "key": "room",
                "_id": "_roomnotif_pl",
                "_cache_key": "_roomnotif_pl",
            },
        ],
        "actions": ["notify", {"set_tweak": "highlight", "value": True}],
@ -254,13 +254,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.tombstone",
                "_id": "_tombstone",
                "_cache_key": "_tombstone",
            },
            {
                "kind": "event_match",
                "key": "state_key",
                "pattern": "",
                "_id": "_tombstone_statekey",
                "_cache_key": "_tombstone_statekey",
            },
        ],
        "actions": ["notify", {"set_tweak": "highlight", "value": True}],
@ -272,7 +272,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.reaction",
                "_id": "_reaction",
                "_cache_key": "_reaction",
            }
        ],
        "actions": ["dont_notify"],
@ -288,7 +288,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.call.invite",
                "_id": "_call",
                "_cache_key": "_call",
            }
        ],
        "actions": [
@ -302,12 +302,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
    {
        "rule_id": "global/underride/.m.rule.room_one_to_one",
        "conditions": [
            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
            {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
            {
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.message",
                "_id": "_message",
                "_cache_key": "_message",
            },
        ],
        "actions": [
@ -321,12 +321,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
    {
        "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one",
        "conditions": [
            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
            {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
            {
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.encrypted",
                "_id": "_encrypted",
                "_cache_key": "_encrypted",
            },
        ],
        "actions": [
@ -342,7 +342,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.message",
                "_id": "_message",
                "_cache_key": "_message",
            }
        ],
        "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@ -356,7 +356,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "m.room.encrypted",
                "_id": "_encrypted",
                "_cache_key": "_encrypted",
            }
        ],
        "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@ -368,19 +368,19 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                "kind": "event_match",
                "key": "type",
                "pattern": "im.vector.modular.widgets",
                "_id": "_type_modular_widgets",
                "_cache_key": "_type_modular_widgets",
            },
            {
                "kind": "event_match",
                "key": "content.type",
                "pattern": "jitsi",
                "_id": "_content_type_jitsi",
                "_cache_key": "_content_type_jitsi",
            },
            {
                "kind": "event_match",
                "key": "state_key",
                "pattern": "*",
                "_id": "_is_state_event",
                "_cache_key": "_is_state_event",
            },
        ],
        "actions": ["notify", {"set_tweak": "highlight", "value": False}],

@ -274,17 +274,17 @@ def _condition_checker(
    cache: Dict[str, bool],
) -> bool:
    for cond in conditions:
        _id = cond.get("_id", None)
        if _id:
            res = cache.get(_id, None)
        _cache_key = cond.get("_cache_key", None)
        if _cache_key:
            res = cache.get(_cache_key, None)
            if res is False:
                return False
            elif res is True:
                continue

        res = evaluator.matches(cond, uid, display_name)
        if _id:
            cache[_id] = bool(res)
        if _cache_key:
            cache[_cache_key] = bool(res)

        if not res:
            return False

@ -40,7 +40,7 @@ def format_push_rules_for_user(

        # Remove internal stuff.
        for c in r["conditions"]:
            c.pop("_id", None)
            c.pop("_cache_key", None)

            pattern_type = c.pop("pattern_type", None)
            if pattern_type == "user_id":
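The rename also makes the intent of this strip step clearer: _cache_key exists purely to memoise condition evaluation server-side and must never reach clients. A small illustration of the round trip:

rule = {
    "conditions": [
        {
            "kind": "event_match",
            "key": "type",
            "pattern": "m.reaction",
            "_cache_key": "_reaction",  # server-internal only
        }
    ],
    "actions": ["dont_notify"],
}

for c in rule["conditions"]:
    c.pop("_cache_key", None)

# What a client sees: no internal keys.
print(rule["conditions"][0])
# {'kind': 'event_match', 'key': 'type', 'pattern': 'm.reaction'}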