Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-08-15 14:00:16 -04:00)

Commit 8631aaeb5a: Merge remote-tracking branch 'upstream/release-v1.44'

243 changed files with 3908 additions and 2190 deletions
.github/workflows/docs.yaml (vendored, 1 change)

@@ -61,6 +61,5 @@ jobs:
       uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
       with:
         github_token: ${{ secrets.GITHUB_TOKEN }}
-        keep_files: true
         publish_dir: ./book
         destination_dir: ./${{ steps.vars.outputs.branch-version }}
.github/workflows/tests.yml (vendored, 1 change)

@@ -192,6 +192,7 @@ jobs:
       volumes:
         - ${{ github.workspace }}:/src
       env:
+        SYTEST_BRANCH: ${{ github.head_ref }}
         POSTGRES: ${{ matrix.postgres && 1}}
         MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
         WORKERS: ${{ matrix.workers && 1 }}
.gitignore (vendored, 1 change)

@@ -40,6 +40,7 @@ __pycache__/
 /.coverage*
 /.mypy_cache/
 /.tox
+/.tox-pg-container
 /build/
 /coverage.*
 /dist/
CHANGES.md (72 changes)

@@ -1,3 +1,75 @@
+Synapse 1.44.0rc1 (2021-09-29)
+==============================
+
+Features
+--------
+
+- Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event. ([\#10776](https://github.com/matrix-org/synapse/issues/10776))
+- Improve oEmbed URL previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819))
+- Speed up responding with large JSON objects to requests. ([\#10868](https://github.com/matrix-org/synapse/issues/10868), [\#10905](https://github.com/matrix-org/synapse/issues/10905))
+- Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. ([\#10898](https://github.com/matrix-org/synapse/issues/10898))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka. ([\#10690](https://github.com/matrix-org/synapse/issues/10690))
+- Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory. ([\#10782](https://github.com/matrix-org/synapse/issues/10782))
+- Fix a long-standing bug that caused unbanning a user by sending a membership event to fail. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807))
+- Fix a long-standing bug where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810))
+- Fix a long-standing bug causing an error in the deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827))
+- Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843))
+- Fix a long-standing bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859))
+- Fix a bug introduced in Synapse 1.37.0 which caused `knock` membership events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873))
+- Fix invalidating one-time key count cache after claiming keys. The bug was introduced in Synapse v1.41.0. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875))
+- Fix a long-standing bug causing application service users to be subject to MAU blocking if the MAU limit had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881))
+- Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected. ([\#10907](https://github.com/matrix-org/synapse/issues/10907))
+- Fix a long-standing bug causing URL cache files to be stored in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911))
+- Fix a long-standing bug leading to race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913))
+
+
+Improved Documentation
+----------------------
+
+- Fix some crashes in the Module API example code, by adding JSON encoding/decoding. ([\#10845](https://github.com/matrix-org/synapse/issues/10845))
+- Add developer documentation about experimental configuration flags. ([\#10865](https://github.com/matrix-org/synapse/issues/10865))
+- Properly remove deleted files from GitHub pages when generating the documentation. ([\#10869](https://github.com/matrix-org/synapse/issues/10869))
+
+
+Internal Changes
+----------------
+
+- Fix GitHub Actions config so we can run sytest on synapse from parallel branches. ([\#10659](https://github.com/matrix-org/synapse/issues/10659))
+- Split out [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) meta events to their own fields in the `/batch_send` response. ([\#10777](https://github.com/matrix-org/synapse/issues/10777))
+- Add missing type hints to REST servlets. ([\#10785](https://github.com/matrix-org/synapse/issues/10785), [\#10817](https://github.com/matrix-org/synapse/issues/10817))
+- Simplify the internal logic which maintains the user directory database tables. ([\#10796](https://github.com/matrix-org/synapse/issues/10796))
+- Use direct references to config flags. ([\#10812](https://github.com/matrix-org/synapse/issues/10812), [\#10885](https://github.com/matrix-org/synapse/issues/10885), [\#10893](https://github.com/matrix-org/synapse/issues/10893), [\#10897](https://github.com/matrix-org/synapse/issues/10897))
+- Specify the type of token in generic "Invalid token" error messages. ([\#10815](https://github.com/matrix-org/synapse/issues/10815))
+- Make `StateFilter` frozen so it is hashable. ([\#10816](https://github.com/matrix-org/synapse/issues/10816))
+- Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error. ([\#10820](https://github.com/matrix-org/synapse/issues/10820))
+- Add type hints to the state database. ([\#10823](https://github.com/matrix-org/synapse/issues/10823))
+- Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826))
+- Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829))
+- Add missing type hints to `synapse.handlers`. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856))
+- Extend the Module API to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833))
+- Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834))
+- Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835))
+- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint. ([\#10838](https://github.com/matrix-org/synapse/issues/10838))
+- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839))
+- Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867))
+- Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879))
+- Break down Grafana's cache expiry time series based on reason for eviction, c.f. [\#10829](https://github.com/matrix-org/synapse/issues/10829). ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
+- Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901))
+- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887))
+- Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889))
+- Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891))
+- Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker. ([\#10906](https://github.com/matrix-org/synapse/issues/10906))
+- Document and summarize changes in schema version `61` – `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917))
+- Update release script to sign the newly created git tags. ([\#10925](https://github.com/matrix-org/synapse/issues/10925))
+- Fix Debian builds due to `dh-virtualenv` no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931))
+
+
 Synapse 1.43.0 (2021-09-21)
 ===========================
README.rst

@@ -288,7 +288,7 @@ Quick start
 Before setting up a development environment for synapse, make sure you have the
 system dependencies (such as the python header files) installed - see
-`Installing from source <https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source>`_.
+`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.
 
 To check out a synapse for development, clone the git repo into a working
 directory of your choice::
changelog.d/10919.doc (new file, 1 line)

@@ -0,0 +1 @@
+Minor updates to the installation instructions.
contrib/grafana/synapse.json

@@ -6785,7 +6785,7 @@
           "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
           "format": "time_series",
           "intervalFactor": 1,
-          "legendFormat": "{{name}} {{job}}-{{index}}",
+          "legendFormat": "{{name}} ({{reason}}) {{job}}-{{index}}",
           "refId": "A"
         }
       ],
@@ -10888,5 +10888,5 @@
   "timezone": "",
   "title": "Synapse",
   "uid": "000000012",
-  "version": 99
+  "version": 100
 }
debian/changelog (vendored, 6 changes)

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.44.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.44.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Sep 2021 13:41:28 +0100
+
 matrix-synapse-py3 (1.43.0) stable; urgency=medium
 
   * New synapse release 1.43.0.
docker/Dockerfile-dhvirtualenv

@@ -47,8 +47,9 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     && cd /dh-virtualenv \
     && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -y --no-install-recommends"
 
-# build it
-RUN cd /dh-virtualenv && dpkg-buildpackage -us -uc -b
+# Build it. Note that building the docs doesn't work due to differences in
+# Sphinx APIs across versions/distros.
+RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
 
 ###
 ### Stage 1
docker/Dockerfile-pgtests

@@ -1,6 +1,6 @@
 # Use the Sytest image that comes with a lot of the build dependencies
 # pre-installed
-FROM matrixdotorg/sytest:latest
+FROM matrixdotorg/sytest:bionic
 
 # The Sytest image doesn't come with python, so install that
 RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
@@ -8,5 +8,23 @@ RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
 # We need tox to run the tests in run_pg_tests.sh
 RUN python3 -m pip install tox
 
-ADD run_pg_tests.sh /pg_tests.sh
-ENTRYPOINT /pg_tests.sh
+# Initialise the db
+RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
+
+# Add a user with our UID and GID so that files get created on the host owned
+# by us, not root.
+ARG UID
+ARG GID
+RUN groupadd --gid $GID user
+RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
+
+# Ensure we can start postgres by sudo-ing as the postgres user.
+RUN apt-get update && apt-get -qq install -y sudo
+RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+ADD run_pg_tests.sh /run_pg_tests.sh
+# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
+# so that we can `docker run` this container and pass arguments to pg_tests.sh
+ENTRYPOINT ["/run_pg_tests.sh"]
+
+USER user
docker/run_pg_tests.sh

@@ -10,11 +10,10 @@ set -e
 # Set PGUSER so Synapse's tests know what user to connect to the database with
 export PGUSER=postgres
 
-# Initialise & start the database
-su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
-su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+# Start the database
+sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
 
 # Run the tests
 cd /src
 export TRIAL_FLAGS="-j 4"
-tox --workdir=/tmp -e py35-postgres
+tox --workdir=./.tox-pg-container -e py36-postgres "$@"
docs/SUMMARY.md

@@ -74,6 +74,7 @@
 - [Testing]()
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
+  - [Experimental features](development/experimental_features.md)
 - [Synapse Architecture]()
   - [Log Contexts](log_contexts.md)
   - [Replication](replication.md)
docs/development/contributing_guide.md

@@ -170,6 +170,53 @@ To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
 SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
 ```
 
+### Running tests under PostgreSQL
+
+Invoking `trial` as above will use an in-memory SQLite database. This is great for
+quick development and testing. However, we recommend using a PostgreSQL database
+in production (and indeed, we have some code paths specific to each database).
+This means that we need to run our unit tests against PostgreSQL too. Our CI does
+this automatically for pull requests and release candidates, but it's sometimes
+useful to reproduce this locally.
+
+To do so, [configure Postgres](../postgres.md) and run `trial` with the
+following environment variables matching your configuration:
+
+- `SYNAPSE_POSTGRES` to anything nonempty
+- `SYNAPSE_POSTGRES_HOST`
+- `SYNAPSE_POSTGRES_USER`
+- `SYNAPSE_POSTGRES_PASSWORD`
+
+For example:
+
+```shell
+export SYNAPSE_POSTGRES=1
+export SYNAPSE_POSTGRES_HOST=localhost
+export SYNAPSE_POSTGRES_USER=postgres
+export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
+trial
+```
+
+#### Prebuilt container
+
+Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
+Docker container to set up PostgreSQL and run our tests for us. To do so, run
+
+```shell
+scripts-dev/test_postgresql.sh
+```
+
+Any extra arguments to the script will be passed to `tox` and then to `trial`,
+so we can run a specific test in this container with e.g.
+
+```shell
+scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
+```
+
+The container creates a folder in your Synapse checkout called
+`.tox-pg-container` and uses this as a tox environment. The output of any
+`trial` runs goes into `_trial_temp` in your synapse source directory — the same
+as running `trial` directly on your host machine.
+
 ## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
docs/development/experimental_features.md (new file, 37 lines)

@@ -0,0 +1,37 @@
+# Implementing experimental features in Synapse
+
+It can be desirable to implement "experimental" features which are disabled by
+default and must be explicitly enabled via the Synapse configuration. This is
+applicable for features which:
+
+* Are unstable in the Matrix spec (e.g. those defined by an MSC that has not yet been merged).
+* Developers are not confident in their use by general Synapse administrators/users
+  (e.g. a feature is incomplete, buggy, performs poorly, or needs further testing).
+
+Note that this only really applies to features which are expected to be desirable
+to a broad audience. The [module infrastructure](../modules/index.md) should
+instead be investigated for non-standard features.
+
+Guarding experimental features behind configuration flags should help with some
+of the following scenarios:
+
+* Ensure that clients do not assume that unstable features exist (failing
+  gracefully if they do not).
+* Unstable features do not become de-facto standards and can be removed
+  aggressively (since only those who have opted-in will be affected).
+* Ease finding the implementation of unstable features in Synapse (for future
+  removal or stabilization).
+* Ease testing a feature (or removal of feature) due to enabling/disabling without
+  code changes. It also becomes possible to ask for wider testing, if desired.
+
+Experimental configuration flags should be disabled by default (requiring Synapse
+administrators to explicitly opt-in), although there are situations where it makes
+sense (from a product point-of-view) to enable features by default. This is
+expected and not an issue.
+
+It is not a requirement for experimental features to be behind a configuration flag,
+but one should be used if unsure.
+
+New experimental configuration flags should be added under the `experimental`
+configuration key (see the `synapse.config.experimental` file) and either explain
+(briefly) what is being enabled, or include the MSC number.
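To make the last convention above concrete, here is a minimal sketch of declaring such a flag in `synapse.config.experimental`, assuming the `Config` base class and `read_config` hook as they exist in this release; the `msc9999_enabled` flag name is invented for illustration and is not a real Synapse option.

```python
# A hedged sketch only; `msc9999_enabled` is a made-up example flag.
from typing import Any, Dict

from synapse.config._base import Config


class ExperimentalConfig(Config):
    """Config section for enabling experimental features."""

    section = "experimental"

    def read_config(self, config: Dict[str, Any], **kwargs) -> None:
        experimental = config.get("experimental_features") or {}

        # Disabled by default, so admins must explicitly opt in; keeping the
        # MSC number in the name makes the feature easy to find and remove.
        self.msc9999_enabled: bool = experimental.get("msc9999_enabled", False)
```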
docs/development/url_previews.md

@@ -25,16 +25,14 @@ When Synapse is asked to preview a URL it does the following:
 3. Kicks off a background process to generate a preview:
    1. Checks the database cache by URL and timestamp and returns the result if it
       has not expired and was successful (a 2xx return code).
-   2. Checks if the URL matches an oEmbed pattern. If it does, fetch the oEmbed
-      response. If this is an image, replace the URL to fetch and continue. If
-      if it is HTML content, use the HTML as the document and continue.
-   3. If it doesn't match an oEmbed pattern, downloads the URL and stores it
-      into a file via the media storage provider and saves the local media
-      metadata.
-   5. If the media is an image:
+   2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it
+      does, update the URL to download.
+   3. Downloads the URL and stores it into a file via the media storage provider
+      and saves the local media metadata.
+   4. If the media is an image:
       1. Generates thumbnails.
       2. Generates an Open Graph response based on image properties.
-   6. If the media is HTML:
+   5. If the media is HTML:
       1. Decodes the HTML via the stored file.
       2. Generates an Open Graph response from the HTML.
       3. If an image exists in the Open Graph response:
@@ -42,6 +40,13 @@ When Synapse is asked to preview a URL it does the following:
          provider and saves the local media metadata.
       2. Generates thumbnails.
       3. Updates the Open Graph response based on image properties.
+   6. If the media is JSON and an oEmbed URL was found:
+      1. Convert the oEmbed response to an Open Graph response.
+      2. If a thumbnail or image is in the oEmbed response:
+         1. Downloads the URL and stores it into a file via the media storage
+            provider and saves the local media metadata.
+         2. Generates thumbnails.
+         3. Updates the Open Graph response based on image properties.
    7. Stores the result in the database cache.
 4. Returns the result.
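The numbered flow in the hunk above is easier to follow as straight-line code. The sketch below is a condensed, self-contained illustration of steps 3.1 through 3.7; every type and helper in it is invented for this example and is not Synapse's actual implementation.

```python
# Illustrative sketch only: stand-in types and helpers, not Synapse internals.
from dataclasses import dataclass
from typing import Dict, Optional

CACHE: Dict[str, dict] = {}  # stands in for the database cache (step 3.1)


@dataclass
class Media:
    content_type: str


def match_oembed_pattern(url: str) -> Optional[str]:
    """Step 3.2: return the oEmbed API URL if `url` matches a known provider."""
    return None


def download_and_store(url: str) -> Media:
    """Step 3.3: download via the media storage provider, save metadata."""
    return Media("text/html")


def generate_preview(url: str) -> dict:
    if url in CACHE:  # step 3.1: unexpired, successful cached result
        return CACHE[url]

    oembed_url = match_oembed_pattern(url)
    if oembed_url is not None:
        url = oembed_url  # update the URL to download

    media = download_and_store(url)

    og: dict = {}
    if media.content_type.startswith("image/"):  # step 3.4
        og = {"og:image": url}  # plus thumbnails in the real flow
    elif media.content_type == "text/html":  # step 3.5
        og = {"og:title": "..."}  # parsed from the stored HTML
    elif media.content_type == "application/json" and oembed_url:  # step 3.6
        og = {"og:title": "..."}  # converted from the oEmbed response

    CACHE[url] = og  # step 3.7
    return og
```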
docs/modules.md

@@ -38,6 +38,35 @@ async def user_may_create_room(user: str) -> bool
 Called when processing a room creation request. The module must return a `bool` indicating
 whether the given user (represented by their Matrix user ID) is allowed to create a room.
 
+### `user_may_create_room_with_invites`
+
+```python
+async def user_may_create_room_with_invites(
+    user: str,
+    invites: List[str],
+    threepid_invites: List[Dict[str, str]],
+) -> bool
+```
+
+Called when processing a room creation request (right after `user_may_create_room`).
+The module is given the Matrix user ID of the user trying to create a room, as well as a
+list of Matrix users to invite and a list of third-party identifiers (3PID, e.g. email
+addresses) to invite.
+
+A Matrix user to invite is represented by their Matrix user ID, and a 3PID to invite is
+represented by a dict that includes the 3PID medium (e.g. "email") through its
+`medium` key and its address (e.g. "alice@example.com") through its `address` key.
+
+See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types) for more
+information regarding third-party identifiers.
+
+If no invite and/or 3PID invite were specified in the room creation request, the
+corresponding list(s) will be empty.
+
+**Note**: This callback is not called when a room is cloned (e.g. during a room upgrade)
+since no invites are sent when cloning a room. To cover this case, modules also need to
+implement `user_may_create_room`.
+
 ### `user_may_create_room_alias`
 
 ```python
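For illustration, here is a hedged sketch of a module implementing the new callback. It assumes the module API's `register_spam_checker_callbacks` registration method; the class name and its allowed-email-domain policy are invented for this example.

```python
# A sketch of a module using this callback; the policy (only allow 3PID
# invites to a fixed email domain) is invented for illustration.
from typing import Dict, List


class DomainRestrictedRoomCreation:
    def __init__(self, config: dict, api):
        self._api = api
        self._allowed_domain = config.get("allowed_domain", "example.com")

        # Registers the callback under the name documented above.
        api.register_spam_checker_callbacks(
            user_may_create_room_with_invites=self.user_may_create_room_with_invites,
        )

    async def user_may_create_room_with_invites(
        self,
        user: str,
        invites: List[str],
        threepid_invites: List[Dict[str, str]],
    ) -> bool:
        # Reject the room creation if any email invite falls outside the
        # allowed domain; Matrix-user invites are accepted unconditionally.
        for invite in threepid_invites:
            if invite["medium"] == "email" and not invite["address"].endswith(
                "@" + self._allowed_domain
            ):
                return False
        return True
```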
@@ -136,9 +165,9 @@ class IsUserEvilResource(Resource):
         self.evil_users = config.get("evil_users") or []
 
     def render_GET(self, request: Request):
-        user = request.args.get(b"user")[0]
+        user = request.args.get(b"user")[0].decode()
         request.setHeader(b"Content-Type", b"application/json")
-        return json.dumps({"evil": user in self.evil_users})
+        return json.dumps({"evil": user in self.evil_users}).encode()
 
 
 class ListSpamChecker:
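The two `+` lines above are the JSON encoding/decoding fix tracked in #10845 (listed under Improved Documentation in the changelog): Twisted supplies `render_GET` query arguments as `bytes` and expects the rendered body to be `bytes`, while `evil_users` holds `str` values. A self-contained demonstration of the mismatch, with invented values:

```python
# Why the .decode()/.encode() calls matter: Twisted's request.args maps
# bytes keys to lists of bytes values, and render_* must return bytes.
import json

args = {b"user": [b"@alice:example.com"]}  # what Twisted provides
evil_users = ["@alice:example.com"]        # str values from the module config

user = args.get(b"user")[0]                # bytes
assert (user in evil_users) is False       # bytes never compare equal to str

user = args.get(b"user")[0].decode()       # str
assert (user in evil_users) is True

body = json.dumps({"evil": user in evil_users}).encode()  # bytes for Twisted
assert isinstance(body, bytes)
```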
docs/sample_config.yaml

@@ -2362,12 +2362,16 @@ user_directory:
   #enabled: false
 
   # Defines whether to search all users visible to your HS when searching
-  # the user directory, rather than limiting to users visible in public
-  # rooms. Defaults to false.
+  # the user directory. If false, search results will only contain users
+  # visible in public rooms and users sharing a room with the requester.
+  # Defaults to false.
   #
-  # If you set it true, you'll have to rebuild the user_directory search
-  # indexes, see:
-  # https://matrix-org.github.io/synapse/latest/user_directory.html
+  # NB. If you set this to true, and the last time the user_directory search
+  # indexes were (re)built was before Synapse 1.44, you'll have to
+  # rebuild the indexes in order to search through all known users.
+  # These indexes are built the first time Synapse starts; admins can
+  # manually trigger a rebuild following the instructions at
+  # https://matrix-org.github.io/synapse/latest/user_directory.html
   #
   # Uncomment to return search results containing all known users, even if that
   # user does not share a room with the requester.
docs/setup/installation.md

@@ -18,19 +18,179 @@ that your email address is probably `user@example.com` rather than
 
 ## Installing Synapse
 
-### Installing from source
+### Prebuilt packages
 
-(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
+Prebuilt packages are available for a number of platforms. These are recommended
+for most users.
 
-When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
+#### Docker images and Ansible playbooks
+
+There is an official synapse image available at
+<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
+the docker-compose file available at
+[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
+Further information on this including configuration options is available in the README
+on hub.docker.com.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+<https://hub.docker.com/r/avhost/docker-matrix/tags/>
+
+Slavi Pantaleev has created an Ansible playbook,
+which installs the official Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, Element, coturn,
+ma1sd, SSL support, etc.).
+For more details, see
+<https://github.com/spantaleev/matrix-docker-ansible-deploy>
+
+#### Debian/Ubuntu
+
+##### Matrix.org packages
+
+Matrix.org provides Debian/Ubuntu packages of Synapse, for the amd64
+architecture via <https://packages.matrix.org/debian/>.
+
+To install the latest release:
+
+```sh
+sudo apt install -y lsb-release wget apt-transport-https
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
+    sudo tee /etc/apt/sources.list.d/matrix-org.list
+sudo apt update
+sudo apt install matrix-synapse-py3
+```
+
+Packages are also published for release candidates. To enable the prerelease
+channel, add `prerelease` to the `sources.list` line. For example:
+
+```sh
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main prerelease" |
+    sudo tee /etc/apt/sources.list.d/matrix-org.list
+sudo apt update
+sudo apt install matrix-synapse-py3
+```
+
+The fingerprint of the repository signing key (as shown by `gpg
+/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
+`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
+
+##### Downstream Debian packages
+
+We do not recommend using the packages from the default Debian `buster`
+repository at this time, as they are old and suffer from known security
+vulnerabilities. You can install the latest version of Synapse from
+[our repository](#matrixorg-packages) or from `buster-backports`. Please
+see the [Debian documentation](https://backports.debian.org/Instructions/)
+for information on how to use backports.
+
+If you are using Debian `sid` or testing, Synapse is available in the default
+repositories and it should be possible to install it simply with:
+
+```sh
+sudo apt install matrix-synapse
+```
+
+##### Downstream Ubuntu packages
+
+We do not recommend using the packages in the default Ubuntu repository
+at this time, as they are old and suffer from known security vulnerabilities.
+The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
+
+#### Fedora
+
+Synapse is in the Fedora repositories as `matrix-synapse`:
+
+```sh
+sudo dnf install matrix-synapse
+```
+
+Oleg Girko provides Fedora RPMs at
+<https://obs.infoserver.lv/project/monitor/matrix-synapse>
+
+#### OpenSUSE
+
+Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+
+```sh
+sudo zypper install matrix-synapse
+```
+
+#### SUSE Linux Enterprise Server
+
+Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
+
+#### ArchLinux
+
+The quickest way to get up and running with ArchLinux is probably with the community package
+<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
+the necessary dependencies.
+
+pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
+
+```sh
+sudo pip install --upgrade pip
+```
+
+If you encounter an error with lib bcrypt causing a Wrong ELF Class:
+ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
+compile it under the right architecture. (This should not be needed if
+installing under virtualenv):
+
+```sh
+sudo pip uninstall py-bcrypt
+sudo pip install py-bcrypt
+```
+
+#### Void Linux
+
+Synapse can be found in the void repositories as 'synapse':
+
+```sh
+xbps-install -Su
+xbps-install -S synapse
+```
+
+#### FreeBSD
+
+Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
+
+- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
+- Packages: `pkg install py37-matrix-synapse`
+
+#### OpenBSD
+
+As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
+underlying the homeserver directory (defaults to `/var/synapse`) has to be
+mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
+and mounting it to `/var/synapse` should be taken into consideration.
+
+Installing Synapse:
+
+```sh
+doas pkg_add synapse
+```
+
+#### NixOS
+
+Robin Lambertz has packaged Synapse for NixOS at:
+<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
+
+### Installing as a Python module from PyPI
+
+It's also possible to install Synapse as a Python module from PyPI.
+
+When following this route please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
+
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5.2 or later, up to Python 3.9.
+- Python 3.6 or later, up to Python 3.9.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
 To install the Synapse homeserver run:
 
 ```sh
@@ -203,164 +363,6 @@ be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
 Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
 for Windows Server.
 
-### Prebuilt packages
-
-As an alternative to installing from source, prebuilt packages are available
-for a number of platforms.
-
-[...the remaining removed lines repeat the old "Prebuilt packages" subsection (Docker images and Ansible playbooks, Debian/Ubuntu, Fedora, OpenSUSE, SLES, ArchLinux, Void Linux, FreeBSD, OpenBSD, NixOS), verbatim duplicates of the content now placed earlier in the file...]
 
 ## Setting up Synapse
 
 Once you have installed synapse as above, you will need to configure it.
docs/upgrade.md

@@ -85,6 +85,13 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
 
+# Upgrading to v1.44.0
+
+## The URL preview cache is no longer mirrored to storage providers
+
+The `url_cache/` and `url_cache_thumbnails/` directories in the media store are
+no longer mirrored to storage providers. These two directories can be safely
+deleted from any configured storage providers to reclaim space.
+
 # Upgrading to v1.43.0
 
 ## The spaces summary APIs can now be handled by workers
mypy.ini (9 changes)

@@ -60,6 +60,7 @@ files =
   synapse/storage/databases/main/session.py,
   synapse/storage/databases/main/stream.py,
   synapse/storage/databases/main/ui_auth.py,
+  synapse/storage/databases/state,
   synapse/storage/database.py,
   synapse/storage/engines,
   synapse/storage/keys.py,
@@ -84,12 +85,18 @@ files =
   tests/handlers/test_room_summary.py,
   tests/handlers/test_send_email.py,
   tests/handlers/test_sync.py,
+  tests/handlers/test_user_directory.py,
   tests/rest/client/test_login.py,
   tests/rest/client/test_auth.py,
+  tests/storage/test_state.py,
+  tests/storage/test_user_directory.py,
   tests/util/test_itertools.py,
   tests/util/test_stream_change_cache.py
 
-[mypy-synapse.rest.client.*]
+[mypy-synapse.handlers.*]
+disallow_untyped_defs = True
+
+[mypy-synapse.rest.*]
 disallow_untyped_defs = True
 
 [mypy-synapse.util.batching_queue]
scripts-dev/release.py

@@ -276,7 +276,7 @@ def tag(gh_token: Optional[str]):
     if click.confirm("Edit text?", default=False):
        changes = click.edit(changes, require_save=False)
 
-    repo.create_tag(tag_name, message=changes)
+    repo.create_tag(tag_name, message=changes, sign=True)
 
     if not click.confirm("Push tag to GitHub?", default=True):
         print("")
scripts-dev/test_postgresql.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+# This script builds the Docker image to run the PostgreSQL tests, and then runs
+# the tests. It uses a dedicated tox environment so that we don't have to
+# rebuild it each time.
+
+# Command line arguments to this script are forwarded to "tox" and then to "trial".
+
+set -e
+
+# Build, and tag
+docker build docker/ \
+  --build-arg "UID=$(id -u)" \
+  --build-arg "GID=$(id -g)" \
+  -f docker/Dockerfile-pgtests \
+  -t synapsepgtests
+
+# Run, mounting the current directory into /src
+docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"
synapse/__init__.py

@@ -47,7 +47,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.43.0"
+__version__ = "1.44.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
synapse/api/auth.py

@@ -70,8 +70,8 @@ class Auth:
 
         self._auth_blocking = AuthBlocking(self.hs)
 
-        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
-        self._macaroon_secret_key = hs.config.macaroon_secret_key
+        self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
+        self._macaroon_secret_key = hs.config.key.macaroon_secret_key
         self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
 
     async def check_user_in_room(
synapse/api/auth_blocking.py

@@ -30,13 +30,15 @@ class AuthBlocking:
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
 
-        self._server_notices_mxid = hs.config.server_notices_mxid
-        self._hs_disabled = hs.config.hs_disabled
-        self._hs_disabled_message = hs.config.hs_disabled_message
-        self._admin_contact = hs.config.admin_contact
-        self._max_mau_value = hs.config.max_mau_value
-        self._limit_usage_by_mau = hs.config.limit_usage_by_mau
-        self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids
+        self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
+        self._hs_disabled = hs.config.server.hs_disabled
+        self._hs_disabled_message = hs.config.server.hs_disabled_message
+        self._admin_contact = hs.config.server.admin_contact
+        self._max_mau_value = hs.config.server.max_mau_value
+        self._limit_usage_by_mau = hs.config.server.limit_usage_by_mau
+        self._mau_limits_reserved_threepids = (
+            hs.config.server.mau_limits_reserved_threepids
+        )
         self._server_name = hs.hostname
         self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
 
@@ -79,7 +81,7 @@ class AuthBlocking:
             # We never block the server from doing actions on behalf of
             # users.
             return
-        elif requester.app_service and not self._track_appservice_user_ips:
+        if requester.app_service and not self._track_appservice_user_ips:
             # If we're authenticated as an appservice then we only block
             # auth if `track_appservice_user_ips` is set, as that option
             # implicitly means that application services are part of MAU
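This hunk, along with those in `synapse/api/auth.py`, `synapse/api/urls.py`, `synapse/app/_base.py`, and `synapse/app/admin_cmd.py` below, applies the "use direct references to config flags" refactor from #10812: flags formerly mirrored onto the root config object are now read from the config section that owns them. A minimal, self-contained sketch of the idea, with invented attribute names and values:

```python
# Sketch of the namespaced-config pattern; these classes are illustrative
# stand-ins, not Synapse's actual config classes.
from dataclasses import dataclass, field


@dataclass
class ServerConfig:
    max_mau_value: int = 0
    limit_usage_by_mau: bool = False


@dataclass
class RootConfig:
    server: ServerConfig = field(default_factory=ServerConfig)


config = RootConfig()

# Before the refactor, flags were read off the root object, so it was unclear
# which config section owned them:
#     config.max_mau_value
# After it, the owning section is explicit:
assert config.server.max_mau_value == 0
assert config.server.limit_usage_by_mau is False
```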
synapse/api/constants.py

@@ -121,7 +121,7 @@ class EventTypes:
     SpaceParent = "m.space.parent"
 
     MSC2716_INSERTION = "org.matrix.msc2716.insertion"
-    MSC2716_CHUNK = "org.matrix.msc2716.chunk"
+    MSC2716_BATCH = "org.matrix.msc2716.batch"
     MSC2716_MARKER = "org.matrix.msc2716.marker"
 
 
@@ -209,11 +209,11 @@ class EventContentFields:
 
     # Used on normal messages to indicate they were historically imported after the fact
     MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
-    # For "insertion" events to indicate what the next chunk ID should be in
+    # For "insertion" events to indicate what the next batch ID should be in
     # order to connect to it
-    MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id"
-    # Used on "chunk" events to indicate which insertion event it connects to
-    MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id"
+    MSC2716_NEXT_BATCH_ID = "org.matrix.msc2716.next_batch_id"
+    # Used on "batch" events to indicate which insertion event it connects to
+    MSC2716_BATCH_ID = "org.matrix.msc2716.batch_id"
    # For "marker" events
     MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"
synapse/api/room_versions.py

@@ -244,24 +244,8 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
     )
-    MSC2716 = RoomVersion(
-        "org.matrix.msc2716",
-        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V3,
-        StateResolutionVersions.V2,
-        enforce_key_validity=True,
-        special_case_aliases_auth=False,
-        strict_canonicaljson=True,
-        limit_notifications_power_levels=True,
-        msc2176_redaction_rules=False,
-        msc3083_join_rules=False,
-        msc3375_redaction_rules=False,
-        msc2403_knocking=True,
-        msc2716_historical=True,
-        msc2716_redactions=False,
-    )
-    MSC2716v2 = RoomVersion(
-        "org.matrix.msc2716v2",
+    MSC2716v3 = RoomVersion(
+        "org.matrix.msc2716v3",
         RoomDisposition.UNSTABLE,
         EventFormatVersions.V3,
         StateResolutionVersions.V2,
@@ -289,9 +273,9 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V6,
         RoomVersions.MSC2176,
         RoomVersions.V7,
-        RoomVersions.MSC2716,
         RoomVersions.V8,
         RoomVersions.V9,
+        RoomVersions.MSC2716v3,
     )
 }
@@ -39,12 +39,12 @@ class ConsentURIBuilder:
         Args:
             hs_config (synapse.config.homeserver.HomeServerConfig):
         """
-        if hs_config.form_secret is None:
+        if hs_config.key.form_secret is None:
             raise ConfigError("form_secret not set in config")
         if hs_config.server.public_baseurl is None:
             raise ConfigError("public_baseurl not set in config")

-        self._hmac_secret = hs_config.form_secret.encode("utf-8")
+        self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
         self._public_baseurl = hs_config.server.public_baseurl

     def build_user_consent_uri(self, user_id):
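This hunk is the first of many below that follow the same mechanical pattern from the 1.44 config refactor: flat attributes on the root config move under per-section config objects (hs.config.form_secret becomes hs.config.key.form_secret, hs.config.sentry_enabled becomes hs.config.metrics.sentry_enabled, and so on). A rough sketch of the shape of that change, with illustrative class names rather than the exact Synapse definitions:

    # Illustrative sketch of the namespacing, not the literal Synapse classes.
    class KeyConfig:
        def __init__(self, form_secret: str):
            self.form_secret = form_secret

    class RootConfig:
        def __init__(self) -> None:
            # pre-1.44 (flat):   self.form_secret = "..."
            # 1.44 (namespaced): each section object owns its own settings
            self.key = KeyConfig(form_secret="s3cret")

    config = RootConfig()
    assert config.key.form_secret == "s3cret"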
@@ -88,8 +88,8 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
         appname,
         soft_file_limit=config.soft_file_limit,
         gc_thresholds=config.gc_thresholds,
-        pid_file=config.worker_pid_file,
-        daemonize=config.worker_daemonize,
+        pid_file=config.worker.worker_pid_file,
+        daemonize=config.worker.worker_daemonize,
         print_pidfile=config.print_pidfile,
         logger=logger,
         run_command=run_command,

@@ -424,12 +424,14 @@ def setup_sentry(hs):
         hs (synapse.server.HomeServer)
     """

-    if not hs.config.sentry_enabled:
+    if not hs.config.metrics.sentry_enabled:
         return

     import sentry_sdk

-    sentry_sdk.init(dsn=hs.config.sentry_dsn, release=get_version_string(synapse))
+    sentry_sdk.init(
+        dsn=hs.config.metrics.sentry_dsn, release=get_version_string(synapse)
+    )

     # We set some default tags that give some context to this instance
     with sentry_sdk.configure_scope() as scope:

@@ -186,13 +186,13 @@ def start(config_options):
     config.worker.worker_app = "synapse.app.admin_cmd"

     if (
-        not config.worker_daemonize
-        and not config.worker_log_file
-        and not config.worker_log_config
+        not config.worker.worker_daemonize
+        and not config.worker.worker_log_file
+        and not config.worker.worker_log_config
     ):
         # Since we're meant to be run as a "command" let's not redirect stdio
         # unless we've actually set log config.
-        config.no_redirect_stdio = True
+        config.logging.no_redirect_stdio = True

     # Explicitly disable background processes
     config.update_user_directory = False

@@ -140,7 +140,7 @@ class KeyUploadServlet(RestServlet):
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.http_client = hs.get_simple_http_client()
-        self.main_uri = hs.config.worker_main_http_uri
+        self.main_uri = hs.config.worker.worker_main_http_uri

     async def on_POST(self, request: Request, device_id: Optional[str]):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)

@@ -321,7 +321,7 @@ class GenericWorkerServer(HomeServer):
             elif name == "federation":
                 resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
             elif name == "media":
-                if self.config.can_load_media_repo:
+                if self.config.media.can_load_media_repo:
                     media_repo = self.get_media_repository_resource()

                     # We need to serve the admin servlets for media on the

@@ -384,7 +384,7 @@ class GenericWorkerServer(HomeServer):
         logger.info("Synapse worker now listening on port %d", port)

     def start_listening(self):
-        for listener in self.config.worker_listeners:
+        for listener in self.config.worker.worker_listeners:
             if listener.type == "http":
                 self._listen_http(listener)
             elif listener.type == "manhole":

@@ -395,7 +395,7 @@ class GenericWorkerServer(HomeServer):
                     manhole_globals={"hs": self},
                 )
             elif listener.type == "metrics":
-                if not self.config.enable_metrics:
+                if not self.config.metrics.enable_metrics:
                     logger.warning(
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"

@@ -488,7 +488,7 @@ def start(config_options):
     register_start(_base.start, hs)

     # redirect stdio to the logs, if configured.
-    if not hs.config.no_redirect_stdio:
+    if not hs.config.logging.no_redirect_stdio:
         redirect_stdio_to_logs()

     _base.start_worker_reactor("synapse-generic-worker", config)

@@ -195,7 +195,7 @@ class SynapseHomeServer(HomeServer):
                 }
             )

-            if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+            if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
                 from synapse.rest.synapse.client.password_reset import (
                     PasswordResetSubmitTokenResource,
                 )

@@ -234,7 +234,7 @@ class SynapseHomeServer(HomeServer):
             )

         if name in ["media", "federation", "client"]:
-            if self.config.enable_media_repo:
+            if self.config.media.enable_media_repo:
                 media_repo = self.get_media_repository_resource()
                 resources.update(
                     {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}

@@ -269,7 +269,7 @@ class SynapseHomeServer(HomeServer):
             # https://twistedmatrix.com/trac/ticket/7678
             resources[WEB_CLIENT_PREFIX] = File(webclient_loc)

-        if name == "metrics" and self.config.enable_metrics:
+        if name == "metrics" and self.config.metrics.enable_metrics:
             resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

         if name == "replication":

@@ -278,7 +278,7 @@ class SynapseHomeServer(HomeServer):
         return resources

     def start_listening(self):
-        if self.config.redis_enabled:
+        if self.config.redis.redis_enabled:
             # If redis is enabled we connect via the replication command handler
             # in the same way as the workers (since we're effectively a client
             # rather than a server).

@@ -305,7 +305,7 @@ class SynapseHomeServer(HomeServer):
                 for s in services:
                     reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
             elif listener.type == "metrics":
-                if not self.config.enable_metrics:
+                if not self.config.metrics.enable_metrics:
                     logger.warning(
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"

@@ -366,7 +366,7 @@ def setup(config_options):

     async def start():
         # Load the OIDC provider metadatas, if OIDC is enabled.
-        if hs.config.oidc_enabled:
+        if hs.config.oidc.oidc_enabled:
             oidc = hs.get_oidc_handler()
             # Loading the provider metadata also ensures the provider config is valid.
             await oidc.load_metadata()

@@ -455,7 +455,7 @@ def main():
     hs = setup(sys.argv[1:])

     # redirect stdio to the logs, if configured.
-    if not hs.config.no_redirect_stdio:
+    if not hs.config.logging.no_redirect_stdio:
         redirect_stdio_to_logs()

     run(hs)

@@ -131,10 +131,12 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
     log_level = synapse_logger.getEffectiveLevel()
     stats["log_level"] = logging.getLevelName(log_level)

-    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
+    logger.info(
+        "Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats)
+    )
     try:
         await hs.get_proxied_http_client().put_json(
-            hs.config.report_stats_endpoint, stats
+            hs.config.metrics.report_stats_endpoint, stats
         )
     except Exception as e:
         logger.warning("Error reporting stats: %s", e)

@@ -188,7 +190,7 @@ def start_phone_stats_home(hs):
     clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
     # End of monthly active user settings

-    if hs.config.report_stats:
+    if hs.config.metrics.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
         clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats)


@@ -200,11 +200,7 @@ class Config:
     @classmethod
     def ensure_directory(cls, dir_path):
         dir_path = cls.abspath(dir_path)
-        try:
-            os.makedirs(dir_path)
-        except OSError as e:
-            if e.errno != errno.EEXIST:
-                raise
+        os.makedirs(dir_path, exist_ok=True)
         if not os.path.isdir(dir_path):
             raise ConfigError("%s is not a directory" % (dir_path,))
         return dir_path
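The ensure_directory cleanup leans on os.makedirs(..., exist_ok=True) (available since Python 3.2), which returns silently when the directory already exists, making the manual errno.EEXIST dance redundant; the method still checks os.path.isdir afterwards because a pre-existing non-directory at the path must remain an error. A quick standalone illustration:

    import os
    import tempfile

    path = os.path.join(tempfile.gettempdir(), "synapse-demo-dir")
    os.makedirs(path, exist_ok=True)  # creates the directory
    os.makedirs(path, exist_ok=True)  # second call is a no-op, no OSError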
@@ -693,8 +689,7 @@ class RootConfig:
             open_private_ports=config_args.open_private_ports,
         )

-        if not path_exists(config_dir_path):
-            os.makedirs(config_dir_path)
+        os.makedirs(config_dir_path, exist_ok=True)
         with open(config_path, "w") as config_file:
             config_file.write(config_str)
             config_file.write("\n\n# vim:ft=yaml")

@@ -13,6 +13,7 @@
 # limitations under the License.

 from os import path
+from typing import Optional

 from synapse.config import ConfigError

@@ -78,8 +79,8 @@ class ConsentConfig(Config):
     def __init__(self, *args):
         super().__init__(*args)

-        self.user_consent_version = None
-        self.user_consent_template_dir = None
+        self.user_consent_version: Optional[str] = None
+        self.user_consent_template_dir: Optional[str] = None
         self.user_consent_server_notice_content = None
         self.user_consent_server_notice_to_guests = False
         self.block_events_without_consent_error = None

@@ -94,7 +95,9 @@ class ConsentConfig(Config):
             return
         self.user_consent_version = str(consent_config["version"])
         self.user_consent_template_dir = self.abspath(consent_config["template_dir"])
-        if not path.isdir(self.user_consent_template_dir):
+        if not isinstance(self.user_consent_template_dir, str) or not path.isdir(
+            self.user_consent_template_dir
+        ):
             raise ConfigError(
                 "Could not find template directory '%s'"
                 % (self.user_consent_template_dir,)

@@ -322,7 +322,9 @@ def setup_logging(

     """
     log_config_path = (
-        config.worker_log_config if use_worker_options else config.log_config
+        config.worker.worker_log_config
+        if use_worker_options
+        else config.logging.log_config
     )

     # Perform one-time logging configuration.

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, List
+from typing import Any, List, Tuple, Type

 from synapse.util.module_loader import load_module

@@ -25,7 +25,7 @@ class PasswordAuthProviderConfig(Config):
     section = "authproviders"

     def read_config(self, config, **kwargs):
-        self.password_providers: List[Any] = []
+        self.password_providers: List[Tuple[Type, Any]] = []
         providers = []

         # We want to be backwards compatible with the old `ldap_config`

@@ -1447,7 +1447,7 @@ def read_gc_thresholds(thresholds):
         return None
     try:
         assert len(thresholds) == 3
-        return (int(thresholds[0]), int(thresholds[1]), int(thresholds[2]))
+        return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
     except Exception:
         raise ConfigError(
             "Value of `gc_threshold` must be a list of three integers if set"
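read_gc_thresholds turns the gc_threshold YAML list into the tuple that gc.set_threshold expects; dropping the parentheses in the return is purely cosmetic, since a bare comma-separated return already builds a tuple. A simplified restatement of the contract (using ValueError in place of Synapse's ConfigError to keep the sketch self-contained):

    from typing import Optional, Sequence, Tuple

    def read_gc_thresholds(thresholds: Optional[Sequence]) -> Optional[Tuple[int, int, int]]:
        # Simplified sketch; the real function raises ConfigError instead.
        if thresholds is None:
            return None
        try:
            assert len(thresholds) == 3
            return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
        except Exception:
            raise ValueError(
                "Value of `gc_threshold` must be a list of three integers if set"
            )

    assert read_gc_thresholds([700, 10, 10]) == (700, 10, 10)
    assert read_gc_thresholds(None) is None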
@@ -45,12 +45,16 @@ class UserDirectoryConfig(Config):
         #enabled: false

         # Defines whether to search all users visible to your HS when searching
-        # the user directory, rather than limiting to users visible in public
-        # rooms. Defaults to false.
+        # the user directory. If false, search results will only contain users
+        # visible in public rooms and users sharing a room with the requester.
+        # Defaults to false.
         #
-        # If you set it true, you'll have to rebuild the user_directory search
-        # indexes, see:
-        # https://matrix-org.github.io/synapse/latest/user_directory.html
+        # NB. If you set this to true, and the last time the user_directory search
+        # indexes were (re)built was before Synapse 1.44, you'll have to
+        # rebuild the indexes in order to search through all known users.
+        # These indexes are built the first time Synapse starts; admins can
+        # manually trigger a rebuild following the instructions at
+        # https://matrix-org.github.io/synapse/latest/user_directory.html
         #
         # Uncomment to return search results containing all known users, even if that
         # user does not share a room with the requester.

@@ -74,8 +74,8 @@ class ServerContextFactory(ContextFactory):
         context.set_options(
             SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
         )
-        context.use_certificate_chain_file(config.tls_certificate_file)
-        context.use_privatekey(config.tls_private_key)
+        context.use_certificate_chain_file(config.tls.tls_certificate_file)
+        context.use_privatekey(config.tls.tls_private_key)

         # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
         context.set_cipher_list(

@@ -102,7 +102,7 @@ class FederationPolicyForHTTPS:
         self._config = config

         # Check if we're using a custom list of a CA certificates
-        trust_root = config.federation_ca_trust_root
+        trust_root = config.tls.federation_ca_trust_root
         if trust_root is None:
             # Use CA root certs provided by OpenSSL
             trust_root = platformTrust()

@@ -113,7 +113,7 @@ class FederationPolicyForHTTPS:
         # moving to TLS 1.2 by default, we want to respect the config option if
         # it is set to 1.0 (which the alternate option, raiseMinimumTo, will not
         # let us do).
-        minTLS = _TLS_VERSION_MAP[config.federation_client_minimum_tls_version]
+        minTLS = _TLS_VERSION_MAP[config.tls.federation_client_minimum_tls_version]

         _verify_ssl = CertificateOptions(
             trustRoot=trust_root, insecurelyLowerMinimumTo=minTLS

@@ -125,10 +125,10 @@ class FederationPolicyForHTTPS:
         self._no_verify_ssl_context = _no_verify_ssl.getContext()
         self._no_verify_ssl_context.set_info_callback(_context_info_cb)

-        self._should_verify = self._config.federation_verify_certificates
+        self._should_verify = self._config.tls.federation_verify_certificates

         self._federation_certificate_verification_whitelist = (
-            self._config.federation_certificate_verification_whitelist
+            self._config.tls.federation_certificate_verification_whitelist
         )

     def get_options(self, host: bytes):

@@ -572,7 +572,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         super().__init__(hs)
         self.clock = hs.get_clock()
         self.client = hs.get_federation_http_client()
-        self.key_servers = self.config.key_servers
+        self.key_servers = self.config.key.key_servers

     async def _fetch_keys(
         self, keys_to_fetch: List[_FetchKeyRequest]

@@ -213,7 +213,7 @@ def check(

     if (
         event.type == EventTypes.MSC2716_INSERTION
-        or event.type == EventTypes.MSC2716_CHUNK
+        or event.type == EventTypes.MSC2716_BATCH
         or event.type == EventTypes.MSC2716_MARKER
     ):
         check_historical(room_version_obj, event, auth_events)

@@ -552,14 +552,14 @@ def check_historical(
     auth_events: StateMap[EventBase],
 ) -> None:
     """Check whether the event sender is allowed to send historical related
-    events like "insertion", "chunk", and "marker".
+    events like "insertion", "batch", and "marker".

     Returns:
         None

     Raises:
         AuthError if the event sender is not allowed to send historical related events
-        ("insertion", "chunk", and "marker").
+        ("insertion", "batch", and "marker").
     """
     # Ignore the auth checks in room versions that do not support historical
     # events

@@ -573,7 +573,7 @@ def check_historical(
     if user_level < historical_level:
         raise AuthError(
             403,
-            'You don\'t have permission to send send historical related events ("insertion", "chunk", and "marker")',
+            'You don\'t have permission to send send historical related events ("insertion", "batch", and "marker")',
         )


@@ -344,6 +344,18 @@ class EventBase(metaclass=abc.ABCMeta):
         # this will be a no-op if the event dict is already frozen.
         self._dict = freeze(self._dict)

+    def __str__(self):
+        return self.__repr__()
+
+    def __repr__(self):
+        return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
+            self.__class__.__name__,
+            self.event_id,
+            self.get("type", None),
+            self.get("state_key", None),
+            self.internal_metadata.is_outlier(),
+        )
+

 class FrozenEvent(EventBase):
     format_version = EventFormatVersions.V1  # All events of this type are V1
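Hoisting __str__ and __repr__ onto EventBase gives every event class one representation (the per-class copies are deleted in the next hunks) and adds the outlier flag to it. A tiny stand-in showing roughly what the shared format string renders as, with invented identifiers:

    class _DemoEvent:
        # Mirrors the EventBase.__repr__ format string above; all values invented.
        def __repr__(self) -> str:
            return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
                self.__class__.__name__,
                "$abc123",
                "m.room.message",
                None,
                False,
            )

    print(repr(_DemoEvent()))
    # <_DemoEvent event_id='$abc123', type='m.room.message', state_key=None, outlier=False>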
@@ -392,17 +404,6 @@ class FrozenEvent(EventBase):
     def event_id(self) -> str:
         return self._event_id

-    def __str__(self):
-        return self.__repr__()
-
-    def __repr__(self):
-        return "<FrozenEvent event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
-            self.get("event_id", None),
-            self.get("type", None),
-            self.get("state_key", None),
-            self.internal_metadata.is_outlier(),
-        )
-

 class FrozenEventV2(EventBase):
     format_version = EventFormatVersions.V2  # All events of this type are V2

@@ -478,17 +479,6 @@ class FrozenEventV2(EventBase):
         """
         return self.auth_events

-    def __str__(self):
-        return self.__repr__()
-
-    def __repr__(self):
-        return "<%s event_id=%r, type=%r, state_key=%r>" % (
-            self.__class__.__name__,
-            self.event_id,
-            self.get("type", None),
-            self.get("state_key", None),
-        )
-

 class FrozenEventV3(FrozenEventV2):
     """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""

@@ -80,9 +80,7 @@ class EventContext:

         (type, state_key) -> event_id

-        FIXME: what is this for an outlier? it seems ill-defined. It seems like
-        it could be either {}, or the state we were given by the remote
-        server, depending on $THINGS
+        For an outlier, this is {}

         Note that this is a private attribute: it should be accessed via
         ``get_current_state_ids``. _AsyncEventContext impl calculates this

@@ -96,7 +94,7 @@ class EventContext:

         (type, state_key) -> event_id

-        FIXME: again, what is this for an outlier?
+        For an outlier, this is {}

         As with _current_state_ids, this is a private attribute. It should be
         accessed via get_prev_state_ids.

@@ -130,6 +128,14 @@ class EventContext:
             delta_ids=delta_ids,
         )

+    @staticmethod
+    def for_outlier():
+        """Return an EventContext instance suitable for persisting an outlier event"""
+        return EventContext(
+            current_state_ids={},
+            prev_state_ids={},
+        )
+
     async def serialize(self, event: EventBase, store: "DataStore") -> dict:
         """Converts self to a type that can be serialized as JSON, and then
         deserialized by `deserialize`
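EventContext.for_outlier replaces hand-rolled empty contexts at the call sites that persist outliers, and the docstring fixes above pin down the previously "ill-defined" answer: for an outlier both state maps are simply {}. A hedged sketch of a caller (the persist_event plumbing mirrors Synapse's storage layer, but treat the exact call as an assumption):

    async def persist_outlier(event, storage):
        # Outliers (events stored outside the room's resolved DAG) carry no
        # state, so an empty context is exactly right.
        context = EventContext.for_outlier()
        await storage.persistence.persist_event(event, context)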
@@ -46,6 +46,9 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
 ]
 USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
 USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
+USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
+    [str, List[str], List[Dict[str, str]]], Awaitable[bool]
+]
 USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]]
 USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
 CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]]

@@ -78,7 +81,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
     """
     spam_checkers: List[Any] = []
     api = hs.get_module_api()
-    for module, config in hs.config.spam_checkers:
+    for module, config in hs.config.spamchecker.spam_checkers:
         # Older spam checkers don't accept the `api` argument, so we
         # try and detect support.
         spam_args = inspect.getfullargspec(module)

@@ -164,6 +167,9 @@ class SpamChecker:
         self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
         self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
         self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
+        self._user_may_create_room_with_invites_callbacks: List[
+            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
+        ] = []
         self._user_may_create_room_alias_callbacks: List[
             USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
         ] = []

@@ -183,6 +189,9 @@ class SpamChecker:
         check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
         user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
         user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
+        user_may_create_room_with_invites: Optional[
+            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
+        ] = None,
         user_may_create_room_alias: Optional[
             USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
         ] = None,

@@ -203,6 +212,11 @@ class SpamChecker:
         if user_may_create_room is not None:
             self._user_may_create_room_callbacks.append(user_may_create_room)

+        if user_may_create_room_with_invites is not None:
+            self._user_may_create_room_with_invites_callbacks.append(
+                user_may_create_room_with_invites,
+            )
+
         if user_may_create_room_alias is not None:
             self._user_may_create_room_alias_callbacks.append(
                 user_may_create_room_alias,

@@ -283,6 +297,34 @@ class SpamChecker:

         return True

+    async def user_may_create_room_with_invites(
+        self,
+        userid: str,
+        invites: List[str],
+        threepid_invites: List[Dict[str, str]],
+    ) -> bool:
+        """Checks if a given user may create a room with invites
+
+        If this method returns false, the creation request will be rejected.
+
+        Args:
+            userid: The ID of the user attempting to create a room
+            invites: The IDs of the Matrix users to be invited if the room creation is
+                allowed.
+            threepid_invites: The threepids to be invited if the room creation is allowed,
+                as a dict including a "medium" key indicating the threepid's medium (e.g.
+                "email") and an "address" key indicating the threepid's address (e.g.
+                "alice@example.com")
+
+        Returns:
+            True if the user may create the room, otherwise False
+        """
+        for callback in self._user_may_create_room_with_invites_callbacks:
+            if await callback(userid, invites, threepid_invites) is False:
+                return False
+
+        return True
+
     async def user_may_create_room_alias(
         self, userid: str, room_alias: RoomAlias
     ) -> bool:
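This wires the user_may_create_room_with_invites feature from the changelog into the modern module API: a module hands its coroutine to register_spam_checker_callbacks, and room creation is rejected if any registered callback returns False. A minimal module sketch (the ten-invite cap is an invented example policy, not a Synapse default):

    from typing import Dict, List

    from synapse.module_api import ModuleApi


    class InviteCapModule:
        """Example module: reject room creations that invite too many people."""

        def __init__(self, config: dict, api: ModuleApi):
            self._max_invites = 10  # invented example policy
            api.register_spam_checker_callbacks(
                user_may_create_room_with_invites=self.user_may_create_room_with_invites,
            )

        async def user_may_create_room_with_invites(
            self,
            userid: str,
            invites: List[str],
            threepid_invites: List[Dict[str, str]],
        ) -> bool:
            # Count Matrix-ID invites and 3PID (e.g. email) invites together.
            return len(invites) + len(threepid_invites) <= self._max_invites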
@@ -42,10 +42,10 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
     """Wrapper that loads a third party event rules module configured using the old
     configuration, and registers the hooks they implement.
     """
-    if hs.config.third_party_event_rules is None:
+    if hs.config.thirdpartyrules.third_party_event_rules is None:
         return

-    module, config = hs.config.third_party_event_rules
+    module, config = hs.config.thirdpartyrules.third_party_event_rules

     api = hs.get_module_api()
     third_party_rules = module(config=config, module_api=api)

@@ -141,9 +141,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
     elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
         add_fields("redacts")
     elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
-        add_fields(EventContentFields.MSC2716_NEXT_CHUNK_ID)
-    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_CHUNK:
-        add_fields(EventContentFields.MSC2716_CHUNK_ID)
+        add_fields(EventContentFields.MSC2716_NEXT_BATCH_ID)
+    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
+        add_fields(EventContentFields.MSC2716_BATCH_ID)
     elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
         add_fields(EventContentFields.MSC2716_MARKER_INSERTION)
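prune_event_dict implements redaction: an event's content is stripped down to whatever add_fields whitelists, so in rooms with msc2716_redactions a redacted batch event keeps only its batch ID. A standalone sketch of that whitelist behaviour (not the real function; the ID is invented):

    def redact_batch_event_content(content: dict) -> dict:
        # Only the field whitelisted by the hunk above survives redaction.
        allowed = {"org.matrix.msc2716.batch_id"}
        return {k: v for k, v in content.items() if k in allowed}

    content = {
        "org.matrix.msc2716.batch_id": "insertion-abc123",  # invented ID
        "body": "text that redaction must remove",
    }
    assert redact_batch_event_content(content) == {
        "org.matrix.msc2716.batch_id": "insertion-abc123"
    }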
@@ -501,8 +501,6 @@ class FederationClient(FederationBase):
             destination, auth_chain, outlier=True, room_version=room_version
         )

-        signed_auth.sort(key=lambda e: e.depth)
-
         return signed_auth

     def _is_unknown_endpoint(

@@ -1237,7 +1237,7 @@ class FederationHandlerRegistry:
         self._edu_type_to_instance[edu_type] = instance_names

     async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
-        if not self.config.use_presence and edu_type == EduTypes.Presence:
+        if not self.config.server.use_presence and edu_type == EduTypes.Presence:
             return

         # Check if we have a handler on this instance

@@ -594,7 +594,7 @@ class FederationSender(AbstractFederationSender):
             destinations (list[str])
         """

-        if not states or not self.hs.config.use_presence:
+        if not states or not self.hs.config.server.use_presence:
             # No-op if presence is disabled.
             return

@@ -560,7 +560,7 @@ class PerDestinationQueue:

         assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"

-        return (edus, now_stream_id)
+        return edus, now_stream_id

     async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
         last_device_stream_id = self._last_device_stream_id

@@ -593,7 +593,7 @@ class PerDestinationQueue:
             stream_id,
         )

-        return (edus, stream_id)
+        return edus, stream_id

     def _start_catching_up(self) -> None:
         """

@@ -49,7 +49,9 @@ class Authenticator:
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
-        self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+        self.federation_domain_whitelist = (
+            hs.config.federation.federation_domain_whitelist
+        )
         self.notifier = hs.get_notifier()

         self.replication_client = None

@@ -847,16 +847,16 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
             UserID.from_string(requester_user_id)
         )
         if not is_admin:
-            if not self.hs.config.enable_group_creation:
+            if not self.hs.config.groups.enable_group_creation:
                 raise SynapseError(
                     403, "Only a server admin can create groups on this server"
                 )
             localpart = group_id_obj.localpart
-            if not localpart.startswith(self.hs.config.group_creation_prefix):
+            if not localpart.startswith(self.hs.config.groups.group_creation_prefix):
                 raise SynapseError(
                     400,
                     "Can only create groups with prefix %r on this server"
-                    % (self.hs.config.group_creation_prefix,),
+                    % (self.hs.config.groups.group_creation_prefix,),
                 )

         profile = content.get("profile", {})

@@ -16,6 +16,7 @@ import logging
 from typing import TYPE_CHECKING, Optional

 from synapse.api.ratelimiting import Ratelimiter
+from synapse.types import Requester

 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -63,16 +64,21 @@ class BaseHandler:

         self.event_builder_factory = hs.get_event_builder_factory()

-    async def ratelimit(self, requester, update=True, is_admin_redaction=False):
+    async def ratelimit(
+        self,
+        requester: Requester,
+        update: bool = True,
+        is_admin_redaction: bool = False,
+    ) -> None:
         """Ratelimits requests.

         Args:
-            requester (Requester)
-            update (bool): Whether to record that a request is being processed.
+            requester
+            update: Whether to record that a request is being processed.
                 Set to False when doing multiple checks for one request (e.g.
                 to check up front if we would reject the request), and set to
                 True for the last call for a given request.
-            is_admin_redaction (bool): Whether this is a room admin/moderator
+            is_admin_redaction: Whether this is a room admin/moderator
                 redacting an event. If so then we may apply different
                 ratelimits depending on config.


@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import random
-from typing import TYPE_CHECKING, List, Tuple
+from typing import TYPE_CHECKING, Collection, List, Optional, Tuple

 from synapse.replication.http.account_data import (
     ReplicationAddTagRestServlet,

@@ -21,6 +21,7 @@ from synapse.replication.http.account_data import (
     ReplicationRoomAccountDataRestServlet,
     ReplicationUserAccountDataRestServlet,
 )
+from synapse.streams import EventSource
 from synapse.types import JsonDict, UserID

 if TYPE_CHECKING:

@@ -163,7 +164,7 @@ class AccountDataHandler:
         return response["max_stream_id"]


-class AccountDataEventSource:
+class AccountDataEventSource(EventSource[int, JsonDict]):
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()

@@ -171,7 +172,13 @@ class AccountDataEventSource:
         return self.store.get_max_account_data_stream_id()

     async def get_new_events(
-        self, user: UserID, from_key: int, **kwargs
+        self,
+        user: UserID,
+        from_key: int,
+        limit: Optional[int],
+        room_ids: Collection[str],
+        is_guest: bool,
+        explicit_room_id: Optional[str] = None,
     ) -> Tuple[List[JsonDict], int]:
         user_id = user.to_string()
         last_stream_id = from_key
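Subclassing EventSource[int, JsonDict] and spelling out the full get_new_events signature (instead of swallowing everything in **kwargs) lets mypy hold every stream source to one shared interface. A condensed sketch of the generic base being conformed to; the real definition lives in synapse.streams and takes more parameters:

    import abc
    from typing import Generic, List, Tuple, TypeVar

    K = TypeVar("K")  # stream key type: an int stream ID for account data
    R = TypeVar("R")  # emitted item type: JsonDict for account data


    class EventSource(Generic[K, R], metaclass=abc.ABCMeta):
        # Condensed sketch; argument list trimmed for brevity.
        @abc.abstractmethod
        async def get_new_events(self, user, from_key: K, **kwargs) -> Tuple[List[R], K]:
            ...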
@@ -47,7 +47,7 @@ class AccountValidityHandler:
         self.send_email_handler = self.hs.get_send_email_handler()
         self.clock = self.hs.get_clock()

-        self._app_name = self.hs.config.email_app_name
+        self._app_name = self.hs.config.email.email_app_name

         self._account_validity_enabled = (
             hs.config.account_validity.account_validity_enabled

@@ -99,7 +99,7 @@ class AccountValidityHandler:
         on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
         on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
         on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
-    ):
+    ) -> None:
         """Register callbacks from module for each hook."""
         if is_user_expired is not None:
             self._is_user_expired_callbacks.append(is_user_expired)

@@ -165,7 +165,7 @@ class AccountValidityHandler:

         return False

-    async def on_user_registration(self, user_id: str):
+    async def on_user_registration(self, user_id: str) -> None:
         """Tell third-party modules about a user's registration.

         Args:

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Union

 from prometheus_client import Counter

@@ -52,13 +52,13 @@ class ApplicationServicesHandler:
         self.scheduler = hs.get_application_service_scheduler()
         self.started_scheduler = False
         self.clock = hs.get_clock()
-        self.notify_appservices = hs.config.notify_appservices
+        self.notify_appservices = hs.config.appservice.notify_appservices
         self.event_sources = hs.get_event_sources()

         self.current_max = 0
         self.is_processing = False

-    def notify_interested_services(self, max_token: RoomStreamToken):
+    def notify_interested_services(self, max_token: RoomStreamToken) -> None:
         """Notifies (pushes) all application services interested in this event.

         Pushing is done asynchronously, so this method won't block for any

@@ -82,7 +82,7 @@ class ApplicationServicesHandler:
         self._notify_interested_services(max_token)

     @wrap_as_background_process("notify_interested_services")
-    async def _notify_interested_services(self, max_token: RoomStreamToken):
+    async def _notify_interested_services(self, max_token: RoomStreamToken) -> None:
         with Measure(self.clock, "notify_interested_services"):
             self.is_processing = True
             try:

@@ -100,7 +100,7 @@ class ApplicationServicesHandler:
                 for event in events:
                     events_by_room.setdefault(event.room_id, []).append(event)

-                async def handle_event(event):
+                async def handle_event(event: EventBase) -> None:
                     # Gather interested services
                     services = await self._get_services_for_event(event)
                     if len(services) == 0:

@@ -116,9 +116,9 @@ class ApplicationServicesHandler:

                     if not self.started_scheduler:

-                        async def start_scheduler():
+                        async def start_scheduler() -> None:
                             try:
-                                return await self.scheduler.start()
+                                await self.scheduler.start()
                             except Exception:
                                 logger.error("Application Services Failure")

@@ -137,7 +137,7 @@ class ApplicationServicesHandler:
                         "appservice_sender"
                     ).observe((now - ts) / 1000)

-                async def handle_room_events(events):
+                async def handle_room_events(events: Iterable[EventBase]) -> None:
                     for event in events:
                         await handle_event(event)

@@ -184,7 +184,7 @@ class ApplicationServicesHandler:
         stream_key: str,
         new_token: Optional[int],
         users: Optional[Collection[Union[str, UserID]]] = None,
-    ):
+    ) -> None:
         """This is called by the notifier in the background
         when a ephemeral event handled by the homeserver.

@@ -226,7 +226,7 @@ class ApplicationServicesHandler:
         stream_key: str,
         new_token: Optional[int],
         users: Collection[Union[str, UserID]],
-    ):
+    ) -> None:
         logger.debug("Checking interested services for %s" % (stream_key))
         with Measure(self.clock, "notify_interested_services_ephemeral"):
             for service in services:

@@ -254,7 +254,7 @@ class ApplicationServicesHandler:
     async def _handle_typing(
         self, service: ApplicationService, new_token: int
     ) -> List[JsonDict]:
-        typing_source = self.event_sources.sources["typing"]
+        typing_source = self.event_sources.sources.typing
         # Get the typing events from just before current
         typing, _ = await typing_source.get_new_events_as(
             service=service,

@@ -269,7 +269,7 @@ class ApplicationServicesHandler:
         from_key = await self.store.get_type_stream_id_for_appservice(
             service, "read_receipt"
         )
-        receipts_source = self.event_sources.sources["receipt"]
+        receipts_source = self.event_sources.sources.receipt
         receipts, _ = await receipts_source.get_new_events_as(
             service=service, from_key=from_key
         )

@@ -279,7 +279,7 @@ class ApplicationServicesHandler:
         self, service: ApplicationService, users: Collection[Union[str, UserID]]
     ) -> List[JsonDict]:
         events: List[JsonDict] = []
-        presence_source = self.event_sources.sources["presence"]
+        presence_source = self.event_sources.sources.presence
         from_key = await self.store.get_type_stream_id_for_appservice(
             service, "presence"
         )
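The switch from sources["typing"] to sources.typing reflects EventSources.sources becoming a typed container instead of a plain dict, so each stream source carries a precise static type. A rough attrs-based sketch of that shape (field names mirror the accesses above; the class layout is an assumption, and Any stands in for the concrete source classes):

    from typing import Any

    import attr


    @attr.s(auto_attribs=True, frozen=True, slots=True)
    class _EventSourcesInner:
        typing: Any
        receipt: Any
        presence: Any
        account_data: Any

    sources = _EventSourcesInner(
        typing=object(), receipt=object(), presence=object(), account_data=object()
    )
    typing_source = sources.typing  # attribute access replaces sources["typing"]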
synapse/handlers/auth.py

@@ -29,6 +29,7 @@ from typing import (
     Mapping,
     Optional,
     Tuple,
+    Type,
     Union,
     cast,
 )
@@ -209,15 +210,15 @@ class AuthHandler(BaseHandler):
 
         self.password_providers = [
             PasswordProvider.load(module, config, account_handler)
-            for module, config in hs.config.password_providers
+            for module, config in hs.config.authproviders.password_providers
         ]
 
         logger.info("Extra password_providers: %s", self.password_providers)
 
         self.hs = hs  # FIXME better possibility to access registrationHandler later?
         self.macaroon_gen = hs.get_macaroon_generator()
-        self._password_enabled = hs.config.password_enabled
-        self._password_localdb_enabled = hs.config.password_localdb_enabled
+        self._password_enabled = hs.config.auth.password_enabled
+        self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
 
         # start out by assuming PASSWORD is enabled; we will remove it later if not.
         login_types = set()
@@ -249,7 +250,7 @@ class AuthHandler(BaseHandler):
         )
 
         # The number of seconds to keep a UI auth session active.
-        self._ui_auth_session_timeout = hs.config.ui_auth_session_timeout
+        self._ui_auth_session_timeout = hs.config.auth.ui_auth_session_timeout
 
         # Ratelimitier for failed /login attempts
         self._failed_login_attempts_ratelimiter = Ratelimiter(
@@ -276,23 +277,25 @@ class AuthHandler(BaseHandler):
         # after the SSO completes and before redirecting them back to their client.
         # It notifies the user they are about to give access to their matrix account
         # to the client.
-        self._sso_redirect_confirm_template = hs.config.sso_redirect_confirm_template
+        self._sso_redirect_confirm_template = (
+            hs.config.sso.sso_redirect_confirm_template
+        )
 
         # The following template is shown during user interactive authentication
         # in the fallback auth scenario. It notifies the user that they are
         # authenticating for an operation to occur on their account.
-        self._sso_auth_confirm_template = hs.config.sso_auth_confirm_template
+        self._sso_auth_confirm_template = hs.config.sso.sso_auth_confirm_template
 
         # The following template is shown during the SSO authentication process if
         # the account is deactivated.
         self._sso_account_deactivated_template = (
-            hs.config.sso_account_deactivated_template
+            hs.config.sso.sso_account_deactivated_template
         )
 
         self._server_name = hs.config.server.server_name
 
         # cast to tuple for use with str.startswith
-        self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
+        self._whitelisted_sso_clients = tuple(hs.config.sso.sso_client_whitelist)
 
         # A mapping of user ID to extra attributes to include in the login
         # response.
@@ -439,7 +442,7 @@ class AuthHandler(BaseHandler):
 
         return ui_auth_types
 
-    def get_enabled_auth_types(self):
+    def get_enabled_auth_types(self) -> Iterable[str]:
        """Return the enabled user-interactive authentication types
 
        Returns the UI-Auth types which are supported by the homeserver's current
@@ -702,7 +705,7 @@ class AuthHandler(BaseHandler):
         except StoreError:
             raise SynapseError(400, "Unknown session ID: %s" % (session_id,))
 
-    async def _expire_old_sessions(self):
+    async def _expire_old_sessions(self) -> None:
        """
        Invalidate any user interactive authentication sessions that have expired.
        """
@@ -738,19 +741,19 @@ class AuthHandler(BaseHandler):
         return canonical_id
 
     def _get_params_recaptcha(self) -> dict:
-        return {"public_key": self.hs.config.recaptcha_public_key}
+        return {"public_key": self.hs.config.captcha.recaptcha_public_key}
 
     def _get_params_terms(self) -> dict:
         return {
             "policies": {
                 "privacy_policy": {
-                    "version": self.hs.config.user_consent_version,
+                    "version": self.hs.config.consent.user_consent_version,
                     "en": {
-                        "name": self.hs.config.user_consent_policy_name,
+                        "name": self.hs.config.consent.user_consent_policy_name,
                         "url": "%s_matrix/consent?v=%s"
                         % (
                             self.hs.config.server.public_baseurl,
-                            self.hs.config.user_consent_version,
+                            self.hs.config.consent.user_consent_version,
                         ),
                     },
                 }
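
The recurring change in the hunks above moves flat options on the root config object (such as hs.config.password_enabled) onto per-section sub-objects (hs.config.auth.password_enabled, hs.config.sso..., hs.config.consent...). A minimal sketch of the idea, with class and option names invented for illustration rather than Synapse's actual config machinery:

    # Hypothetical illustration of grouping flat config attributes into sections.
    class AuthConfig:
        def __init__(self) -> None:
            self.password_enabled = True
            self.password_pepper = ""

    class RootConfig:
        def __init__(self) -> None:
            # before: password_enabled lived directly on the root object
            self.auth = AuthConfig()

    config = RootConfig()
    assert config.auth.password_enabled  # the new access path used in the diff
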
@@ -1015,7 +1018,7 @@ class AuthHandler(BaseHandler):
     def can_change_password(self) -> bool:
         """Get whether users on this server are allowed to change or set a password.
 
-        Both `config.password_enabled` and `config.password_localdb_enabled` must be true.
+        Both `config.auth.password_enabled` and `config.auth.password_localdb_enabled` must be true.
 
         Note that any account (even SSO accounts) are allowed to add passwords if the above
         is true.
@@ -1347,12 +1350,12 @@ class AuthHandler(BaseHandler):
         try:
             res = self.macaroon_gen.verify_short_term_login_token(login_token)
         except Exception:
-            raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
+            raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
 
         await self.auth.check_auth_blocking(res.user_id)
         return res
 
-    async def delete_access_token(self, access_token: str):
+    async def delete_access_token(self, access_token: str) -> None:
         """Invalidate a single access token
 
         Args:
@@ -1381,7 +1384,7 @@ class AuthHandler(BaseHandler):
         user_id: str,
         except_token_id: Optional[int] = None,
         device_id: Optional[str] = None,
-    ):
+    ) -> None:
         """Invalidate access tokens belonging to a user
 
         Args:
@@ -1409,7 +1412,7 @@ class AuthHandler(BaseHandler):
 
     async def add_threepid(
         self, user_id: str, medium: str, address: str, validated_at: int
-    ):
+    ) -> None:
         # check if medium has a valid value
         if medium not in ["email", "msisdn"]:
             raise SynapseError(
@@ -1480,12 +1483,12 @@ class AuthHandler(BaseHandler):
             Hashed password.
         """
 
-        def _do_hash():
+        def _do_hash() -> str:
             # Normalise the Unicode in the password
             pw = unicodedata.normalize("NFKC", password)
 
             return bcrypt.hashpw(
-                pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"),
+                pw.encode("utf8") + self.hs.config.auth.password_pepper.encode("utf8"),
                 bcrypt.gensalt(self.bcrypt_rounds),
             ).decode("ascii")
 
@@ -1504,12 +1507,12 @@ class AuthHandler(BaseHandler):
             Whether self.hash(password) == stored_hash.
         """
 
-        def _do_validate_hash(checked_hash: bytes):
+        def _do_validate_hash(checked_hash: bytes) -> bool:
             # Normalise the Unicode in the password
             pw = unicodedata.normalize("NFKC", password)
 
             return bcrypt.checkpw(
-                pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"),
+                pw.encode("utf8") + self.hs.config.auth.password_pepper.encode("utf8"),
                 checked_hash,
             )
 
@@ -1581,7 +1584,7 @@ class AuthHandler(BaseHandler):
         client_redirect_url: str,
         extra_attributes: Optional[JsonDict] = None,
         new_user: bool = False,
-    ):
+    ) -> None:
        """Having figured out a mxid for this user, complete the HTTP request
 
        Args:
@@ -1627,7 +1630,7 @@ class AuthHandler(BaseHandler):
         extra_attributes: Optional[JsonDict] = None,
         new_user: bool = False,
         user_profile_data: Optional[ProfileInfo] = None,
-    ):
+    ) -> None:
        """
        The synchronous portion of complete_sso_login.
 
@@ -1726,7 +1729,7 @@ class AuthHandler(BaseHandler):
             del self._extra_attributes[user_id]
 
     @staticmethod
-    def add_query_param_to_url(url: str, param_name: str, param: Any):
+    def add_query_param_to_url(url: str, param_name: str, param: Any) -> str:
         url_parts = list(urllib.parse.urlparse(url))
         query = urllib.parse.parse_qsl(url_parts[4], keep_blank_values=True)
         query.append((param_name, param))
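
The two hashing hunks above only change where the pepper is read from (now hs.config.auth.password_pepper); the scheme itself is standard peppered bcrypt over the NFKC-normalised password. A standalone sketch mirroring the diff's own logic (requires the bcrypt package; the rounds value is illustrative):

    import unicodedata
    import bcrypt

    def hash_password(password: str, pepper: str, rounds: int = 12) -> str:
        # Normalise the Unicode in the password, then append the server pepper.
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.hashpw(
            pw.encode("utf8") + pepper.encode("utf8"),
            bcrypt.gensalt(rounds),
        ).decode("ascii")

    def validate_password(password: str, pepper: str, stored_hash: bytes) -> bool:
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.checkpw(pw.encode("utf8") + pepper.encode("utf8"), stored_hash)
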
@@ -1734,9 +1737,9 @@ class AuthHandler(BaseHandler):
         return urllib.parse.urlunparse(url_parts)
 
 
-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
 class MacaroonGenerator:
-    hs = attr.ib()
+    hs: "HomeServer"
 
     def generate_guest_access_token(self, user_id: str) -> str:
         macaroon = self._generate_base_macaroon(user_id)
@@ -1801,7 +1804,7 @@ class MacaroonGenerator:
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key,
+            key=self.hs.config.key.macaroon_secret_key,
         )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
@@ -1816,7 +1819,9 @@ class PasswordProvider:
     """
 
     @classmethod
-    def load(cls, module, config, module_api: ModuleApi) -> "PasswordProvider":
+    def load(
+        cls, module: Type, config: JsonDict, module_api: ModuleApi
+    ) -> "PasswordProvider":
         try:
             pp = module(config=config, account_handler=module_api)
         except Exception as e:
@@ -1824,7 +1829,7 @@ class PasswordProvider:
             raise
         return cls(pp, module_api)
 
-    def __init__(self, pp, module_api: ModuleApi):
+    def __init__(self, pp: "PasswordProvider", module_api: ModuleApi):
         self._pp = pp
         self._module_api = module_api
 
@@ -1838,7 +1843,7 @@ class PasswordProvider:
         if g:
             self._supported_login_types.update(g())
 
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self._pp)
 
     def get_supported_login_types(self) -> Mapping[str, Iterable[str]]:
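
The MacaroonGenerator hunk above is part of a wider attrs modernisation in this commit: auto_attribs=True lets plain annotations replace explicit attr.ib() calls. A minimal sketch of the equivalence (class names invented for the example):

    import attr

    # Old style: explicit attr.ib() with a `type` argument.
    @attr.s(slots=True)
    class OldStyle:
        hs = attr.ib()

    # New style: auto_attribs turns annotations into attribute definitions.
    @attr.s(slots=True, auto_attribs=True)
    class NewStyle:
        hs: str

    assert OldStyle("hs").hs == NewStyle("hs").hs
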
@@ -1876,19 +1881,19 @@ class PasswordProvider:
         """
         # first grandfather in a call to check_password
         if login_type == LoginType.PASSWORD:
-            g = getattr(self._pp, "check_password", None)
-            if g:
+            check_password = getattr(self._pp, "check_password", None)
+            if check_password:
                 qualified_user_id = self._module_api.get_qualified_user_id(username)
-                is_valid = await self._pp.check_password(
+                is_valid = await check_password(
                     qualified_user_id, login_dict["password"]
                 )
                 if is_valid:
                     return qualified_user_id, None
 
-        g = getattr(self._pp, "check_auth", None)
-        if not g:
+        check_auth = getattr(self._pp, "check_auth", None)
+        if not check_auth:
             return None
-        result = await g(username, login_type, login_dict)
+        result = await check_auth(username, login_type, login_dict)
 
         # Check if the return value is a str or a tuple
         if isinstance(result, str):
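
The renamed locals above implement a "grandfathering" lookup: a password provider may define the legacy check_password or the newer check_auth, and both are discovered with getattr at call time. A self-contained sketch of the pattern, with the provider class and credentials invented for illustration:

    import asyncio
    from typing import Any, Optional

    class LegacyProvider:
        async def check_password(self, user_id: str, password: str) -> bool:
            return password == "hunter2"

    async def authenticate(provider: Any, user_id: str, password: str) -> Optional[str]:
        # First grandfather in a call to the legacy hook, if present.
        check_password = getattr(provider, "check_password", None)
        if check_password:
            if await check_password(user_id, password):
                return user_id
        # Otherwise fall back to the newer, generic hook.
        check_auth = getattr(provider, "check_auth", None)
        if check_auth is None:
            return None
        return await check_auth(user_id, "m.login.password", {"password": password})

    print(asyncio.run(authenticate(LegacyProvider(), "@alice:example.org", "hunter2")))
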
synapse/handlers/cas.py

@@ -34,20 +34,20 @@ logger = logging.getLogger(__name__)
 class CasError(Exception):
     """Used to catch errors when validating the CAS ticket."""
 
-    def __init__(self, error, error_description=None):
+    def __init__(self, error: str, error_description: Optional[str] = None):
         self.error = error
         self.error_description = error_description
 
-    def __str__(self):
+    def __str__(self) -> str:
         if self.error_description:
             return f"{self.error}: {self.error_description}"
         return self.error
 
 
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
 class CasResponse:
-    username = attr.ib(type=str)
-    attributes = attr.ib(type=Dict[str, List[Optional[str]]])
+    username: str
+    attributes: Dict[str, List[Optional[str]]]
 
 
 class CasHandler:
@@ -65,10 +65,10 @@ class CasHandler:
         self._auth_handler = hs.get_auth_handler()
         self._registration_handler = hs.get_registration_handler()
 
-        self._cas_server_url = hs.config.cas_server_url
-        self._cas_service_url = hs.config.cas_service_url
-        self._cas_displayname_attribute = hs.config.cas_displayname_attribute
-        self._cas_required_attributes = hs.config.cas_required_attributes
+        self._cas_server_url = hs.config.cas.cas_server_url
+        self._cas_service_url = hs.config.cas.cas_service_url
+        self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute
+        self._cas_required_attributes = hs.config.cas.cas_required_attributes
 
         self._http_client = hs.get_proxied_http_client()
 
@@ -133,11 +133,9 @@ class CasHandler:
             body = pde.response
         except HttpResponseException as e:
             description = (
-                (
-                    'Authorization server responded with a "{status}" error '
-                    "while exchanging the authorization code."
-                ).format(status=e.code),
-            )
+                'Authorization server responded with a "{status}" error '
+                "while exchanging the authorization code."
+            ).format(status=e.code)
             raise CasError("server_error", description) from e
 
         return self._parse_cas_response(body)
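
The last cas.py hunk fixes a subtle bug: the old code wrapped the formatted string in parentheses with a trailing comma, which produces a one-element tuple rather than a string. A minimal reproduction of the pitfall:

    # The old, buggy shape: the trailing comma makes this a tuple.
    description = ("error {code}".format(code=500),)
    assert isinstance(description, tuple)

    # What the new code produces: a plain string.
    description = "error {code}".format(code=500)
    assert isinstance(description, str)
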
synapse/handlers/deactivate_account.py

@@ -255,16 +255,16 @@ class DeactivateAccountHandler(BaseHandler):
         Args:
             user_id: ID of user to be re-activated
         """
-        # Add the user to the directory, if necessary.
         user = UserID.from_string(user_id)
-        if self.hs.config.user_directory_search_all_users:
-            profile = await self.store.get_profileinfo(user.localpart)
-            await self.user_directory_handler.handle_local_profile_change(
-                user_id, profile
-            )
 
         # Ensure the user is not marked as erased.
         await self.store.mark_user_not_erased(user_id)
 
         # Mark the user as active.
         await self.store.set_user_deactivated_status(user_id, False)
 
+        # Add the user to the directory, if necessary. Note that
+        # this must be done after the user is re-activated, because
+        # deactivated users are excluded from the user directory.
+        profile = await self.store.get_profileinfo(user.localpart)
+        await self.user_directory_handler.handle_local_profile_change(user_id, profile)
synapse/handlers/device.py

@@ -267,7 +267,7 @@ class DeviceHandler(DeviceWorkerHandler):
 
         hs.get_distributor().observe("user_left_room", self.user_left_room)
 
-    def _check_device_name_length(self, name: Optional[str]):
+    def _check_device_name_length(self, name: Optional[str]) -> None:
        """
        Checks whether a device name is longer than the maximum allowed length.
 
synapse/handlers/directory.py

@@ -48,7 +48,7 @@ class DirectoryHandler(BaseHandler):
         self.event_creation_handler = hs.get_event_creation_handler()
         self.store = hs.get_datastore()
         self.config = hs.config
-        self.enable_room_list_search = hs.config.enable_room_list_search
+        self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
         self.require_membership = hs.config.require_membership_for_aliases
         self.third_party_event_rules = hs.get_third_party_event_rules()
 
@@ -145,7 +145,7 @@ class DirectoryHandler(BaseHandler):
         ):
             raise AuthError(403, "This user is not permitted to create this alias")
 
-        if not self.config.is_alias_creation_allowed(
+        if not self.config.roomdirectory.is_alias_creation_allowed(
             user_id, room_id, room_alias_str
         ):
             # Lets just return a generic message, as there may be all sorts of
@@ -461,7 +461,7 @@ class DirectoryHandler(BaseHandler):
         if canonical_alias:
             room_aliases.append(canonical_alias)
 
-        if not self.config.is_publishing_room_allowed(
+        if not self.config.roomdirectory.is_publishing_room_allowed(
             user_id, room_id, room_aliases
         ):
             # Lets just return a generic message, as there may be all sorts of
synapse/handlers/e2e_keys.py

@@ -202,7 +202,7 @@ class E2eKeysHandler:
 
         # Now fetch any devices that we don't have in our cache
         @trace
-        async def do_remote_query(destination):
+        async def do_remote_query(destination: str) -> None:
            """This is called when we are querying the device list of a user on
            a remote homeserver and their device list is not in the device list
            cache. If we share a room with this user and we're not querying for
@@ -447,7 +447,7 @@ class E2eKeysHandler:
         }
 
         @trace
-        async def claim_client_keys(destination):
+        async def claim_client_keys(destination: str) -> None:
             set_tag("destination", destination)
             device_keys = remote_queries[destination]
             try:
synapse/handlers/event_auth.py

@@ -25,6 +25,7 @@ from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
 from synapse.events.builder import EventBuilder
+from synapse.events.snapshot import EventContext
 from synapse.types import StateMap, get_domain_from_id
 from synapse.util.metrics import Measure
 
@@ -45,7 +46,11 @@ class EventAuthHandler:
         self._server_name = hs.hostname
 
     async def check_from_context(
-        self, room_version: str, event, context, do_sig_check=True
+        self,
+        room_version: str,
+        event: EventBase,
+        context: EventContext,
+        do_sig_check: bool = True,
     ) -> None:
         auth_event_ids = event.auth_event_ids()
         auth_events_by_id = await self._store.get_events(auth_event_ids)
synapse/handlers/federation.py

@@ -91,7 +91,7 @@ class FederationHandler(BaseHandler):
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
         self._event_auth_handler = hs.get_event_auth_handler()
-        self._server_notices_mxid = hs.config.server_notices_mxid
+        self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
         self.config = hs.config
         self.http_client = hs.get_proxied_blacklisted_http_client()
         self._replication = hs.get_replication_data_handler()
@@ -593,6 +593,13 @@ class FederationHandler(BaseHandler):
             target_hosts, room_id, knockee, Membership.KNOCK, content, params=params
         )
 
+        # Mark the knock as an outlier as we don't yet have the state at this point in
+        # the DAG.
+        event.internal_metadata.outlier = True
+
+        # ... but tell /sync to send it to clients anyway.
+        event.internal_metadata.out_of_band_membership = True
+
         # Record the room ID and its version so that we have a record of the room
         await self._maybe_store_room_on_outlier_membership(
             room_id=event.room_id, room_version=event_format_version
@@ -617,7 +624,7 @@ class FederationHandler(BaseHandler):
         # in the invitee's sync stream. It is stripped out for all other local users.
         event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
 
-        context = await self.state_handler.compute_event_context(event)
+        context = EventContext.for_outlier()
         stream_id = await self._federation_event_handler.persist_events_and_notify(
             event.room_id, [(event, context)]
         )
@@ -807,7 +814,7 @@ class FederationHandler(BaseHandler):
             )
         )
 
-        context = await self.state_handler.compute_event_context(event)
+        context = EventContext.for_outlier()
         await self._federation_event_handler.persist_events_and_notify(
             event.room_id, [(event, context)]
         )
@@ -836,7 +843,7 @@ class FederationHandler(BaseHandler):
 
         await self.federation_client.send_leave(host_list, event)
 
-        context = await self.state_handler.compute_event_context(event)
+        context = EventContext.for_outlier()
         stream_id = await self._federation_event_handler.persist_events_and_notify(
             event.room_id, [(event, context)]
         )
@@ -1108,8 +1115,7 @@ class FederationHandler(BaseHandler):
         events_to_context = {}
         for e in itertools.chain(auth_events, state):
             e.internal_metadata.outlier = True
-            ctx = await self.state_handler.compute_event_context(e)
-            events_to_context[e.event_id] = ctx
+            events_to_context[e.event_id] = EventContext.for_outlier()
 
         event_map = {
             e.event_id: e for e in itertools.chain(auth_events, state, [event])
@@ -1221,136 +1227,6 @@ class FederationHandler(BaseHandler):
 
         return missing_events
 
-    async def construct_auth_difference(
-        self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
-    ) -> Dict:
-        """Given a local and remote auth chain, find the differences. This
-        assumes that we have already processed all events in remote_auth
-
-        Params:
-            local_auth
-            remote_auth
-
-        Returns:
-            dict
-        """
-
-        logger.debug("construct_auth_difference Start!")
-
-        # TODO: Make sure we are OK with local_auth or remote_auth having more
-        # auth events in them than strictly necessary.
-
-        def sort_fun(ev):
-            return ev.depth, ev.event_id
-
-        logger.debug("construct_auth_difference after sort_fun!")
-
-        # We find the differences by starting at the "bottom" of each list
-        # and iterating up on both lists. The lists are ordered by depth and
-        # then event_id, we iterate up both lists until we find the event ids
-        # don't match. Then we look at depth/event_id to see which side is
-        # missing that event, and iterate only up that list. Repeat.
-
-        remote_list = list(remote_auth)
-        remote_list.sort(key=sort_fun)
-
-        local_list = list(local_auth)
-        local_list.sort(key=sort_fun)
-
-        local_iter = iter(local_list)
-        remote_iter = iter(remote_list)
-
-        logger.debug("construct_auth_difference before get_next!")
-
-        def get_next(it, opt=None):
-            try:
-                return next(it)
-            except Exception:
-                return opt
-
-        current_local = get_next(local_iter)
-        current_remote = get_next(remote_iter)
-
-        logger.debug("construct_auth_difference before while")
-
-        missing_remotes = []
-        missing_locals = []
-        while current_local or current_remote:
-            if current_remote is None:
-                missing_locals.append(current_local)
-                current_local = get_next(local_iter)
-                continue
-
-            if current_local is None:
-                missing_remotes.append(current_remote)
-                current_remote = get_next(remote_iter)
-                continue
-
-            if current_local.event_id == current_remote.event_id:
-                current_local = get_next(local_iter)
-                current_remote = get_next(remote_iter)
-                continue
-
-            if current_local.depth < current_remote.depth:
-                missing_locals.append(current_local)
-                current_local = get_next(local_iter)
-                continue
-
-            if current_local.depth > current_remote.depth:
-                missing_remotes.append(current_remote)
-                current_remote = get_next(remote_iter)
-                continue
-
-            # They have the same depth, so we fall back to the event_id order
-            if current_local.event_id < current_remote.event_id:
-                missing_locals.append(current_local)
-                current_local = get_next(local_iter)
-
-            if current_local.event_id > current_remote.event_id:
-                missing_remotes.append(current_remote)
-                current_remote = get_next(remote_iter)
-                continue
-
-        logger.debug("construct_auth_difference after while")
-
-        # missing locals should be sent to the server
-        # We should find why we are missing remotes, as they will have been
-        # rejected.
-
-        # Remove events from missing_remotes if they are referencing a missing
-        # remote. We only care about the "root" rejected ones.
-        missing_remote_ids = [e.event_id for e in missing_remotes]
-        base_remote_rejected = list(missing_remotes)
-        for e in missing_remotes:
-            for e_id in e.auth_event_ids():
-                if e_id in missing_remote_ids:
-                    try:
-                        base_remote_rejected.remove(e)
-                    except ValueError:
-                        pass
-
-        reason_map = {}
-
-        for e in base_remote_rejected:
-            reason = await self.store.get_rejection_reason(e.event_id)
-            if reason is None:
-                # TODO: e is not in the current state, so we should
-                # construct some proof of that.
-                continue
-
-            reason_map[e.event_id] = reason
-
-        logger.debug("construct_auth_difference returning")
-
-        return {
-            "auth_chain": local_auth,
-            "rejects": {
-                e.event_id: {"reason": reason_map[e.event_id], "proof": None}
-                for e in base_remote_rejected
-            },
-            "missing": [e.event_id for e in missing_locals],
-        }
-
     @log_function
     async def exchange_third_party_invite(
         self, sender_user_id: str, target_user_id: str, room_id: str, signed: JsonDict
@@ -1493,7 +1369,7 @@ class FederationHandler(BaseHandler):
             builder=builder
         )
         EventValidator().validate_new(event, self.config)
-        return (event, context)
+        return event, context
 
     async def _check_signature(self, event: EventBase, context: EventContext) -> None:
        """
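
The federation.py hunks above replace calls to state_handler.compute_event_context(event) with EventContext.for_outlier() wherever the event is an outlier, i.e. accepted without its surrounding DAG state. Roughly, the new classmethod just builds an empty context, so the expensive state-computation path is skipped; a simplified sketch of the idea, not the full EventContext class:

    from typing import Dict, Optional, Tuple

    StateMap = Dict[Tuple[str, str], str]

    class EventContext:
        def __init__(
            self,
            current_state_ids: Optional[StateMap] = None,
            prev_state_ids: Optional[StateMap] = None,
        ) -> None:
            self.current_state_ids = current_state_ids
            self.prev_state_ids = prev_state_ids

        @staticmethod
        def for_outlier() -> "EventContext":
            # An outlier carries no computed room state.
            return EventContext(current_state_ids={}, prev_state_ids={})

    assert EventContext.for_outlier().current_state_ids == {}
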
synapse/handlers/federation_event.py

@@ -27,11 +27,8 @@ from typing import (
     Tuple,
 )
 
-import attr
 from prometheus_client import Counter
 
-from twisted.internet import defer
-
 from synapse import event_auth
 from synapse.api.constants import (
     EventContentFields,
@@ -54,11 +51,7 @@ from synapse.event_auth import auth_types_for_event
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.federation.federation_client import InvalidResponseError
-from synapse.logging.context import (
-    make_deferred_yieldable,
-    nested_logging_context,
-    run_in_background,
-)
+from synapse.logging.context import nested_logging_context, run_in_background
 from synapse.logging.utils import log_function
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
@@ -75,7 +68,11 @@ from synapse.types import (
     UserID,
     get_domain_from_id,
 )
-from synapse.util.async_helpers import Linearizer, concurrently_execute
+from synapse.util.async_helpers import (
+    Linearizer,
+    concurrently_execute,
+    yieldable_gather_results,
+)
 from synapse.util.iterutils import batch_iter
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.util.stringutils import shortstr
@@ -92,30 +89,6 @@ soft_failed_event_counter = Counter(
 )
 
 
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class _NewEventInfo:
-    """Holds information about a received event, ready for passing to _auth_and_persist_events
-
-    Attributes:
-        event: the received event
-
-        claimed_auth_event_map: a map of (type, state_key) => event for the event's
-            claimed auth_events.
-
-            This can include events which have not yet been persisted, in the case that
-            we are backfilling a batch of events.
-
-            Note: May be incomplete: if we were unable to find all of the claimed auth
-            events. Also, treat the contents with caution: the events might also have
-            been rejected, might not yet have been authorized themselves, or they might
-            be in the wrong room.
-
-    """
-
-    event: EventBase
-    claimed_auth_event_map: StateMap[EventBase]
-
-
 class FederationEventHandler:
     """Handles events that originated from federation.
 
@@ -1016,7 +989,7 @@ class FederationEventHandler:
         except Exception:
             logger.exception("Failed to resync device for %s", sender)
 
-    async def _handle_marker_event(self, origin: str, marker_event: EventBase):
+    async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None:
        """Handles backfilling the insertion event when we receive a marker
        event that points to one.
 
@@ -1107,9 +1080,9 @@ class FederationEventHandler:
 
         room_version = await self._store.get_room_version(room_id)
 
-        event_map: Dict[str, EventBase] = {}
+        events: List[EventBase] = []
 
-        async def get_event(event_id: str):
+        async def get_event(event_id: str) -> None:
             with nested_logging_context(event_id):
                 try:
                     event = await self._federation_client.get_pdu(
@@ -1125,8 +1098,7 @@ class FederationEventHandler:
                             event_id,
                         )
                         return
-                    event_map[event.event_id] = event
+                    events.append(event)
 
                 except Exception as e:
                     logger.warning(
@@ -1137,11 +1109,29 @@ class FederationEventHandler:
                     )
 
         await concurrently_execute(get_event, event_ids, 5)
-        logger.info("Fetched %i events of %i requested", len(event_map), len(event_ids))
+        logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
+        await self._auth_and_persist_fetched_events(destination, room_id, events)
+
+    async def _auth_and_persist_fetched_events(
+        self, origin: str, room_id: str, events: Iterable[EventBase]
+    ) -> None:
+        """Persist the events fetched by _get_events_and_persist or _get_remote_auth_chain_for_event
+
+        The events to be persisted must be outliers.
+
+        We first sort the events to make sure that we process each event's auth_events
+        before the event itself, and then auth and persist them.
+
+        Notifies about the events where appropriate.
+
+        Params:
+            origin: where the events came from
+            room_id: the room that the events are meant to be in (though this has
+                not yet been checked)
+            events: the events that have been fetched
+        """
+        event_map = {event.event_id: event for event in events}
 
-        # we now need to auth the events in an order which ensures that each event's
-        # auth_events are authed before the event itself.
-        #
         # XXX: it might be possible to kick this process off in parallel with fetching
         # the events.
         while event_map:
@@ -1168,22 +1158,18 @@ class FederationEventHandler:
                 "Persisting %i of %i remaining events", len(roots), len(event_map)
             )
 
-            await self._auth_and_persist_fetched_events(destination, room_id, roots)
+            await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)
 
             for ev in roots:
                 del event_map[ev.event_id]
 
-    async def _auth_and_persist_fetched_events(
+    async def _auth_and_persist_fetched_events_inner(
         self, origin: str, room_id: str, fetched_events: Collection[EventBase]
     ) -> None:
-        """Persist the events fetched by _get_events_and_persist.
+        """Helper for _auth_and_persist_fetched_events
 
-        The events should not depend on one another, e.g. this should be used to persist
-        a bunch of outliers, but not a chunk of individual events that depend
-        on each other for state calculations.
-
-        We also assume that all of the auth events for all of the events have already
-        been persisted.
+        Persists a batch of events where we have (theoretically) already persisted all
+        of their auth events.
 
         Notifies about the events where appropriate.
 
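
The while loop introduced above repeatedly peels off the "roots" of the batch: events whose auth events are not themselves still waiting in the batch. A standalone sketch of the same ordering idea over a plain dependency map (the event names here are invented for the example):

    # Process a batch in auth-dependency order: repeatedly take the "roots",
    # i.e. entries with no unprocessed dependencies, then remove them.
    deps = {
        "create": [],
        "member": ["create"],
        "message": ["create", "member"],
    }
    pending = dict(deps)
    order = []
    while pending:
        roots = [e for e, auth in pending.items() if not any(a in pending for a in auth)]
        assert roots, "dependency cycle"
        order.extend(roots)
        for e in roots:
            del pending[e]
    print(order)  # ['create', 'member', 'message']: auth events before dependants
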
@@ -1191,7 +1177,7 @@ class FederationEventHandler:
             origin: where the events came from
             room_id: the room that the events are meant to be in (though this has
                 not yet been checked)
-            event_id: map from event_id -> event for the fetched events
+            fetched_events: the events to persist
         """
         # get all the auth events for all the events in this batch. By now, they should
         # have been persisted.
@@ -1203,47 +1189,36 @@ class FederationEventHandler:
             allow_rejected=True,
         )
 
-        event_infos = []
-        for event in fetched_events:
-            auth = {}
-            for auth_event_id in event.auth_event_ids():
-                ae = persisted_events.get(auth_event_id)
-                if ae:
-                    auth[(ae.type, ae.state_key)] = ae
-                else:
-                    logger.info("Missing auth event %s", auth_event_id)
-
-            event_infos.append(_NewEventInfo(event, auth))
-
-        if not event_infos:
-            return
-
-        async def prep(ev_info: _NewEventInfo):
-            event = ev_info.event
+        async def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
             with nested_logging_context(suffix=event.event_id):
-                res = await self._state_handler.compute_event_context(event)
-                res = await self._check_event_auth(
+                auth = {}
+                for auth_event_id in event.auth_event_ids():
+                    ae = persisted_events.get(auth_event_id)
+                    if not ae:
+                        logger.warning(
+                            "Event %s relies on auth_event %s, which could not be found.",
+                            event,
+                            auth_event_id,
+                        )
+                        # the fact we can't find the auth event doesn't mean it doesn't
+                        # exist, which means it is premature to reject `event`. Instead we
+                        # just ignore it for now.
+                        return None
+                    auth[(ae.type, ae.state_key)] = ae
+
+                context = EventContext.for_outlier()
+                context = await self._check_event_auth(
                     origin,
                     event,
-                    res,
-                    claimed_auth_event_map=ev_info.claimed_auth_event_map,
+                    context,
+                    claimed_auth_event_map=auth,
                 )
-            return res
+            return event, context
 
-        contexts = await make_deferred_yieldable(
-            defer.gatherResults(
-                [run_in_background(prep, ev_info) for ev_info in event_infos],
-                consumeErrors=True,
-            )
-        )
-
-        await self.persist_events_and_notify(
-            room_id,
-            [
-                (ev_info.event, context)
-                for ev_info, context in zip(event_infos, contexts)
-            ],
+        events_to_persist = (
+            x for x in await yieldable_gather_results(prep, fetched_events) if x
         )
+        await self.persist_events_and_notify(room_id, tuple(events_to_persist))
 
     async def _check_event_auth(
         self,
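
The hunk above swaps the Twisted gatherResults/run_in_background construction for yieldable_gather_results, which runs an async callable over a collection concurrently and can simply be awaited. A rough, framework-free approximation of the call shape using asyncio.gather (Synapse's helper additionally preserves logcontexts and unwraps Twisted Failures):

    import asyncio
    from typing import Awaitable, Callable, Iterable, List, Optional, TypeVar

    T = TypeVar("T")
    R = TypeVar("R")

    async def gather_results(
        func: Callable[[T], Awaitable[Optional[R]]], items: Iterable[T]
    ) -> List[Optional[R]]:
        # Run `func` over every item concurrently and await the combined result.
        return await asyncio.gather(*(func(i) for i in items))

    async def main() -> None:
        async def prep(n: int) -> Optional[int]:
            return n * n if n % 2 else None  # None results get filtered, as above

        results = [x for x in await gather_results(prep, [1, 2, 3]) if x]
        print(results)  # [1, 9]

    asyncio.run(main())
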
@@ -1269,8 +1244,7 @@ class FederationEventHandler:
 
             claimed_auth_event_map:
                 A map of (type, state_key) => event for the event's claimed auth_events.
-                Possibly incomplete, and possibly including events that are not yet
-                persisted, or authed, or in the right room.
+                Possibly including events that were rejected, or are in the wrong room.
 
                 Only populated when populating outliers.
 
@@ -1505,64 +1479,22 @@ class FederationEventHandler:
             # If we don't have all the auth events, we need to get them.
             logger.info("auth_events contains unknown events: %s", missing_auth)
             try:
-                try:
-                    remote_auth_chain = await self._federation_client.get_event_auth(
-                        origin, event.room_id, event.event_id
-                    )
-                except RequestSendFailed as e1:
-                    # The other side isn't around or doesn't implement the
-                    # endpoint, so lets just bail out.
-                    logger.info("Failed to get event auth from remote: %s", e1)
-                    return context, auth_events
-
-                seen_remotes = await self._store.have_seen_events(
-                    event.room_id, [e.event_id for e in remote_auth_chain]
+                await self._get_remote_auth_chain_for_event(
+                    origin, event.room_id, event.event_id
                 )
-
-                for auth_event in remote_auth_chain:
-                    if auth_event.event_id in seen_remotes:
-                        continue
-
-                    if auth_event.event_id == event.event_id:
-                        continue
-
-                    try:
-                        auth_ids = auth_event.auth_event_ids()
-                        auth = {
-                            (e.type, e.state_key): e
-                            for e in remote_auth_chain
-                            if e.event_id in auth_ids or e.type == EventTypes.Create
-                        }
-                        auth_event.internal_metadata.outlier = True
-
-                        logger.debug(
-                            "_check_event_auth %s missing_auth: %s",
-                            event.event_id,
-                            auth_event.event_id,
-                        )
-                        missing_auth_event_context = (
-                            await self._state_handler.compute_event_context(auth_event)
-                        )
-
-                        missing_auth_event_context = await self._check_event_auth(
-                            origin,
-                            auth_event,
-                            missing_auth_event_context,
-                            claimed_auth_event_map=auth,
-                        )
-                        await self.persist_events_and_notify(
-                            event.room_id, [(auth_event, missing_auth_event_context)]
-                        )
-
-                        if auth_event.event_id in event_auth_events:
-                            auth_events[
-                                (auth_event.type, auth_event.state_key)
-                            ] = auth_event
-                    except AuthError:
-                        pass
-
             except Exception:
                 logger.exception("Failed to get auth chain")
+            else:
+                # load any auth events we might have persisted from the database. This
+                # has the side-effect of correctly setting the rejected_reason on them.
+                auth_events.update(
+                    {
+                        (ae.type, ae.state_key): ae
+                        for ae in await self._store.get_events_as_list(
+                            missing_auth, allow_rejected=True
+                        )
+                    }
+                )
 
         if event.internal_metadata.is_outlier():
             # XXX: given that, for an outlier, we'll be working with the
@@ -1636,6 +1568,45 @@ class FederationEventHandler:
 
         return context, auth_events
 
+    async def _get_remote_auth_chain_for_event(
+        self, destination: str, room_id: str, event_id: str
+    ) -> None:
+        """If we are missing some of an event's auth events, attempt to request them
+
+        Args:
+            destination: where to fetch the auth tree from
+            room_id: the room in which we are lacking auth events
+            event_id: the event for which we are lacking auth events
+        """
+        try:
+            remote_event_map = {
+                e.event_id: e
+                for e in await self._federation_client.get_event_auth(
+                    destination, room_id, event_id
+                )
+            }
+        except RequestSendFailed as e1:
+            # The other side isn't around or doesn't implement the
+            # endpoint, so lets just bail out.
+            logger.info("Failed to get event auth from remote: %s", e1)
+            return
+
+        logger.info("/event_auth returned %i events", len(remote_event_map))
+
+        # `event` may be returned, but we should not yet process it.
+        remote_event_map.pop(event_id, None)
+
+        # nor should we reprocess any events we have already seen.
+        seen_remotes = await self._store.have_seen_events(
+            room_id, remote_event_map.keys()
+        )
+        for s in seen_remotes:
+            remote_event_map.pop(s, None)
+
+        await self._auth_and_persist_fetched_events(
+            destination, room_id, remote_event_map.values()
+        )
+
     async def _update_context_for_auth_events(
         self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
     ) -> EventContext:
@@ -1692,7 +1663,7 @@ class FederationEventHandler:
 
     async def _run_push_actions_and_persist_event(
         self, event: EventBase, context: EventContext, backfilled: bool = False
-    ):
+    ) -> None:
        """Run the push actions for a received event, and persist it.
 
        Args:
synapse/handlers/groups_local.py

@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, Dict, Iterable, List, Set
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Iterable, List, Set
 
 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.types import GroupID, JsonDict, get_domain_from_id
@@ -25,12 +25,14 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-def _create_rerouter(func_name):
+def _create_rerouter(func_name: str) -> Callable[..., Awaitable[JsonDict]]:
     """Returns an async function that looks at the group id and calls the function
     on federation or the local group server if the group is local
     """
 
-    async def f(self, group_id, *args, **kwargs):
+    async def f(
+        self: "GroupsLocalWorkerHandler", group_id: str, *args: Any, **kwargs: Any
+    ) -> JsonDict:
         if not GroupID.is_valid(group_id):
             raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
 
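
_create_rerouter builds one async method per group API and attaches it to the handler; the new annotations spell out that each generated method returns a JSON dict. A condensed, self-contained sketch of the dispatch pattern, with the handler attributes and local/remote split invented for illustration:

    import asyncio
    from typing import Any, Awaitable, Callable, Dict

    JsonDict = Dict[str, Any]

    def _create_rerouter(func_name: str) -> Callable[..., Awaitable[JsonDict]]:
        # Forward `func_name` to the local handler for local groups,
        # otherwise to the federation client.
        async def f(self: Any, group_id: str, *args: Any, **kwargs: Any) -> JsonDict:
            target = self.local if self.is_local(group_id) else self.remote
            return await getattr(target, func_name)(group_id, *args, **kwargs)
        return f

    class Local:
        async def get_group_profile(self, group_id: str) -> JsonDict:
            return {"group_id": group_id, "source": "local"}

    class Handler:
        local, remote = Local(), Local()
        def is_local(self, group_id: str) -> bool:
            return group_id.endswith(":example.org")
        get_group_profile = _create_rerouter("get_group_profile")

    print(asyncio.run(Handler().get_group_profile("+g:example.org")))
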
synapse/handlers/identity.py

@@ -62,7 +62,7 @@ class IdentityHandler(BaseHandler):
         self.federation_http_client = hs.get_federation_http_client()
         self.hs = hs
 
-        self._web_client_location = hs.config.invite_client_location
+        self._web_client_location = hs.config.email.invite_client_location
 
         # Ratelimiters for `/requestToken` endpoints.
         self._3pid_validation_ratelimiter_ip = Ratelimiter(
@@ -419,7 +419,7 @@ class IdentityHandler(BaseHandler):
 
         token_expires = (
             self.hs.get_clock().time_msec()
-            + self.hs.config.email_validation_token_lifetime
+            + self.hs.config.email.email_validation_token_lifetime
         )
 
         await self.store.start_or_continue_validation_session(
@@ -465,7 +465,7 @@ class IdentityHandler(BaseHandler):
         if next_link:
             params["next_link"] = next_link
 
-        if self.hs.config.using_identity_server_from_trusted_list:
+        if self.hs.config.email.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
             logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
@@ -518,7 +518,7 @@ class IdentityHandler(BaseHandler):
         if next_link:
             params["next_link"] = next_link
 
-        if self.hs.config.using_identity_server_from_trusted_list:
+        if self.hs.config.email.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
             logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
@@ -572,12 +572,12 @@ class IdentityHandler(BaseHandler):
         validation_session = None
 
         # Try to validate as email
-        if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+        if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
             # Ask our delegated email identity server
             validation_session = await self.threepid_from_creds(
                 self.hs.config.account_threepid_delegate_email, threepid_creds
             )
-        elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+        elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
             # Get a validated session matching these details
             validation_session = await self.store.get_threepid_validation_session(
                 "email", client_secret, sid=sid, validated=True
@@ -13,7 +13,7 @@
 # limitations under the License.

 import logging
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, List, Optional, Tuple

 from twisted.internet import defer

@@ -125,7 +125,7 @@ class InitialSyncHandler(BaseHandler):

         now_token = self.hs.get_event_sources().get_current_token()

-        presence_stream = self.hs.get_event_sources().sources["presence"]
+        presence_stream = self.hs.get_event_sources().sources.presence
         presence, _ = await presence_stream.get_new_events(
             user, from_key=None, include_offline=False
         )
@@ -150,7 +150,7 @@ class InitialSyncHandler(BaseHandler):
         if limit is None:
             limit = 10

-        async def handle_room(event: RoomsForUser):
+        async def handle_room(event: RoomsForUser) -> None:
             d: JsonDict = {
                 "room_id": event.room_id,
                 "membership": event.membership,
@@ -411,9 +411,9 @@ class InitialSyncHandler(BaseHandler):

         presence_handler = self.hs.get_presence_handler()

-        async def get_presence():
+        async def get_presence() -> List[JsonDict]:
             # If presence is disabled, return an empty list
-            if not self.hs.config.use_presence:
+            if not self.hs.config.server.use_presence:
                 return []

             states = await presence_handler.get_states(
@@ -428,7 +428,7 @@ class InitialSyncHandler(BaseHandler):
             for s in states
         ]

-        async def get_receipts():
+        async def get_receipts() -> List[JsonDict]:
             receipts = await self.store.get_linearized_receipts_for_room(
                 room_id, to_key=now_token.receipt_key
             )
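The `sources["presence"]` to `sources.presence` change above swaps a string-keyed dict for a typed container, which static checkers can verify. A rough sketch with hypothetical placeholder sources:

```python
# Hypothetical sketch: a typed container of event sources replaces a
# plain dict, so a typo in a source name becomes a type error.
from dataclasses import dataclass


@dataclass
class EventSources:
    presence: str
    receipt: str


sources = EventSources(presence="presence_source", receipt="receipt_source")
print(sources.presence)  # attribute access; mypy rejects sources.presenc
```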
@@ -46,6 +46,7 @@ from synapse.events import EventBase
 from synapse.events.builder import EventBuilder
 from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
+from synapse.handlers.directory import DirectoryHandler
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
@@ -298,7 +299,7 @@ class MessageHandler:
             for user_id, profile in users_with_profile.items()
         }

-    def maybe_schedule_expiry(self, event: EventBase):
+    def maybe_schedule_expiry(self, event: EventBase) -> None:
         """Schedule the expiry of an event if there's not already one scheduled,
         or if the one running is for an event that will expire after the provided
         timestamp.
@@ -318,7 +319,7 @@ class MessageHandler:
         # a task scheduled for a timestamp that's sooner than the provided one.
         self._schedule_expiry_for_event(event.event_id, expiry_ts)

-    async def _schedule_next_expiry(self):
+    async def _schedule_next_expiry(self) -> None:
         """Retrieve the ID and the expiry timestamp of the next event to be expired,
         and schedule an expiry task for it.

@@ -331,7 +332,7 @@ class MessageHandler:
             event_id, expiry_ts = res
             self._schedule_expiry_for_event(event_id, expiry_ts)

-    def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int):
+    def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int) -> None:
         """Schedule an expiry task for the provided event if there's not already one
         scheduled at a timestamp that's sooner than the provided one.

@@ -367,7 +368,7 @@ class MessageHandler:
             event_id,
         )

-    async def _expire_event(self, event_id: str):
+    async def _expire_event(self, event_id: str) -> None:
         """Retrieve and expire an event that needs to be expired from the database.

         If the event doesn't exist in the database, log it and delete the expiry date
@@ -444,7 +445,7 @@ class EventCreationHandler:
         )

         self._block_events_without_consent_error = (
-            self.config.block_events_without_consent_error
+            self.config.consent.block_events_without_consent_error
         )

         # we need to construct a ConsentURIBuilder here, as it checks that the necessary
@@ -667,7 +668,7 @@ class EventCreationHandler:

         self.validator.validate_new(event, self.config)

-        return (event, context)
+        return event, context

     async def _is_exempt_from_privacy_policy(
         self, builder: EventBuilder, requester: Requester
@@ -693,10 +694,10 @@ class EventCreationHandler:
         return False

     async def _is_server_notices_room(self, room_id: str) -> bool:
-        if self.config.server_notices_mxid is None:
+        if self.config.servernotices.server_notices_mxid is None:
             return False
         user_ids = await self.store.get_users_in_room(room_id)
-        return self.config.server_notices_mxid in user_ids
+        return self.config.servernotices.server_notices_mxid in user_ids

     async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
         """Check if a user has accepted the privacy policy
@@ -732,8 +733,8 @@ class EventCreationHandler:

         # exempt the system notices user
         if (
-            self.config.server_notices_mxid is not None
-            and user_id == self.config.server_notices_mxid
+            self.config.servernotices.server_notices_mxid is not None
+            and user_id == self.config.servernotices.server_notices_mxid
         ):
             return

@@ -745,7 +746,7 @@ class EventCreationHandler:
         if u["appservice_id"] is not None:
             # users registered by an appservice are exempt
             return
-        if u["consent_version"] == self.config.user_consent_version:
+        if u["consent_version"] == self.config.consent.user_consent_version:
             return

         consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart)
@@ -1005,7 +1006,7 @@ class EventCreationHandler:

         logger.debug("Created event %s", event.event_id)

-        return (event, context)
+        return event, context

     @measure_func("handle_new_client_event")
     async def handle_new_client_event(
@@ -1231,7 +1232,10 @@ class EventCreationHandler:
             self._external_cache_joined_hosts_updates[state_entry.state_group] = None

     async def _validate_canonical_alias(
-        self, directory_handler, room_alias_str: str, expected_room_id: str
+        self,
+        directory_handler: DirectoryHandler,
+        room_alias_str: str,
+        expected_room_id: str,
     ) -> None:
         """
         Ensure that the given room alias points to the expected room ID.
@@ -1424,7 +1428,7 @@ class EventCreationHandler:
         # structural protocol level).
         is_msc2716_event = (
             original_event.type == EventTypes.MSC2716_INSERTION
-            or original_event.type == EventTypes.MSC2716_CHUNK
+            or original_event.type == EventTypes.MSC2716_BATCH
             or original_event.type == EventTypes.MSC2716_MARKER
         )
         if not room_version_obj.msc2716_historical and is_msc2716_event:
@@ -1480,7 +1484,7 @@ class EventCreationHandler:
         # If there's an expiry timestamp on the event, schedule its expiry.
         self._message_handler.maybe_schedule_expiry(event)

-        def _notify():
+        def _notify() -> None:
             try:
                 self.notifier.on_new_room_event(
                     event, event_pos, max_stream_token, extra_users=extra_users
@@ -1526,7 +1530,7 @@ class EventCreationHandler:
         except Exception:
             logger.exception("Error bumping presence active time")

-    async def _send_dummy_events_to_fill_extremities(self):
+    async def _send_dummy_events_to_fill_extremities(self) -> None:
         """Background task to send dummy events into rooms that have a large
         number of extremities
         """
@@ -1603,7 +1607,7 @@ class EventCreationHandler:
             )
             return False

-    def _expire_rooms_to_exclude_from_dummy_event_insertion(self):
+    def _expire_rooms_to_exclude_from_dummy_event_insertion(self) -> None:
         expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
         to_expire = set()
         for room_id, time in self._rooms_to_exclude_from_dummy_event_insertion.items():
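Most changes in the file above only add return-type annotations such as `-> None`. The payoff is that mypy can then flag callers that misuse the return value; a tiny illustration (names here are invented for the example):

```python
# Illustration only. With the annotation in place, mypy reports
# '"schedule_expiry" does not return a value' for the last line,
# even though the script runs fine (result is simply None).
def schedule_expiry(event_id: str, expiry_ts: int) -> None:
    print(f"scheduling expiry of {event_id} at {expiry_ts}")


result = schedule_expiry("$event:example.org", 1_632_900_000_000)
```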
@@ -14,7 +14,7 @@
 # limitations under the License.
 import inspect
 import logging
-from typing import TYPE_CHECKING, Dict, Generic, List, Optional, TypeVar, Union
+from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
 from urllib.parse import urlencode, urlparse

 import attr
@@ -249,11 +249,11 @@ class OidcHandler:
 class OidcError(Exception):
     """Used to catch errors when calling the token_endpoint"""

-    def __init__(self, error, error_description=None):
+    def __init__(self, error: str, error_description: Optional[str] = None):
         self.error = error
         self.error_description = error_description

-    def __str__(self):
+    def __str__(self) -> str:
         if self.error_description:
             return f"{self.error}: {self.error_description}"
         return self.error
@@ -277,7 +277,7 @@ class OidcProvider:
         self._token_generator = token_generator

         self._config = provider
-        self._callback_url: str = hs.config.oidc_callback_url
+        self._callback_url: str = hs.config.oidc.oidc_callback_url

         # Calculate the prefix for OIDC callback paths based on the public_baseurl.
         # We'll insert this into the Path= parameter of any session cookies we set.
@@ -1057,13 +1057,13 @@ class JwtClientSecret:
         self._cached_secret = b""
         self._cached_secret_replacement_time = 0

-    def __str__(self):
+    def __str__(self) -> str:
         # if client_auth_method is client_secret_basic, then ClientAuth.prepare calls
         # encode_client_secret_basic, which calls "{}".format(secret), which ends up
         # here.
         return self._get_secret().decode("ascii")

-    def __bytes__(self):
+    def __bytes__(self) -> bytes:
         # if client_auth_method is client_secret_post, then ClientAuth.prepare calls
         # encode_client_secret_post, which ends up here.
         return self._get_secret()
@@ -1197,21 +1197,21 @@ class OidcSessionTokenGenerator:
     )


-@attr.s(frozen=True, slots=True)
+@attr.s(frozen=True, slots=True, auto_attribs=True)
 class OidcSessionData:
     """The attributes which are stored in a OIDC session cookie"""

     # the Identity Provider being used
-    idp_id = attr.ib(type=str)
+    idp_id: str

     # The `nonce` parameter passed to the OIDC provider.
-    nonce = attr.ib(type=str)
+    nonce: str

     # The URL the client gave when it initiated the flow. ("" if this is a UI Auth)
-    client_redirect_url = attr.ib(type=str)
+    client_redirect_url: str

     # The session ID of the ongoing UI Auth ("" if this is a login)
-    ui_auth_session_id = attr.ib(type=str)
+    ui_auth_session_id: str


 class UserAttributeDict(TypedDict):
@@ -1290,20 +1290,20 @@ class OidcMappingProvider(Generic[C]):


 # Used to clear out "None" values in templates
-def jinja_finalize(thing):
+def jinja_finalize(thing: Any) -> Any:
     return thing if thing is not None else ""


 env = Environment(finalize=jinja_finalize)


-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
 class JinjaOidcMappingConfig:
-    subject_claim = attr.ib(type=str)
-    localpart_template = attr.ib(type=Optional[Template])
-    display_name_template = attr.ib(type=Optional[Template])
-    email_template = attr.ib(type=Optional[Template])
-    extra_attributes = attr.ib(type=Dict[str, Template])
+    subject_claim: str
+    localpart_template: Optional[Template]
+    display_name_template: Optional[Template]
+    email_template: Optional[Template]
+    extra_attributes: Dict[str, Template]


 class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
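The `OidcSessionData` and `JinjaOidcMappingConfig` hunks migrate attrs classes from explicit `attr.ib(type=...)` fields to plain annotations with `auto_attribs=True`. Both spellings define the same class; a self-contained before/after sketch (requires the attrs package, which Synapse already depends on):

```python
import attr


# Before: fields declared with attr.ib(type=...).
@attr.s(frozen=True, slots=True)
class SessionDataOld:
    idp_id = attr.ib(type=str)
    nonce = attr.ib(type=str)


# After: auto_attribs=True turns annotated attributes into fields.
@attr.s(frozen=True, slots=True, auto_attribs=True)
class SessionDataNew:
    idp_id: str
    nonce: str


assert SessionDataOld("oidc", "n") == SessionDataOld("oidc", "n")
assert SessionDataNew("oidc", "n").nonce == "n"
```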
@@ -15,6 +15,8 @@
 import logging
 from typing import TYPE_CHECKING, Any, Dict, Optional, Set

+import attr
+
 from twisted.python.failure import Failure

 from synapse.api.constants import EventTypes, Membership
@@ -24,7 +26,7 @@ from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter
 from synapse.streams.config import PaginationConfig
-from synapse.types import Requester
+from synapse.types import JsonDict, Requester
 from synapse.util.async_helpers import ReadWriteLock
 from synapse.util.stringutils import random_string
 from synapse.visibility import filter_events_for_client
@@ -36,15 +38,12 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


+@attr.s(slots=True, auto_attribs=True)
 class PurgeStatus:
     """Object tracking the status of a purge request

     This class contains information on the progress of a purge request, for
     return by get_purge_status.
-
-    Attributes:
-        status (int): Tracks whether this request has completed. One of
-            STATUS_{ACTIVE,COMPLETE,FAILED}
     """

     STATUS_ACTIVE = 0
@@ -57,10 +56,10 @@ class PurgeStatus:
         STATUS_FAILED: "failed",
     }

-    def __init__(self):
-        self.status = PurgeStatus.STATUS_ACTIVE
+    # Tracks whether this request has completed. One of STATUS_{ACTIVE,COMPLETE,FAILED}.
+    status: int = STATUS_ACTIVE

-    def asdict(self):
+    def asdict(self) -> JsonDict:
         return {"status": PurgeStatus.STATUS_TEXT[self.status]}


@@ -107,7 +106,7 @@ class PaginationHandler:

     async def purge_history_for_rooms_in_range(
         self, min_ms: Optional[int], max_ms: Optional[int]
-    ):
+    ) -> None:
         """Purge outdated events from rooms within the given retention range.

         If a default retention policy is defined in the server's configuration and its
@@ -291,7 +290,7 @@ class PaginationHandler:
         self._purges_in_progress_by_room.discard(room_id)

         # remove the purge from the list 24 hours after it completes
-        def clear_purge():
+        def clear_purge() -> None:
             del self._purges_by_id[purge_id]

         self.hs.get_reactor().callLater(24 * 3600, clear_purge)
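One subtlety in the `PurgeStatus` rewrite above: with `auto_attribs=True`, only *annotated* class attributes become attrs fields, so the unannotated `STATUS_*` constants stay ordinary class attributes while `status: int = STATUS_ACTIVE` replaces the old `__init__`. A sketch:

```python
import attr


@attr.s(slots=True, auto_attribs=True)
class PurgeStatusSketch:
    STATUS_ACTIVE = 0  # no annotation: stays a class constant
    STATUS_COMPLETE = 1

    # Annotated: becomes an attrs field with a default, replacing __init__.
    status: int = STATUS_ACTIVE


p = PurgeStatusSketch()
assert p.status == PurgeStatusSketch.STATUS_ACTIVE
```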
@@ -27,8 +27,8 @@ logger = logging.getLogger(__name__)

 class PasswordPolicyHandler:
     def __init__(self, hs: "HomeServer"):
-        self.policy = hs.config.password_policy
-        self.enabled = hs.config.password_policy_enabled
+        self.policy = hs.config.auth.password_policy
+        self.enabled = hs.config.auth.password_policy_enabled

         # Regexps for the spec'd policy parameters.
         self.regexp_digit = re.compile("[0-9]")
@@ -26,18 +26,22 @@ import contextlib
 import logging
 from bisect import bisect
 from contextlib import contextmanager
+from types import TracebackType
 from typing import (
     TYPE_CHECKING,
     Any,
+    Awaitable,
     Callable,
     Collection,
     Dict,
     FrozenSet,
+    Generator,
     Iterable,
     List,
     Optional,
     Set,
     Tuple,
+    Type,
     Union,
 )

@@ -61,6 +65,7 @@ from synapse.replication.http.streams import ReplicationGetStreamUpdates
 from synapse.replication.tcp.commands import ClearUserSyncsCommand
 from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
 from synapse.storage.databases.main import DataStore
+from synapse.streams import EventSource
 from synapse.types import JsonDict, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.descriptors import _CacheContext, cached
@@ -240,7 +245,7 @@ class BasePresenceHandler(abc.ABC):
         """

     @abc.abstractmethod
-    async def bump_presence_active_time(self, user: UserID):
+    async def bump_presence_active_time(self, user: UserID) -> None:
         """We've seen the user do something that indicates they're interacting
         with the app.
         """
@@ -274,7 +279,7 @@ class BasePresenceHandler(abc.ABC):

     async def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: list
-    ):
+    ) -> None:
         """Process streams received over replication."""
         await self._federation_queue.process_replication_rows(
             stream_name, instance_name, token, rows
@@ -286,7 +291,7 @@ class BasePresenceHandler(abc.ABC):

     async def maybe_send_presence_to_interested_destinations(
         self, states: List[UserPresenceState]
-    ):
+    ) -> None:
         """If this instance is a federation sender, send the states to all
         destinations that are interested. Filters out any states for remote
         users.
@@ -309,7 +314,7 @@ class BasePresenceHandler(abc.ABC):
         for destination, host_states in hosts_to_states.items():
             self._federation.send_presence_to_destinations(host_states, [destination])

-    async def send_full_presence_to_users(self, user_ids: Collection[str]):
+    async def send_full_presence_to_users(self, user_ids: Collection[str]) -> None:
         """
         Adds to the list of users who should receive a full snapshot of presence
         upon their next sync. Note that this only works for local users.
@@ -363,7 +368,12 @@ class BasePresenceHandler(abc.ABC):
 class _NullContextManager(ContextManager[None]):
     """A context manager which does nothing."""

-    def __exit__(self, exc_type, exc_val, exc_tb):
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
         pass


@@ -374,7 +384,7 @@ class WorkerPresenceHandler(BasePresenceHandler):

         self._presence_writer_instance = hs.config.worker.writers.presence[0]

-        self._presence_enabled = hs.config.use_presence
+        self._presence_enabled = hs.config.server.use_presence

         # Route presence EDUs to the right worker
         hs.get_federation_registry().register_instances_for_edu(
@@ -468,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
         if self._user_to_num_current_syncs[user_id] == 1:
             self.mark_as_coming_online(user_id)

-        def _end():
+        def _end() -> None:
             # We check that the user_id is in user_to_num_current_syncs because
             # user_to_num_current_syncs may have been cleared if we are
             # shutting down.
@@ -480,7 +490,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
             self.mark_as_going_offline(user_id)

         @contextlib.contextmanager
-        def _user_syncing():
+        def _user_syncing() -> Generator[None, None, None]:
             try:
                 yield
             finally:
@@ -503,7 +513,7 @@ class WorkerPresenceHandler(BasePresenceHandler):

     async def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: list
-    ):
+    ) -> None:
         await super().process_replication_rows(stream_name, instance_name, token, rows)

         if stream_name != PresenceStream.NAME:
@@ -584,7 +594,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
         user_id = target_user.to_string()

         # If presence is disabled, no-op
-        if not self.hs.config.use_presence:
+        if not self.hs.config.server.use_presence:
             return

         # Proxy request to instance that writes presence
@@ -601,7 +611,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
         with the app.
         """
         # If presence is disabled, no-op
-        if not self.hs.config.use_presence:
+        if not self.hs.config.server.use_presence:
             return

         # Proxy request to instance that writes presence
@@ -618,7 +628,7 @@ class PresenceHandler(BasePresenceHandler):
         self.server_name = hs.hostname
         self.wheel_timer: WheelTimer[str] = WheelTimer()
         self.notifier = hs.get_notifier()
-        self._presence_enabled = hs.config.use_presence
+        self._presence_enabled = hs.config.server.use_presence

         federation_registry = hs.get_federation_registry()

@@ -689,7 +699,7 @@ class PresenceHandler(BasePresenceHandler):
         # Start a LoopingCall in 30s that fires every 5s.
         # The initial delay is to allow disconnected clients a chance to
         # reconnect before we treat them as offline.
-        def run_timeout_handler():
+        def run_timeout_handler() -> Awaitable[None]:
             return run_as_background_process(
                 "handle_presence_timeouts", self._handle_timeouts
             )
@@ -698,7 +708,7 @@ class PresenceHandler(BasePresenceHandler):
             30, self.clock.looping_call, run_timeout_handler, 5000
         )

-        def run_persister():
+        def run_persister() -> Awaitable[None]:
             return run_as_background_process(
                 "persist_presence_changes", self._persist_unpersisted_changes
             )
@@ -916,7 +926,7 @@ class PresenceHandler(BasePresenceHandler):
         with the app.
         """
         # If presence is disabled, no-op
-        if not self.hs.config.use_presence:
+        if not self.hs.config.server.use_presence:
             return

         user_id = user.to_string()
@@ -942,14 +952,14 @@ class PresenceHandler(BasePresenceHandler):
         when users disconnect/reconnect.

         Args:
-            user_id (str)
-            affect_presence (bool): If false this function will be a no-op.
+            user_id
+            affect_presence: If false this function will be a no-op.
                 Useful for streams that are not associated with an actual
                 client that is being used by a user.
         """
         # Override if it should affect the user's presence, if presence is
         # disabled.
-        if not self.hs.config.use_presence:
+        if not self.hs.config.server.use_presence:
             affect_presence = False

         if affect_presence:
@@ -978,7 +988,7 @@ class PresenceHandler(BasePresenceHandler):
             ]
         )

-        async def _end():
+        async def _end() -> None:
             try:
                 self.user_to_num_current_syncs[user_id] -= 1

@@ -994,7 +1004,7 @@ class PresenceHandler(BasePresenceHandler):
             logger.exception("Error updating presence after sync")

         @contextmanager
-        def _user_syncing():
+        def _user_syncing() -> Generator[None, None, None]:
             try:
                 yield
             finally:
@@ -1264,7 +1274,7 @@ class PresenceHandler(BasePresenceHandler):
         if self._event_processing:
             return

-        async def _process_presence():
+        async def _process_presence() -> None:
             assert not self._event_processing

             self._event_processing = True
@@ -1491,7 +1501,7 @@ def format_user_presence_state(
     return content


-class PresenceEventSource:
+class PresenceEventSource(EventSource[int, UserPresenceState]):
     def __init__(self, hs: "HomeServer"):
         # We can't call get_presence_handler here because there's a cycle:
         #
@@ -1510,10 +1520,11 @@ class PresenceEventSource:
         self,
         user: UserID,
         from_key: Optional[int],
+        limit: Optional[int] = None,
         room_ids: Optional[List[str]] = None,
-        include_offline: bool = True,
+        is_guest: bool = False,
         explicit_room_id: Optional[str] = None,
-        **kwargs,
+        include_offline: bool = True,
     ) -> Tuple[List[UserPresenceState], int]:
         # The process for getting presence events are:
         # 1. Get the rooms the user is in.
@@ -2074,7 +2085,7 @@ class PresenceFederationQueue:
         if self._queue_presence_updates:
             self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS)

-    def _clear_queue(self):
+    def _clear_queue(self) -> None:
         """Clear out older entries from the queue."""
         clear_before = self._clock.time_msec() - self._KEEP_ITEMS_IN_QUEUE_FOR_MS

@@ -2205,7 +2216,7 @@ class PresenceFederationQueue:

     async def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: list
-    ):
+    ) -> None:
         if stream_name != PresenceFederationStream.NAME:
             return
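`PresenceEventSource` (like the receipt and room sources later in this diff) now inherits from a generic `EventSource` base, which pins down one `get_new_events` signature for every stream and lets the `**kwargs` grab-bag be dropped. A hedged sketch of the shape of such a base class, not Synapse's exact definition:

```python
import abc
from typing import Generic, Iterable, List, Optional, Tuple, TypeVar

K = TypeVar("K")  # stream key type, e.g. int or RoomStreamToken
R = TypeVar("R")  # event type produced by the source


class EventSource(abc.ABC, Generic[K, R]):
    @abc.abstractmethod
    async def get_new_events(
        self,
        user: str,  # Synapse passes a UserID; str keeps the sketch self-contained
        from_key: K,
        limit: Optional[int],
        room_ids: Iterable[str],
        is_guest: bool,
        explicit_room_id: Optional[str] = None,
    ) -> Tuple[List[R], K]:
        ...
```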
@@ -214,11 +214,10 @@ class ProfileHandler(BaseHandler):
             target_user.localpart, displayname_to_set
         )

-        if self.hs.config.user_directory_search_all_users:
-            profile = await self.store.get_profileinfo(target_user.localpart)
-            await self.user_directory_handler.handle_local_profile_change(
-                target_user.to_string(), profile
-            )
+        profile = await self.store.get_profileinfo(target_user.localpart)
+        await self.user_directory_handler.handle_local_profile_change(
+            target_user.to_string(), profile
+        )

         await self._update_join_states(requester, target_user)

@@ -254,7 +253,7 @@ class ProfileHandler(BaseHandler):
         requester: Requester,
         new_avatar_url: str,
         by_admin: bool = False,
-    ):
+    ) -> None:
         """Set a new avatar URL for a user.

         Args:
@@ -300,18 +299,17 @@ class ProfileHandler(BaseHandler):
             target_user.localpart, avatar_url_to_set
         )

-        if self.hs.config.user_directory_search_all_users:
-            profile = await self.store.get_profileinfo(target_user.localpart)
-            await self.user_directory_handler.handle_local_profile_change(
-                target_user.to_string(), profile
-            )
+        profile = await self.store.get_profileinfo(target_user.localpart)
+        await self.user_directory_handler.handle_local_profile_change(
+            target_user.to_string(), profile
+        )

         await self._update_join_states(requester, target_user)

     async def on_profile_query(self, args: JsonDict) -> JsonDict:
         """Handles federation profile query requests."""

-        if not self.hs.config.allow_profile_lookup_over_federation:
+        if not self.hs.config.federation.allow_profile_lookup_over_federation:
             raise SynapseError(
                 403,
                 "Profile lookup over federation is disabled on this homeserver",
@@ -425,7 +423,7 @@ class ProfileHandler(BaseHandler):
             raise

     @wrap_as_background_process("Update remote profile")
-    async def _update_remote_profile_cache(self):
+    async def _update_remote_profile_cache(self) -> None:
         """Called periodically to check profiles of remote users we haven't
         checked in a while.
         """
@@ -12,11 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

 from synapse.api.constants import ReadReceiptEventFields
 from synapse.appservice import ApplicationService
 from synapse.handlers._base import BaseHandler
+from synapse.streams import EventSource
 from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id

 if TYPE_CHECKING:
@@ -163,7 +164,7 @@ class ReceiptsHandler(BaseHandler):
         await self.federation_sender.send_read_receipt(receipt)


-class ReceiptEventSource:
+class ReceiptEventSource(EventSource[int, JsonDict]):
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
         self.config = hs.config
@@ -217,7 +218,13 @@ class ReceiptEventSource:
         return visible_events

     async def get_new_events(
-        self, from_key: int, room_ids: List[str], user: UserID, **kwargs
+        self,
+        user: UserID,
+        from_key: int,
+        limit: Optional[int],
+        room_ids: Iterable[str],
+        is_guest: bool,
+        explicit_room_id: Optional[str] = None,
     ) -> Tuple[List[JsonDict], int]:
         from_key = int(from_key)
         to_key = self.get_current_key()
@@ -232,7 +239,7 @@ class ReceiptEventSource:
         if self.config.experimental.msc2285_enabled:
             events = ReceiptEventSource.filter_out_hidden(events, user.to_string())

-        return (events, to_key)
+        return events, to_key

     async def get_new_events_as(
         self, from_key: int, service: ApplicationService
@@ -264,7 +271,7 @@ class ReceiptEventSource:

         events.append(event)

-        return (events, to_key)
+        return events, to_key

     def get_current_key(self, direction: str = "f") -> int:
         return self.store.get_max_receipt_stream_id()
@@ -97,7 +97,8 @@ class RegistrationHandler(BaseHandler):
         self.ratelimiter = hs.get_registration_ratelimiter()
         self.macaroon_gen = hs.get_macaroon_generator()
         self._account_validity_handler = hs.get_account_validity_handler()
-        self._server_notices_mxid = hs.config.server_notices_mxid
+        self._user_consent_version = self.hs.config.consent.user_consent_version
+        self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
         self._server_name = hs.hostname

         self.spam_checker = hs.get_spam_checker()
@@ -126,7 +127,7 @@ class RegistrationHandler(BaseHandler):
         guest_access_token: Optional[str] = None,
         assigned_user_id: Optional[str] = None,
         allow_invalid: bool = False,
-    ):
+    ) -> None:
         # meow: allow admins to register invalid user ids
         if not allow_invalid:
             if types.contains_invalid_mxid_characters(localpart):
@@ -302,11 +303,10 @@ class RegistrationHandler(BaseHandler):
                 shadow_banned=shadow_banned,
             )

-            if self.hs.config.user_directory_search_all_users:
-                profile = await self.store.get_profileinfo(localpart)
-                await self.user_directory_handler.handle_local_profile_change(
-                    user_id, profile
-                )
+            profile = await self.store.get_profileinfo(localpart)
+            await self.user_directory_handler.handle_local_profile_change(
+                user_id, profile
+            )

         else:
             # autogen a sequential user ID
@@ -347,7 +347,7 @@ class RegistrationHandler(BaseHandler):
             auth_provider=(auth_provider_id or ""),
         ).inc()

-        if not self.hs.config.user_consent_at_registration:
+        if not self.hs.config.consent.user_consent_at_registration:
             if not self.hs.config.auto_join_rooms_for_guests and make_guest:
                 logger.info(
                     "Skipping auto-join for %s because auto-join for guests is disabled",
@@ -872,7 +872,9 @@ class RegistrationHandler(BaseHandler):
             await self._register_msisdn_threepid(user_id, threepid)

         if auth_result and LoginType.TERMS in auth_result:
-            await self._on_user_consented(user_id, self.hs.config.user_consent_version)
+            # The terms type should only exist if consent is enabled.
+            assert self._user_consent_version is not None
+            await self._on_user_consented(user_id, self._user_consent_version)

     async def _on_user_consented(self, user_id: str, consent_version: str) -> None:
         """A user consented to the terms on registration
@@ -918,8 +920,8 @@ class RegistrationHandler(BaseHandler):
             # getting mail spam where they weren't before if email
             # notifs are set up on a homeserver)
             if (
-                self.hs.config.email_enable_notifs
-                and self.hs.config.email_notif_for_new_users
+                self.hs.config.email.email_enable_notifs
+                and self.hs.config.email.email_notif_for_new_users
                 and token
             ):
                 # Pull the ID of the access token back out of the db
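The registration hunk above caches `user_consent_version` and narrows it with an `assert` before use: the attribute is an `Optional[str]`, and mypy accepts the call only once the assertion has proved it is not `None`. A minimal illustration with invented names:

```python
from typing import Optional


def on_user_consented(user_id: str, consent_version: str) -> None:
    print(user_id, "consented to", consent_version)


user_consent_version: Optional[str] = "1.0"

# The TERMS auth type should only exist when consent is configured, so the
# assert documents that invariant and narrows Optional[str] to str for mypy.
assert user_consent_version is not None
on_user_consented("@alice:example.org", user_consent_version)
```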
@ -1,6 +1,4 @@
|
||||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
# Copyright 2016-2021 The Matrix.org Foundation C.I.C.
|
||||||
# Copyright 2018-2019 New Vector Ltd
|
|
||||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -22,7 +20,16 @@ import math
|
||||||
import random
|
import random
|
||||||
import string
|
import string
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple
|
from typing import (
|
||||||
|
TYPE_CHECKING,
|
||||||
|
Any,
|
||||||
|
Awaitable,
|
||||||
|
Collection,
|
||||||
|
Dict,
|
||||||
|
List,
|
||||||
|
Optional,
|
||||||
|
Tuple,
|
||||||
|
)
|
||||||
|
|
||||||
from synapse.api.constants import (
|
from synapse.api.constants import (
|
||||||
EventContentFields,
|
EventContentFields,
|
||||||
|
@ -49,6 +56,7 @@ from synapse.events import EventBase
|
||||||
from synapse.events.utils import copy_power_levels_contents
|
from synapse.events.utils import copy_power_levels_contents
|
||||||
from synapse.rest.admin._base import assert_user_is_admin
|
from synapse.rest.admin._base import assert_user_is_admin
|
||||||
from synapse.storage.state import StateFilter
|
from synapse.storage.state import StateFilter
|
||||||
|
from synapse.streams import EventSource
|
||||||
from synapse.types import (
|
from synapse.types import (
|
||||||
JsonDict,
|
JsonDict,
|
||||||
MutableStateMap,
|
MutableStateMap,
|
||||||
|
@ -118,7 +126,7 @@ class RoomCreationHandler(BaseHandler):
|
||||||
for preset_name, preset_config in self._presets_dict.items():
|
for preset_name, preset_config in self._presets_dict.items():
|
||||||
encrypted = (
|
encrypted = (
|
||||||
preset_name
|
preset_name
|
||||||
in self.config.encryption_enabled_by_default_for_room_presets
|
in self.config.room.encryption_enabled_by_default_for_room_presets
|
||||||
)
|
)
|
||||||
preset_config["encrypted"] = encrypted
|
preset_config["encrypted"] = encrypted
|
||||||
|
|
||||||
|
@ -133,7 +141,7 @@ class RoomCreationHandler(BaseHandler):
|
||||||
self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
|
self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
|
||||||
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
|
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
|
||||||
)
|
)
|
||||||
self._server_notices_mxid = hs.config.server_notices_mxid
|
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
|
||||||
|
|
||||||
self.third_party_event_rules = hs.get_third_party_event_rules()
|
self.third_party_event_rules = hs.get_third_party_event_rules()
|
||||||
|
|
||||||
|
@ -186,7 +194,7 @@ class RoomCreationHandler(BaseHandler):
|
||||||
|
|
||||||
async def _upgrade_room(
|
async def _upgrade_room(
|
||||||
self, requester: Requester, old_room_id: str, new_version: RoomVersion
|
self, requester: Requester, old_room_id: str, new_version: RoomVersion
|
||||||
):
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Args:
|
Args:
|
||||||
requester: the user requesting the upgrade
|
requester: the user requesting the upgrade
|
||||||
|
@ -512,7 +520,7 @@ class RoomCreationHandler(BaseHandler):
|
||||||
old_room_id: str,
|
old_room_id: str,
|
||||||
new_room_id: str,
|
new_room_id: str,
|
||||||
old_room_state: StateMap[str],
|
old_room_state: StateMap[str],
|
||||||
):
|
) -> None:
|
||||||
# check to see if we have a canonical alias.
|
# check to see if we have a canonical alias.
|
||||||
canonical_alias_event = None
|
canonical_alias_event = None
|
||||||
canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
|
canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
|
||||||
|
@ -641,8 +649,16 @@ class RoomCreationHandler(BaseHandler):
|
||||||
requester, config, is_requester_admin=is_requester_admin
|
requester, config, is_requester_admin=is_requester_admin
|
||||||
)
|
)
|
||||||
|
|
||||||
if not is_requester_admin and not await self.spam_checker.user_may_create_room(
|
invite_3pid_list = config.get("invite_3pid", [])
|
||||||
user_id
|
invite_list = config.get("invite", [])
|
||||||
|
|
||||||
|
if not is_requester_admin and not (
|
||||||
|
await self.spam_checker.user_may_create_room(user_id)
|
||||||
|
and await self.spam_checker.user_may_create_room_with_invites(
|
||||||
|
user_id,
|
||||||
|
invite_list,
|
||||||
|
invite_3pid_list,
|
||||||
|
)
|
||||||
):
|
):
|
||||||
raise SynapseError(403, "You are not permitted to create rooms")
|
raise SynapseError(403, "You are not permitted to create rooms")
|
||||||
|
|
||||||
|
@ -676,8 +692,6 @@ class RoomCreationHandler(BaseHandler):
|
||||||
if mapping:
|
if mapping:
|
||||||
raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE)
|
raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE)
|
||||||
|
|
||||||
invite_3pid_list = config.get("invite_3pid", [])
|
|
||||||
invite_list = config.get("invite", [])
|
|
||||||
for i in invite_list:
|
for i in invite_list:
|
||||||
try:
|
try:
|
||||||
uid = UserID.from_string(i)
|
uid = UserID.from_string(i)
|
||||||
|
@ -759,7 +773,9 @@ class RoomCreationHandler(BaseHandler):
|
||||||
)
|
)
|
||||||
|
|
||||||
synapse/handlers/room.py

         if is_public:
-            if not self.config.is_publishing_room_allowed(user_id, room_id, room_alias):
+            if not self.config.roomdirectory.is_publishing_room_allowed(
+                user_id, room_id, room_alias
+            ):
                 # Lets just return a generic message, as there may be all sorts of
                 # reasons why we said no. TODO: Allow configurable error messages
                 # per alias creation rule?
@@ -912,7 +928,7 @@ class RoomCreationHandler(BaseHandler):

         event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}

-        def create(etype: str, content: JsonDict, **kwargs) -> JsonDict:
+        def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
             e = {"type": etype, "content": content}

             e.update(event_keys)
@@ -920,7 +936,7 @@ class RoomCreationHandler(BaseHandler):

             return e

-        async def send(etype: str, content: JsonDict, **kwargs) -> int:
+        async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
             event = create(etype, content, **kwargs)
             logger.debug("Sending %s in new room", etype)
             # Allow these events to be sent even if the user is shadow-banned to
@@ -1043,7 +1059,7 @@ class RoomCreationHandler(BaseHandler):
         creator_id: str,
         is_public: bool,
         room_version: RoomVersion,
-    ):
+    ) -> str:
         # autogen room IDs and try to create it. We may clash, so just
         # try a few times till one goes through, giving up eventually.
         attempts = 0
@@ -1107,7 +1123,7 @@ class RoomContextHandler:
         users = await self.store.get_users_in_room(room_id)
         is_peeking = user.to_string() not in users

-        async def filter_evts(events):
+        async def filter_evts(events: List[EventBase]) -> List[EventBase]:
             if use_admin_priviledge:
                 return events
             return await filter_events_for_client(
@@ -1185,7 +1201,7 @@ class RoomContextHandler:
         return results


-class RoomEventSource:
+class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
@@ -1193,8 +1209,8 @@ class RoomEventSource:
         self,
         user: UserID,
         from_key: RoomStreamToken,
-        limit: int,
-        room_ids: List[str],
+        limit: Optional[int],
+        room_ids: Collection[str],
         is_guest: bool,
         explicit_room_id: Optional[str] = None,
     ) -> Tuple[List[EventBase], RoomStreamToken]:
@@ -1237,7 +1253,7 @@ class RoomEventSource:
             else:
                 end_key = to_key

-            return (events, end_key)
+            return events, end_key

     def get_current_key(self) -> RoomStreamToken:
         return self.store.get_room_max_token()
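A note on the `_generate_room_id` hunk above: the comment explains the retry-on-clash strategy, and the new `-> str` annotation pins down what the loop must produce. A minimal standalone sketch of that pattern, assuming a hypothetical `store.store_room` that raises on an ID collision (names here are illustrative, not Synapse's actual API):

```python
import secrets
import string


class RoomIdClash(Exception):
    """Raised by the (hypothetical) store when a room ID already exists."""


def generate_room_id(store, server_name: str, max_attempts: int = 5) -> str:
    # Autogenerate room IDs and try to create the room. We may clash with an
    # existing ID, so retry a few times before giving up.
    for _ in range(max_attempts):
        localpart = "".join(secrets.choice(string.ascii_letters) for _ in range(18))
        room_id = f"!{localpart}:{server_name}"
        try:
            store.store_room(room_id)  # hypothetical storage call
            return room_id
        except RoomIdClash:
            continue
    raise RuntimeError("Couldn't generate a room ID after %d attempts" % max_attempts)
```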
synapse/handlers/room_list.py

@@ -14,7 +14,7 @@

 import logging
 from collections import namedtuple
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Optional, Tuple

 import msgpack
 from unpaddedbase64 import decode_base64, encode_base64
@@ -33,7 +33,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.types import JsonDict, ThirdPartyInstanceID
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import _CacheContext, cached
 from synapse.util.caches.response_cache import ResponseCache

 from ._base import BaseHandler
@@ -52,7 +52,7 @@ EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
 class RoomListHandler(BaseHandler):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
-        self.enable_room_list_search = hs.config.enable_room_list_search
+        self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
         self.response_cache: ResponseCache[
             Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]
         ] = ResponseCache(hs.get_clock(), "room_list")
@@ -169,7 +169,7 @@ class RoomListHandler(BaseHandler):
             ignore_non_federatable=from_federation,
         )

-        def build_room_entry(room):
+        def build_room_entry(room: JsonDict) -> JsonDict:
            entry = {
                 "room_id": room["room_id"],
                 "name": room["name"],
@@ -249,10 +249,10 @@ class RoomListHandler(BaseHandler):
         self,
         room_id: str,
         num_joined_users: int,
-        cache_context,
+        cache_context: _CacheContext,
         with_alias: bool = True,
         allow_private: bool = False,
-    ) -> Optional[dict]:
+    ) -> Optional[JsonDict]:
         """Returns the entry for a room

         Args:
@@ -507,7 +507,7 @@ class RoomListNextBatch(
             )
         )

-    def copy_and_replace(self, **kwds) -> "RoomListNextBatch":
+    def copy_and_replace(self, **kwds: Any) -> "RoomListNextBatch":
         return self._replace(**kwds)
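For context on `cache_context: _CacheContext` above: Synapse's `@cached(cache_context=True)` passes the decorated method a handle onto its own cache entry so it can invalidate results it decides not to keep. The sketch below is a much-simplified stand-in for that machinery, not the real descriptor:

```python
import functools
from typing import Any, Callable, Dict, Tuple


class _CacheContext:
    """Handle a cached function can use to evict its own cache entry."""

    def __init__(self, cache: Dict[Tuple[Any, ...], Any], key: Tuple[Any, ...]):
        self._cache = cache
        self._key = key

    def invalidate(self) -> None:
        self._cache.pop(self._key, None)


def cached_with_context(func: Callable) -> Callable:
    cache: Dict[Tuple[Any, ...], Any] = {}

    @functools.wraps(func)
    def wrapper(*args: Any) -> Any:
        if args in cache:
            return cache[args]
        cache[args] = None  # reserve the slot so invalidate() has a target
        result = func(*args, cache_context=_CacheContext(cache, args))
        if args in cache:  # still present unless the function invalidated itself
            cache[args] = result
        return result

    return wrapper


@cached_with_context
def room_entry(room_id: str, *, cache_context: _CacheContext) -> dict:
    entry = {"room_id": room_id}
    if room_id.startswith("!private"):
        cache_context.invalidate()  # don't cache entries we may not reuse
    return entry


assert room_entry("!a:example.org") == {"room_id": "!a:example.org"}
assert room_entry("!private:example.org")["room_id"] == "!private:example.org"
```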
synapse/handlers/room_member.py

@@ -88,7 +88,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
         self.third_party_event_rules = hs.get_third_party_event_rules()
-        self._server_notices_mxid = self.config.server_notices_mxid
+        self._server_notices_mxid = self.config.servernotices.server_notices_mxid
         self._enable_lookup = hs.config.enable_3pid_lookup
         self.allow_per_room_profiles = self.config.allow_per_room_profiles

@@ -225,7 +225,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         room_id: Optional[str],
         n_invites: int,
         update: bool = True,
-    ):
+    ) -> None:
         """Ratelimit more than one invite sent by the given requester in the given room.

         Args:
@@ -249,7 +249,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         requester: Optional[Requester],
         room_id: Optional[str],
         invitee_user_id: str,
-    ):
+    ) -> None:
         """Ratelimit invites by room and by target user.

         If room ID is missing then we just rate limit by target user.
@@ -386,7 +386,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         return result_event.event_id, result_event.internal_metadata.stream_ordering

     async def copy_room_tags_and_direct_to_room(
-        self, old_room_id, new_room_id, user_id
+        self, old_room_id: str, new_room_id: str, user_id: str
     ) -> None:
         """Copies the tags and direct room state from one room to another.

@@ -668,7 +668,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     " (membership=%s)" % old_membership,
                     errcode=Codes.BAD_STATE,
                 )
-            if old_membership == "ban" and action != "unban":
+            if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
                 raise SynapseError(
                     403,
                     "Cannot %s user who was banned" % (action,),
@@ -1030,7 +1030,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         event: EventBase,
         context: EventContext,
         ratelimit: bool = True,
-    ):
+    ) -> None:
         """
         Change the membership status of a user in a room.

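The `old_membership == "ban"` hunk is the user-visible bugfix here: unbanning a user by sending a plain membership event used to be rejected along with everything else. Reduced to a self-contained check, with `PermissionError` standing in for `SynapseError(403, ...)`:

```python
ALLOWED_WHILE_BANNED = {"ban", "unban", "leave"}


def check_membership_change(old_membership: str, action: str) -> None:
    # Only these actions remain legal while the target is banned; everything
    # else (invite, join, kick) is still refused.
    if old_membership == "ban" and action not in ALLOWED_WHILE_BANNED:
        raise PermissionError(f"Cannot {action} user who was banned")


check_membership_change("ban", "unban")  # explicit unban: fine
check_membership_change("ban", "leave")  # membership-event unban: fixed by this hunk
check_membership_change("ban", "ban")    # re-ban is idempotent: fine
```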
synapse/handlers/room_summary.py

@@ -541,7 +541,7 @@ class RoomSummaryHandler:
         origin: str,
         requested_room_id: str,
         suggested_only: bool,
-    ):
+    ) -> JsonDict:
         """
         Implementation of the room hierarchy Federation API.

@@ -1179,4 +1179,4 @@ def _child_events_comparison_key(
         order = None

     # Items without an order come last.
-    return (order is None, order, child.origin_server_ts, child.room_id)
+    return order is None, order, child.origin_server_ts, child.room_id
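The `_child_events_comparison_key` hunk only drops redundant parentheses, but the tuple is worth unpacking: `order is None` sorts ordered children first, and because tuple comparison short-circuits on the first differing element, the `None` order values are never compared with `<`. A self-contained illustration:

```python
from typing import Optional, Tuple


def comparison_key(
    order: Optional[str], origin_server_ts: int, room_id: str
) -> Tuple[bool, Optional[str], int, str]:
    # Items without an order come last: False < True, so ordered items win.
    # When both orders are None, tuple comparison finds them equal and falls
    # through to the timestamp, never evaluating `None < None`.
    return order is None, order, origin_server_ts, room_id


children = [
    (None, 300, "!c:example.org"),
    ("aaa", 200, "!b:example.org"),
    (None, 100, "!a:example.org"),
]
children.sort(key=lambda c: comparison_key(*c))
assert [c[2] for c in children] == [
    "!b:example.org",
    "!a:example.org",
    "!c:example.org",
]
```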
synapse/handlers/saml.py

@@ -40,33 +40,32 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
 class Saml2SessionData:
     """Data we track about SAML2 sessions"""

     # time the session was created, in milliseconds
-    creation_time = attr.ib()
+    creation_time: int
     # The user interactive authentication session ID associated with this SAML
     # session (or None if this SAML session is for an initial login).
-    ui_auth_session_id = attr.ib(type=Optional[str], default=None)
+    ui_auth_session_id: Optional[str] = None


 class SamlHandler(BaseHandler):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
-        self._saml_client = Saml2Client(hs.config.saml2_sp_config)
-        self._saml_idp_entityid = hs.config.saml2_idp_entityid
+        self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config)
+        self._saml_idp_entityid = hs.config.saml2.saml2_idp_entityid

-        self._saml2_session_lifetime = hs.config.saml2_session_lifetime
+        self._saml2_session_lifetime = hs.config.saml2.saml2_session_lifetime
         self._grandfathered_mxid_source_attribute = (
-            hs.config.saml2_grandfathered_mxid_source_attribute
+            hs.config.saml2.saml2_grandfathered_mxid_source_attribute
         )
         self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements
-        self._error_template = hs.config.sso_error_template

         # plugin to do custom mapping from saml response to mxid
-        self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
-            hs.config.saml2_user_mapping_provider_config,
+        self._user_mapping_provider = hs.config.saml2.saml2_user_mapping_provider_class(
+            hs.config.saml2.saml2_user_mapping_provider_config,
             ModuleApi(hs, hs.get_auth_handler()),
         )
@@ -359,7 +358,7 @@ class SamlHandler(BaseHandler):

         return remote_user_id

-    def expire_sessions(self):
+    def expire_sessions(self) -> None:
         expire_before = self.clock.time_msec() - self._saml2_session_lifetime
         to_expire = set()
         for reqid, data in self._outstanding_requests_dict.items():
@@ -391,10 +390,10 @@ MXID_MAPPER_MAP: Dict[str, Callable[[str], str]] = {
 }


-@attr.s
+@attr.s(auto_attribs=True)
 class SamlConfig:
-    mxid_source_attribute = attr.ib()
-    mxid_mapper = attr.ib()
+    mxid_source_attribute: str
+    mxid_mapper: Callable[[str], str]


 class DefaultSamlMappingProvider:
@@ -411,7 +410,7 @@ class DefaultSamlMappingProvider:
         self._mxid_mapper = parsed_config.mxid_mapper

         self._grandfathered_mxid_source_attribute = (
-            module_api._hs.config.saml2_grandfathered_mxid_source_attribute
+            module_api._hs.config.saml2.saml2_grandfathered_mxid_source_attribute
         )

     def get_remote_user_id(
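The attrs changes above (`Saml2SessionData`, `SamlConfig`) follow the `auto_attribs=True` style, where an annotated class attribute declares a field without an explicit `attr.ib()`. A before/after sketch (requires the `attrs` package):

```python
from typing import Optional

import attr


@attr.s(slots=True)
class SessionDataOld:
    creation_time = attr.ib(type=int)
    ui_auth_session_id = attr.ib(type=Optional[str], default=None)


@attr.s(slots=True, auto_attribs=True)
class SessionDataNew:
    # With auto_attribs, the annotation alone declares the field, and a
    # default is just an ordinary assignment.
    creation_time: int
    ui_auth_session_id: Optional[str] = None


assert SessionDataOld(creation_time=1) == SessionDataOld(1)
assert SessionDataNew(1).ui_auth_session_id is None
```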
synapse/handlers/send_email.py

@@ -17,7 +17,7 @@ import logging
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
 from io import BytesIO
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Any, Optional

 from pkg_resources import parse_version

@@ -79,7 +79,7 @@ async def _sendmail(
     msg = BytesIO(msg_bytes)
     d: "Deferred[object]" = Deferred()

-    def build_sender_factory(**kwargs) -> ESMTPSenderFactory:
+    def build_sender_factory(**kwargs: Any) -> ESMTPSenderFactory:
         return ESMTPSenderFactory(
             username,
             password,
synapse/handlers/sso.py

@@ -184,15 +184,17 @@ class SsoHandler:
         self._server_name = hs.hostname
         self._registration_handler = hs.get_registration_handler()
         self._auth_handler = hs.get_auth_handler()
-        self._error_template = hs.config.sso_error_template
-        self._bad_user_template = hs.config.sso_auth_bad_user_template
+        self._error_template = hs.config.sso.sso_error_template
+        self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
         self._profile_handler = hs.get_profile_handler()

         # The following template is shown after a successful user interactive
         # authentication session. It tells the user they can close the window.
-        self._sso_auth_success_template = hs.config.sso_auth_success_template
+        self._sso_auth_success_template = hs.config.sso.sso_auth_success_template

-        self._sso_update_profile_information = hs.config.sso_update_profile_information
+        self._sso_update_profile_information = (
+            hs.config.sso.sso_update_profile_information
+        )

         # a lock on the mappings
         self._mapping_lock = Linearizer(name="sso_user_mapping", clock=hs.get_clock())
@@ -205,7 +207,7 @@ class SsoHandler:

         self._consent_at_registration = hs.config.consent.user_consent_at_registration

-    def register_identity_provider(self, p: SsoIdentityProvider):
+    def register_identity_provider(self, p: SsoIdentityProvider) -> None:
         p_id = p.idp_id
         assert p_id not in self._identity_providers
         self._identity_providers[p_id] = p
@@ -856,7 +858,7 @@ class SsoHandler:

     async def handle_terms_accepted(
         self, request: Request, session_id: str, terms_version: str
-    ):
+    ) -> None:
         """Handle a request to the new-user 'consent' endpoint

         Will serve an HTTP response to the request.
@@ -959,7 +961,7 @@ class SsoHandler:
             new_user=True,
         )

-    def _expire_old_sessions(self):
+    def _expire_old_sessions(self) -> None:
         to_expire = []
         now = int(self._clock.time_msec())

synapse/handlers/stats.py

@@ -46,7 +46,7 @@ class StatsHandler:
         self.notifier = hs.get_notifier()
         self.is_mine_id = hs.is_mine_id

-        self.stats_enabled = hs.config.stats_enabled
+        self.stats_enabled = hs.config.stats.stats_enabled

         # The current position in the current_state_delta stream
         self.pos: Optional[int] = None
@@ -68,7 +68,7 @@ class StatsHandler:

         self._is_processing = True

-        async def process():
+        async def process() -> None:
             try:
                 await self._unsafe_process()
             finally:
synapse/handlers/sync.py

@@ -364,7 +364,9 @@ class SyncHandler:
             )
         else:

-            async def current_sync_callback(before_token, after_token) -> SyncResult:
+            async def current_sync_callback(
+                before_token: StreamToken, after_token: StreamToken
+            ) -> SyncResult:
                 return await self.current_sync_for_user(sync_config, since_token)

             result = await self.notifier.wait_for_events(
@@ -441,7 +443,7 @@ class SyncHandler:

         room_ids = sync_result_builder.joined_room_ids

-        typing_source = self.event_sources.sources["typing"]
+        typing_source = self.event_sources.sources.typing
         typing, typing_key = await typing_source.get_new_events(
             user=sync_config.user,
             from_key=typing_key,
@@ -463,7 +465,7 @@ class SyncHandler:

         receipt_key = since_token.receipt_key if since_token else 0

-        receipt_source = self.event_sources.sources["receipt"]
+        receipt_source = self.event_sources.sources.receipt
         receipts, receipt_key = await receipt_source.get_new_events(
             user=sync_config.user,
             from_key=receipt_key,
@@ -1089,7 +1091,7 @@ class SyncHandler:
         block_all_presence_data = (
             since_token is None and sync_config.filter_collection.blocks_all_presence()
         )
-        if self.hs_config.use_presence and not block_all_presence_data:
+        if self.hs_config.server.use_presence and not block_all_presence_data:
             logger.debug("Fetching presence data")
             await self._generate_sync_entry_for_presence(
                 sync_result_builder,
@@ -1412,7 +1414,7 @@ class SyncHandler:
         sync_config = sync_result_builder.sync_config
         user = sync_result_builder.sync_config.user

-        presence_source = self.event_sources.sources["presence"]
+        presence_source = self.event_sources.sources.presence

         since_token = sync_result_builder.since_token
         presence_key = None
@@ -1531,9 +1533,9 @@ class SyncHandler:
         newly_joined_rooms = room_changes.newly_joined_rooms
         newly_left_rooms = room_changes.newly_left_rooms

-        async def handle_room_entries(room_entry: "RoomSyncResultBuilder"):
+        async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None:
             logger.debug("Generating room entry for %s", room_entry.room_id)
-            res = await self._generate_room_entry(
+            await self._generate_room_entry(
                 sync_result_builder,
                 ignored_users,
                 room_entry,
@@ -1543,7 +1545,6 @@ class SyncHandler:
                 always_include=sync_result_builder.full_state,
             )
             logger.debug("Generated room entry for %s", room_entry.room_id)
-            return res

         await concurrently_execute(handle_room_entries, room_entries, 10)

@@ -1924,7 +1925,7 @@ class SyncHandler:
         tags: Optional[Dict[str, Dict[str, Any]]],
         account_data: Dict[str, JsonDict],
         always_include: bool = False,
-    ):
+    ) -> None:
         """Populates the `joined` and `archived` section of `sync_result_builder`
         based on the `room_builder`.

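The repeated `sources["typing"]` to `sources.typing` change reflects the sources container becoming a typed attrs class rather than a dict, so misspelled source names fail loudly and mypy knows each source's type. A toy version of the idea (the container and source classes here are illustrative):

```python
import attr


class TypingSource:
    def get_current_key(self) -> int:
        return 0


class ReceiptSource:
    def get_current_key(self) -> int:
        return 0


@attr.s(frozen=True, slots=True, auto_attribs=True)
class EventSources:
    typing: TypingSource
    receipt: ReceiptSource


sources = EventSources(typing=TypingSource(), receipt=ReceiptSource())

# Attribute access instead of sources["typing"]: mypy sees the exact type,
# and a misspelled name raises AttributeError immediately.
assert sources.typing.get_current_key() == 0
```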
synapse/handlers/typing.py

@@ -23,6 +23,7 @@ from synapse.metrics.background_process_metrics import (
     wrap_as_background_process,
 )
 from synapse.replication.tcp.streams import TypingStream
+from synapse.streams import EventSource
 from synapse.types import JsonDict, Requester, UserID, get_domain_from_id
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.metrics import Measure
@@ -439,7 +440,7 @@ class TypingWriterHandler(FollowerTypingHandler):
         raise Exception("Typing writer instance got typing info over replication")


-class TypingNotificationEventSource:
+class TypingNotificationEventSource(EventSource[int, JsonDict]):
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.clock = hs.get_clock()
@@ -482,10 +483,16 @@ class TypingNotificationEventSource:

                 events.append(self._make_event_for(room_id))

-        return (events, handler._latest_room_serial)
+        return events, handler._latest_room_serial

     async def get_new_events(
-        self, from_key: int, room_ids: Iterable[str], **kwargs
+        self,
+        user: UserID,
+        from_key: int,
+        limit: Optional[int],
+        room_ids: Iterable[str],
+        is_guest: bool,
+        explicit_room_id: Optional[str] = None,
     ) -> Tuple[List[JsonDict], int]:
         with Measure(self.clock, "typing.get_new_events"):
             from_key = int(from_key)
@@ -500,7 +507,7 @@ class TypingNotificationEventSource:

                 events.append(self._make_event_for(room_id))

-            return (events, handler._latest_room_serial)
+            return events, handler._latest_room_serial

     def get_current_key(self) -> int:
         return self.get_typing_handler()._latest_room_serial
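`EventSource[int, JsonDict]` above is the new generic base for notifier sources; the widened `get_new_events` signature brings `TypingNotificationEventSource` in line with `RoomEventSource` earlier. A rough sketch of what such an interface looks like, with simplified types (this is not Synapse's exact class):

```python
from abc import ABC, abstractmethod
from typing import Generic, Iterable, List, Optional, Tuple, TypeVar

K = TypeVar("K")  # stream key: an int serial for typing, a token for rooms
R = TypeVar("R")  # returned event type


class EventSource(ABC, Generic[K, R]):
    @abstractmethod
    async def get_new_events(
        self,
        user: str,
        from_key: K,
        limit: Optional[int],
        room_ids: Iterable[str],
        is_guest: bool,
        explicit_room_id: Optional[str] = None,
    ) -> Tuple[List[R], K]:
        ...


class CounterEventSource(EventSource[int, str]):
    """Trivial source: each poll returns one synthetic event and bumps the key."""

    async def get_new_events(
        self,
        user: str,
        from_key: int,
        limit: Optional[int],
        room_ids: Iterable[str],
        is_guest: bool,
        explicit_room_id: Optional[str] = None,
    ) -> Tuple[List[str], int]:
        return [f"event-{from_key}"], from_key + 1
```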
synapse/handlers/ui_auth/checkers.py

@@ -70,7 +70,7 @@ class DummyAuthChecker(UserInteractiveAuthChecker):
 class TermsAuthChecker(UserInteractiveAuthChecker):
     AUTH_TYPE = LoginType.TERMS

-    def is_enabled(self):
+    def is_enabled(self) -> bool:
         return True

     async def check_auth(self, authdict: dict, clientip: str) -> Any:
@@ -82,10 +82,10 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):

     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
-        self._enabled = bool(hs.config.recaptcha_private_key)
+        self._enabled = bool(hs.config.captcha.recaptcha_private_key)
         self._http_client = hs.get_proxied_http_client()
-        self._url = hs.config.recaptcha_siteverify_api
-        self._secret = hs.config.recaptcha_private_key
+        self._url = hs.config.captcha.recaptcha_siteverify_api
+        self._secret = hs.config.captcha.recaptcha_private_key

     def is_enabled(self) -> bool:
         return self._enabled
@@ -161,12 +161,17 @@ class _BaseThreepidAuthChecker:
                 self.hs.config.account_threepid_delegate_msisdn, threepid_creds
             )
         elif medium == "email":
-            if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+            if (
+                self.hs.config.email.threepid_behaviour_email
+                == ThreepidBehaviour.REMOTE
+            ):
                 assert self.hs.config.account_threepid_delegate_email
                 threepid = await identity_handler.threepid_from_creds(
                     self.hs.config.account_threepid_delegate_email, threepid_creds
                 )
-            elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+            elif (
+                self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
+            ):
                 threepid = None
                 row = await self.store.get_threepid_validation_session(
                     medium,
@@ -218,7 +223,7 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec
         _BaseThreepidAuthChecker.__init__(self, hs)

     def is_enabled(self) -> bool:
-        return self.hs.config.threepid_behaviour_email in (
+        return self.hs.config.email.threepid_behaviour_email in (
             ThreepidBehaviour.REMOTE,
             ThreepidBehaviour.LOCAL,
         )
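The common thread in these hunks — `hs.config.recaptcha_private_key` becoming `hs.config.captcha.recaptcha_private_key`, and similarly for `email`, `sso`, `saml2`, `stats` and friends — is the 1.44 move to group settings onto per-area config objects instead of flattening everything onto the root. A toy model of that layout (class and field names are illustrative):

```python
import attr


@attr.s(auto_attribs=True)
class CaptchaConfig:
    recaptcha_private_key: str = ""
    recaptcha_siteverify_api: str = "https://www.recaptcha.net/recaptcha/api/siteverify"


@attr.s(auto_attribs=True)
class EmailConfig:
    threepid_behaviour_email: str = "local"


@attr.s(auto_attribs=True)
class RootConfig:
    # Each functional area owns its own settings object, so call sites read
    # config.captcha.recaptcha_private_key rather than a flat root attribute.
    captcha: CaptchaConfig = attr.Factory(CaptchaConfig)
    email: EmailConfig = attr.Factory(EmailConfig)


config = RootConfig()
enabled = bool(config.captcha.recaptcha_private_key)  # False until configured
assert not enabled
```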
synapse/handlers/user_directory.py

@@ -61,7 +61,7 @@ class UserDirectoryHandler(StateDeltasHandler):
         self.notifier = hs.get_notifier()
         self.is_mine_id = hs.is_mine_id
         self.update_user_directory = hs.config.update_user_directory
-        self.search_all_users = hs.config.user_directory_search_all_users
+        self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
         self.spam_checker = hs.get_spam_checker()
         # The current position in the current_state_delta stream
         self.pos: Optional[int] = None
@@ -114,7 +114,7 @@ class UserDirectoryHandler(StateDeltasHandler):
         if self._is_processing:
             return

-        async def process():
+        async def process() -> None:
             try:
                 await self._unsafe_process()
             finally:
synapse/http/client.py

@@ -322,8 +322,11 @@ class SimpleHttpClient:

         self.user_agent = user_agent or hs.version_string
         self.clock = hs.get_clock()
-        if hs.config.user_agent_suffix:
-            self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix)
+        if hs.config.server.user_agent_suffix:
+            self.user_agent = "%s %s" % (
+                self.user_agent,
+                hs.config.server.user_agent_suffix,
+            )

         # We use this for our body producers to ensure that they use the correct
         # reactor.
synapse/http/matrixfederationclient.py

@@ -66,7 +66,7 @@ from synapse.http.client import (
 )
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.logging import opentracing
-from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.types import JsonDict
 from synapse.util import json_decoder
@@ -465,8 +465,9 @@ class MatrixFederationHttpClient:
             _sec_timeout = self.default_timeout

         if (
-            self.hs.config.federation_domain_whitelist is not None
-            and request.destination not in self.hs.config.federation_domain_whitelist
+            self.hs.config.federation.federation_domain_whitelist is not None
+            and request.destination
+            not in self.hs.config.federation.federation_domain_whitelist
         ):
             raise FederationDeniedError(request.destination)

@@ -553,20 +554,29 @@ class MatrixFederationHttpClient:
                 with Measure(self.clock, "outbound_request"):
                     # we don't want all the fancy cookie and redirect handling
                     # that treq.request gives: just use the raw Agent.
-                    request_deferred = self.agent.request(
+
+                    # To preserve the logging context, the timeout is treated
+                    # in a similar way to `defer.gatherResults`:
+                    # * Each logging context-preserving fork is wrapped in
+                    #   `run_in_background`. In this case there is only one,
+                    #   since the timeout fork is not logging-context aware.
+                    # * The `Deferred` that joins the forks back together is
+                    #   wrapped in `make_deferred_yieldable` to restore the
+                    #   logging context regardless of the path taken.
+                    request_deferred = run_in_background(
+                        self.agent.request,
                         method_bytes,
                         url_bytes,
                         headers=Headers(headers_dict),
                         bodyProducer=producer,
                     )

                     request_deferred = timeout_deferred(
                         request_deferred,
                         timeout=_sec_timeout,
                         reactor=self.reactor,
                     )

-                    response = await request_deferred
+                    response = await make_deferred_yieldable(request_deferred)
             except DNSLookupError as e:
                 raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
             except Exception as e:
@@ -1177,7 +1187,7 @@ class MatrixFederationHttpClient:
             request.method,
             request.uri.decode("ascii"),
         )
-        return (length, headers)
+        return length, headers


 def _flatten_response_never_received(e):
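The comment block added in the middle hunk is the substance of this change. Reduced to its shape, the pattern reads as below; `fetch_with_timeout` and its argument are illustrative, while `run_in_background`, `timeout_deferred`, and `make_deferred_yieldable` are the real helpers used in the hunk:

```python
from twisted.internet import reactor

from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util.async_helpers import timeout_deferred


async def fetch_with_timeout(agent_request, timeout: float = 60.0):
    # Fork: run_in_background starts the request immediately and follows the
    # logcontext rules for the forked Deferred, which timeout_deferred (not
    # being logcontext-aware) could not do on its own.
    request_deferred = run_in_background(agent_request)

    # The timeout wraps the already-forked Deferred.
    request_deferred = timeout_deferred(
        request_deferred, timeout=timeout, reactor=reactor
    )

    # Join: make_deferred_yieldable restores the caller's logcontext whichever
    # way the Deferred completes (success, failure, or cancellation by timeout).
    return await make_deferred_yieldable(request_deferred)
```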
synapse/http/server.py

@@ -21,7 +21,6 @@ import types
 import urllib
 from http import HTTPStatus
 from inspect import isawaitable
-from io import BytesIO
 from typing import (
     Any,
     Awaitable,
@@ -37,7 +36,7 @@ from typing import (
 )

 import jinja2
-from canonicaljson import iterencode_canonical_json
+from canonicaljson import encode_canonical_json
 from typing_extensions import Protocol
 from zope.interface import implementer

@@ -45,7 +44,7 @@ from twisted.internet import defer, interfaces
 from twisted.python import failure
 from twisted.web import resource
 from twisted.web.server import NOT_DONE_YET, Request
-from twisted.web.static import File, NoRangeStaticProducer
+from twisted.web.static import File
 from twisted.web.util import redirectTo

 from synapse.api.errors import (
@@ -56,10 +55,11 @@ from synapse.api.errors import (
     UnrecognizedRequestError,
 )
 from synapse.http.site import SynapseRequest
-from synapse.logging.context import preserve_fn
+from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
 from synapse.logging.opentracing import trace_servlet
 from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
+from synapse.util.iterutils import chunk_seq

 logger = logging.getLogger(__name__)

@@ -320,7 +320,7 @@ class DirectServeJsonResource(_AsyncResource):

     def _send_response(
         self,
-        request: Request,
+        request: SynapseRequest,
         code: int,
         response_object: Any,
     ):
@@ -561,9 +561,17 @@ class _ByteProducer:
         self._iterator = iterator
         self._paused = False

-        # Register the producer and start producing data.
-        self._request.registerProducer(self, True)
-        self.resumeProducing()
+        try:
+            self._request.registerProducer(self, True)
+        except RuntimeError as e:
+            logger.info("Connection disconnected before response was written: %r", e)
+
+            # We drop our references to data we'll not use.
+            self._request = None
+            self._iterator = iter(())
+        else:
+            # Start producing if `registerProducer` was successful
+            self.resumeProducing()

     def _send_data(self, data: List[bytes]) -> None:
         """
@@ -620,16 +628,15 @@ class _ByteProducer:
         self._request = None


-def _encode_json_bytes(json_object: Any) -> Iterator[bytes]:
+def _encode_json_bytes(json_object: Any) -> bytes:
     """
     Encode an object into JSON. Returns an iterator of bytes.
     """
-    for chunk in json_encoder.iterencode(json_object):
-        yield chunk.encode("utf-8")
+    return json_encoder.encode(json_object).encode("utf-8")


 def respond_with_json(
-    request: Request,
+    request: SynapseRequest,
     code: int,
     json_object: Any,
     send_cors: bool = False,
@@ -659,7 +666,7 @@ def respond_with_json(
         return None

     if canonical_json:
-        encoder = iterencode_canonical_json
+        encoder = encode_canonical_json
     else:
         encoder = _encode_json_bytes

@@ -670,7 +677,9 @@ def respond_with_json(
     if send_cors:
         set_cors_headers(request)

-    _ByteProducer(request, encoder(json_object))
+    run_in_background(
+        _async_write_json_to_request_in_thread, request, encoder, json_object
+    )
     return NOT_DONE_YET

@@ -706,15 +715,56 @@ def respond_with_json_bytes(
     if send_cors:
         set_cors_headers(request)

-    # note that this is zero-copy (the bytesio shares a copy-on-write buffer with
-    # the original `bytes`).
-    bytes_io = BytesIO(json_bytes)
-
-    producer = NoRangeStaticProducer(request, bytes_io)
-    producer.start()
+    _write_bytes_to_request(request, json_bytes)
     return NOT_DONE_YET


+async def _async_write_json_to_request_in_thread(
+    request: SynapseRequest,
+    json_encoder: Callable[[Any], bytes],
+    json_object: Any,
+):
+    """Encodes the given JSON object on a thread and then writes it to the
+    request.
+
+    This is done so that encoding large JSON objects doesn't block the reactor
+    thread.
+
+    Note: We don't use JsonEncoder.iterencode here as that falls back to the
+    Python implementation (rather than the C backend), which is *much* more
+    expensive.
+    """
+
+    json_str = await defer_to_thread(request.reactor, json_encoder, json_object)
+
+    _write_bytes_to_request(request, json_str)
+
+
+def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
+    """Writes the bytes to the request using an appropriate producer.
+
+    Note: This should be used instead of `Request.write` to correctly handle
+    large response bodies.
+    """
+
+    # The problem with dumping all of the response into the `Request` object at
+    # once (via `Request.write`) is that doing so starts the timeout for the
+    # next request to be received: so if it takes longer than 60s to stream back
+    # the response to the client, the client never gets it.
+    #
+    # The correct solution is to use a Producer; then the timeout is only
+    # started once all of the content is sent over the TCP connection.
+
+    # To make sure we don't write all of the bytes at once we split it up into
+    # chunks.
+    chunk_size = 4096
+    bytes_generator = chunk_seq(bytes_to_write, chunk_size)
+
+    # We use a `_ByteProducer` here rather than `NoRangeStaticProducer` as the
+    # unit tests can't cope with being given a pull producer.
+    _ByteProducer(request, bytes_generator)
+
+
 def set_cors_headers(request: Request):
     """Set the CORS headers so that javascript running in a web browsers can
     use this API
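Of the helpers added above, the chunking half of `_write_bytes_to_request` is easy to demonstrate in isolation: `chunk_seq` (imported from `synapse.util.iterutils` in the first hunk) just slices a sequence into bounded pieces for the producer to write one at a time:

```python
from synapse.util.iterutils import chunk_seq

payload = b"x" * 10_000

# Slice the response body into 4096-byte pieces; the producer then writes one
# piece per resumeProducing() call instead of dumping the whole body at once.
chunks = list(chunk_seq(payload, 4096))

assert [len(c) for c in chunks] == [4096, 4096, 1808]
assert b"".join(chunks) == payload
```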
synapse/http/site.py

@@ -14,14 +14,15 @@
 import contextlib
 import logging
 import time
-from typing import Optional, Tuple, Union
+from typing import Generator, Optional, Tuple, Union

 import attr
 from zope.interface import implementer

 from twisted.internet.interfaces import IAddress, IReactorTime
 from twisted.python.failure import Failure
-from twisted.web.resource import IResource
+from twisted.web.http import HTTPChannel
+from twisted.web.resource import IResource, Resource
 from twisted.web.server import Request, Site

 from synapse.config.server import ListenerConfig
@@ -61,10 +62,18 @@ class SynapseRequest(Request):
         logcontext: the log context for this request
     """

-    def __init__(self, channel, *args, max_request_body_size=1024, **kw):
-        Request.__init__(self, channel, *args, **kw)
+    def __init__(
+        self,
+        channel: HTTPChannel,
+        site: "SynapseSite",
+        *args,
+        max_request_body_size: int = 1024,
+        **kw,
+    ):
+        super().__init__(channel, *args, **kw)
         self._max_request_body_size = max_request_body_size
-        self.site: SynapseSite = channel.site
+        self.synapse_site = site
+        self.reactor = site.reactor
         self._channel = channel  # this is used by the tests
         self.start_time = 0.0

@@ -83,13 +92,13 @@ class SynapseRequest(Request):
         self._is_processing = False

         # the time when the asynchronous request handler completed its processing
-        self._processing_finished_time = None
+        self._processing_finished_time: Optional[float] = None

         # what time we finished sending the response to the client (or the connection
         # dropped)
-        self.finish_time = None
+        self.finish_time: Optional[float] = None

-    def __repr__(self):
+    def __repr__(self) -> str:
         # We overwrite this so that we don't log ``access_token``
         return "<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>" % (
             self.__class__.__name__,
@@ -97,10 +106,10 @@ class SynapseRequest(Request):
             self.get_method(),
             self.get_redacted_uri(),
             self.clientproto.decode("ascii", errors="replace"),
-            self.site.site_tag,
+            self.synapse_site.site_tag,
         )

-    def handleContentChunk(self, data):
+    def handleContentChunk(self, data: bytes) -> None:
         # we should have a `content` by now.
         assert self.content, "handleContentChunk() called before gotLength()"
         if self.content.tell() + len(data) > self._max_request_body_size:
@@ -139,7 +148,7 @@ class SynapseRequest(Request):
         # If there's no authenticated entity, it was the requester.
         self.logcontext.request.authenticated_entity = authenticated_entity or requester

-    def get_request_id(self):
+    def get_request_id(self) -> str:
         return "%s-%i" % (self.get_method(), self.request_seq)

     def get_redacted_uri(self) -> str:
@@ -205,7 +214,7 @@ class SynapseRequest(Request):

         return None, None

-    def render(self, resrc):
+    def render(self, resrc: Resource) -> None:
         # this is called once a Resource has been found to serve the request; in our
         # case the Resource in question will normally be a JsonResource.

@@ -216,7 +225,7 @@ class SynapseRequest(Request):
             request=ContextRequest(
                 request_id=request_id,
                 ip_address=self.getClientIP(),
-                site_tag=self.site.site_tag,
+                site_tag=self.synapse_site.site_tag,
                 # The requester is going to be unknown at this point.
                 requester=None,
                 authenticated_entity=None,
@@ -228,7 +237,7 @@ class SynapseRequest(Request):
         )

         # override the Server header which is set by twisted
-        self.setHeader("Server", self.site.server_version_string)
+        self.setHeader("Server", self.synapse_site.server_version_string)

         with PreserveLoggingContext(self.logcontext):
             # we start the request metrics timer here with an initial stab
@@ -247,7 +256,7 @@ class SynapseRequest(Request):
         requests_counter.labels(self.get_method(), self.request_metrics.name).inc()

     @contextlib.contextmanager
-    def processing(self):
+    def processing(self) -> Generator[None, None, None]:
         """Record the fact that we are processing this request.

         Returns a context manager; the correct way to use this is:
@@ -282,7 +291,7 @@ class SynapseRequest(Request):
         if self.finish_time is not None:
             self._finished_processing()

-    def finish(self):
+    def finish(self) -> None:
         """Called when all response data has been written to this Request.

         Overrides twisted.web.server.Request.finish to record the finish time and do
@@ -295,7 +304,7 @@ class SynapseRequest(Request):
         with PreserveLoggingContext(self.logcontext):
             self._finished_processing()

-    def connectionLost(self, reason):
+    def connectionLost(self, reason: Union[Failure, Exception]) -> None:
         """Called when the client connection is closed before the response is written.

         Overrides twisted.web.server.Request.connectionLost to record the finish time and
@@ -327,7 +336,7 @@ class SynapseRequest(Request):
         if not self._is_processing:
             self._finished_processing()

-    def _started_processing(self, servlet_name):
+    def _started_processing(self, servlet_name: str) -> None:
         """Record the fact that we are processing this request.

         This will log the request's arrival. Once the request completes,
@@ -346,17 +355,19 @@ class SynapseRequest(Request):
             self.start_time, name=servlet_name, method=self.get_method()
         )

-        self.site.access_logger.debug(
+        self.synapse_site.access_logger.debug(
             "%s - %s - Received request: %s %s",
             self.getClientIP(),
-            self.site.site_tag,
+            self.synapse_site.site_tag,
             self.get_method(),
             self.get_redacted_uri(),
         )

-    def _finished_processing(self):
+    def _finished_processing(self) -> None:
         """Log the completion of this request and update the metrics"""
         assert self.logcontext is not None
+        assert self.finish_time is not None

         usage = self.logcontext.get_resource_usage()

         if self._processing_finished_time is None:
@@ -386,13 +397,13 @@ class SynapseRequest(Request):
         if authenticated_entity:
             requester = f"{authenticated_entity}|{requester}"

-        self.site.access_logger.log(
+        self.synapse_site.access_logger.log(
             log_level,
             "%s - %s - {%s}"
             " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
             ' %sB %s "%s %s %s" "%s" [%d dbevts]',
             self.getClientIP(),
-            self.site.site_tag,
+            self.synapse_site.site_tag,
             requester,
             processing_time,
             response_send_time,
@@ -437,7 +448,7 @@ class XForwardedForRequest(SynapseRequest):
     _forwarded_for: "Optional[_XForwardedForAddress]" = None
     _forwarded_https: bool = False

-    def requestReceived(self, command, path, version):
+    def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None:
         # this method is called by the Channel once the full request has been
         # received, to dispatch the request to a resource.
         # We can use it to set the IP address and protocol according to the
@@ -445,7 +456,7 @@ class XForwardedForRequest(SynapseRequest):
         self._process_forwarded_headers()
         return super().requestReceived(command, path, version)

-    def _process_forwarded_headers(self):
+    def _process_forwarded_headers(self) -> None:
         headers = self.requestHeaders.getRawHeaders(b"x-forwarded-for")
         if not headers:
             return
@@ -470,7 +481,7 @@ class XForwardedForRequest(SynapseRequest):
         )
         self._forwarded_https = True

-    def isSecure(self):
+    def isSecure(self) -> bool:
         if self._forwarded_https:
             return True
         return super().isSecure()
@@ -520,7 +531,7 @@ class SynapseSite(Site):
         site_tag: str,
         config: ListenerConfig,
         resource: IResource,
-        server_version_string,
+        server_version_string: str,
         max_request_body_size: int,
         reactor: IReactorTime,
     ):
@@ -540,19 +551,23 @@ class SynapseSite(Site):
         Site.__init__(self, resource, reactor=reactor)

         self.site_tag = site_tag
+        self.reactor = reactor

         assert config.http_options is not None
         proxied = config.http_options.x_forwarded
         request_class = XForwardedForRequest if proxied else SynapseRequest

-        def request_factory(channel, queued) -> Request:
+        def request_factory(channel, queued: bool) -> Request:
             return request_class(
-                channel, max_request_body_size=max_request_body_size, queued=queued
+                channel,
+                self,
+                max_request_body_size=max_request_body_size,
+                queued=queued,
             )

         self.requestFactory = request_factory  # type: ignore
         self.access_logger = logging.getLogger(logger_name)
         self.server_version_string = server_version_string.encode("ascii")

-    def log(self, request):
+    def log(self, request: SynapseRequest) -> None:
         pass
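The through-line of the site.py changes: requests stop reaching back through `channel.site` and instead get the `SynapseSite` (and its reactor, needed by `defer_to_thread` above) passed in explicitly by the request factory. A stripped-down sketch of that wiring with toy classes, not the Twisted originals:

```python
class ToySite:
    def __init__(self, site_tag: str, reactor: object):
        self.site_tag = site_tag
        self.reactor = reactor

        # The factory closes over the site, so every request gets an explicit
        # reference instead of digging it out of channel.site later.
        def request_factory(channel: object, queued: bool) -> "ToyRequest":
            return ToyRequest(channel, self, queued=queued)

        self.request_factory = request_factory


class ToyRequest:
    def __init__(self, channel: object, site: ToySite, queued: bool = False):
        self._channel = channel
        self.synapse_site = site
        self.reactor = site.reactor  # used e.g. by defer_to_thread
        self.queued = queued


site = ToySite("client", reactor=object())
request = site.request_factory(channel=object(), queued=False)
assert request.synapse_site.site_tag == "client"
```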
@ -363,7 +363,7 @@ def noop_context_manager(*args, **kwargs):
|
||||||
def init_tracer(hs: "HomeServer"):
|
def init_tracer(hs: "HomeServer"):
|
||||||
"""Set the whitelists and initialise the JaegerClient tracer"""
|
"""Set the whitelists and initialise the JaegerClient tracer"""
|
||||||
global opentracing
|
global opentracing
|
||||||
if not hs.config.opentracer_enabled:
|
if not hs.config.tracing.opentracer_enabled:
|
||||||
# We don't have a tracer
|
# We don't have a tracer
|
||||||
opentracing = None
|
opentracing = None
|
||||||
return
|
return
|
||||||

@@ -377,12 +377,12 @@ def init_tracer(hs: "HomeServer"):
     # Pull out the jaeger config if it was given. Otherwise set it to something sensible.
     # See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py

-    set_homeserver_whitelist(hs.config.opentracer_whitelist)
+    set_homeserver_whitelist(hs.config.tracing.opentracer_whitelist)

     from jaeger_client.metrics.prometheus import PrometheusMetricsFactory

     config = JaegerConfig(
-        config=hs.config.jaeger_config,
+        config=hs.config.tracing.jaeger_config,
         service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
         scope_manager=LogContextScopeManager(hs.config),
         metrics_factory=PrometheusMetricsFactory(),
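
Both `init_tracer` hunks apply the same config refactor: tracing settings move off the root config object into a `tracing` namespace. A minimal before/after sketch, with `hs` standing in for a homeserver instance:

def tracing_enabled(hs) -> bool:
    # Pre-1.44 spelling, as shown in the removed line above:
    #     return hs.config.opentracer_enabled
    # 1.44 spelling, namespaced under the tracing section:
    return hs.config.tracing.opentracer_enabled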

@@ -24,8 +24,10 @@ from typing import (
     List,
     Optional,
     Tuple,
+    Union,
 )

+import attr
 import jinja2

 from twisted.internet import defer

@@ -46,7 +48,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.roommember import ProfileInfo
 from synapse.storage.state import StateFilter
-from synapse.types import JsonDict, Requester, UserID, UserInfo, create_requester
+from synapse.types import (
+    DomainSpecificString,
+    JsonDict,
+    Requester,
+    UserID,
+    UserInfo,
+    create_requester,
+)
 from synapse.util import Clock
 from synapse.util.caches.descriptors import cached

@@ -79,6 +88,18 @@ __all__ = [
 logger = logging.getLogger(__name__)


+@attr.s(auto_attribs=True)
+class UserIpAndAgent:
+    """
+    An IP address and user agent used by a user to connect to this homeserver.
+    """
+
+    ip: str
+    user_agent: str
+    # The time at which this user agent/ip was last seen.
+    last_seen: int
+
+
 class ModuleApi:
     """A proxy object that gets passed to various plugin modules so they
     can register new users etc if necessary.
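
For orientation, the new `UserIpAndAgent` attrs class is a plain value object. An illustrative construction with made-up values, assuming the class from the hunk above is in scope:

entry = UserIpAndAgent(
    ip="203.0.113.7",          # example address
    user_agent="Mozilla/5.0",  # example agent string
    last_seen=1632900000000,   # example last-seen timestamp
)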

@@ -91,21 +112,23 @@ class ModuleApi:
         self._auth = hs.get_auth()
         self._auth_handler = auth_handler
         self._server_name = hs.hostname
-        self._presence_stream = hs.get_event_sources().sources["presence"]
+        self._presence_stream = hs.get_event_sources().sources.presence
         self._state = hs.get_state_handler()
         self._clock: Clock = hs.get_clock()
         self._send_email_handler = hs.get_send_email_handler()
         self.custom_template_dir = hs.config.server.custom_template_directory

         try:
-            app_name = self._hs.config.email_app_name
+            app_name = self._hs.config.email.email_app_name

-            self._from_string = self._hs.config.email_notif_from % {"app": app_name}
+            self._from_string = self._hs.config.email.email_notif_from % {
+                "app": app_name
+            }
         except (KeyError, TypeError):
             # If substitution failed (which can happen if the string contains
             # placeholders other than just "app", or if the type of the placeholder is
             # not a string), fall back to the bare strings.
-            self._from_string = self._hs.config.email_notif_from
+            self._from_string = self._hs.config.email.email_notif_from

         self._raw_from = email.utils.parseaddr(self._from_string)[1]

@@ -700,6 +723,65 @@ class ModuleApi:
             (td for td in (self.custom_template_dir, custom_template_directory) if td),
         )

+    def is_mine(self, id: Union[str, DomainSpecificString]) -> bool:
+        """
+        Checks whether an ID (user id, room, ...) comes from this homeserver.
+
+        Args:
+            id: any Matrix id (e.g. user id, room id, ...), either as a raw id,
+                e.g. string "@user:example.com" or as a parsed UserID, RoomID, ...
+        Returns:
+            True if id comes from this homeserver, False otherwise.
+
+        Added in Synapse v1.44.0.
+        """
+        if isinstance(id, DomainSpecificString):
+            return self._hs.is_mine(id)
+        else:
+            return self._hs.is_mine_id(id)
+
+    async def get_user_ip_and_agents(
+        self, user_id: str, since_ts: int = 0
+    ) -> List[UserIpAndAgent]:
+        """
+        Return the list of user IPs and agents for a user.
+
+        Args:
+            user_id: the id of a user, local or remote
+            since_ts: a timestamp in seconds since the epoch,
+                or the epoch itself if not specified.
+        Returns:
+            The list of all UserIpAndAgent that the user has
+            used to connect to this homeserver since `since_ts`.
+            If the user is remote, this list is empty.
+
+        Added in Synapse v1.44.0.
+        """
+        # Don't hit the db if this is not a local user.
+        is_mine = False
+        try:
+            # Let's be defensive against ill-formed strings.
+            if self.is_mine(user_id):
+                is_mine = True
+        except Exception:
+            pass
+
+        if is_mine:
+            raw_data = await self._store.get_user_ip_and_agents(
+                UserID.from_string(user_id), since_ts
+            )
+            # Sanitize some of the data. We don't want to return tokens.
+            return [
+                UserIpAndAgent(
+                    ip=str(data["ip"]),
+                    user_agent=str(data["user_agent"]),
+                    last_seen=int(data["last_seen"]),
+                )
+                for data in raw_data
+            ]
+        else:
+            return []
+
+
 class PublicRoomListManager:
     """Contains methods for adding to, removing from and querying whether a room
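
Taken together, the two new methods give plugin modules a supported way to ask "is this user local, and where have they connected from?". A minimal sketch of how a third-party module might call them; the module class and its wiring are hypothetical:

class ExampleAuditModule:
    """Hypothetical module holding a ModuleApi reference as self._api."""

    def __init__(self, config: dict, api):
        self._api = api

    async def log_local_user_ips(self, user_id: str) -> None:
        # is_mine accepts a raw string or a parsed UserID/RoomID.
        if not self._api.is_mine(user_id):
            return
        # since_ts defaults to 0, i.e. everything since the epoch.
        for entry in await self._api.get_user_ip_and_agents(user_id):
            print(entry.ip, entry.user_agent, entry.last_seen)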

@@ -584,7 +584,7 @@ class Notifier:
         events: List[EventBase] = []
         end_token = from_token

-        for name, source in self.event_sources.sources.items():
+        for name, source in self.event_sources.sources.get_sources():
             keyname = "%s_key" % name
             before_id = getattr(before_token, keyname)
             after_id = getattr(after_token, keyname)
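
The `.items()` to `get_sources()` switch lines up with the `sources.presence` attribute access in the ModuleApi hunk earlier: `event_sources.sources` is evidently no longer a plain dict. The diff does not show the new container, so the following is only a sketch of how an attrs-based container could keep the old `(name, source)` iteration working:

import attr

@attr.s(auto_attribs=True)
class _EventSourcesSketch:
    # Field names are illustrative; real Synapse has more sources.
    presence: object
    room: object

    def get_sources(self):
        # (name, source) pairs, mirroring the old dict.items() behaviour.
        return attr.asdict(self, recurse=False).items()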

@@ -184,7 +184,7 @@ class EmailPusher(Pusher):

         should_notify_at = max(notif_ready_at, room_ready_at)

-        if should_notify_at < self.clock.time_msec():
+        if should_notify_at <= self.clock.time_msec():
             # one of our notifications is ready for sending, so we send
             # *one* email updating the user on their notifications,
             # we then consider all previously outstanding notifications
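
The `<` to `<=` change fixes a boundary bug: if the timer fired exactly at `should_notify_at`, the strict comparison treated the notification as not yet due and rescheduled it, delaying the email by a full timer cycle. In miniature:

now = 1_632_900_000_000      # stand-in for self.clock.time_msec()
should_notify_at = now       # notification became due exactly now
assert not (should_notify_at < now)   # old check: "not due yet", reschedules
assert should_notify_at <= now        # new check: due, send the email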

@@ -73,7 +73,9 @@ class HttpPusher(Pusher):
         self.failing_since = pusher_config.failing_since
         self.timed_call: Optional[IDelayedCall] = None
         self._is_processing = False
-        self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room
+        self._group_unread_count_by_room = (
+            hs.config.push.push_group_unread_count_by_room
+        )
         self._pusherpool = hs.get_pusherpool()

         self.data = pusher_config.data

@@ -358,7 +360,7 @@ class HttpPusher(Pusher):
         if event.type == "m.room.member" and event.is_state():
             d["notification"]["membership"] = event.content["membership"]
             d["notification"]["user_is_target"] = event.state_key == self.user_id
-        if self.hs.config.push_include_content and event.content:
+        if self.hs.config.push.push_include_content and event.content:
             d["notification"]["content"] = event.content

         # We no longer send aliases separately, instead, we send the human

@@ -110,7 +110,7 @@ class Mailer:
         self.state_handler = self.hs.get_state_handler()
         self.storage = hs.get_storage()
         self.app_name = app_name
-        self.email_subjects: EmailSubjectConfig = hs.config.email_subjects
+        self.email_subjects: EmailSubjectConfig = hs.config.email.email_subjects

         logger.info("Created Mailer for app_name %s" % app_name)

@@ -796,8 +796,8 @@ class Mailer:
         Returns:
             A link to open a room in the web client.
         """
-        if self.hs.config.email_riot_base_url:
-            base_url = "%s/#/room" % (self.hs.config.email_riot_base_url)
+        if self.hs.config.email.email_riot_base_url:
+            base_url = "%s/#/room" % (self.hs.config.email.email_riot_base_url)
         elif self.app_name == "Vector":
             # need /beta for Universal Links to work on iOS
             base_url = "https://vector.im/beta/#/room"

@@ -815,9 +815,9 @@ class Mailer:
         Returns:
             A link to open the notification in the web client.
         """
-        if self.hs.config.email_riot_base_url:
+        if self.hs.config.email.email_riot_base_url:
             return "%s/#/room/%s/%s" % (
-                self.hs.config.email_riot_base_url,
+                self.hs.config.email.email_riot_base_url,
                 notif["room_id"],
                 notif["event_id"],
             )

@@ -35,12 +35,12 @@ class PusherFactory:
             "http": HttpPusher
         }

-        logger.info("email enable notifs: %r", hs.config.email_enable_notifs)
-        if hs.config.email_enable_notifs:
+        logger.info("email enable notifs: %r", hs.config.email.email_enable_notifs)
+        if hs.config.email.email_enable_notifs:
             self.mailers: Dict[str, Mailer] = {}

-            self._notif_template_html = hs.config.email_notif_template_html
-            self._notif_template_text = hs.config.email_notif_template_text
+            self._notif_template_html = hs.config.email.email_notif_template_html
+            self._notif_template_text = hs.config.email.email_notif_template_text

             self.pusher_types["email"] = self._create_email_pusher

@@ -77,4 +77,4 @@ class PusherFactory:
         if isinstance(brand, str):
             return brand

-        return self.config.email_app_name
+        return self.config.email.email_app_name
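
All of the pusher and mailer hunks above are instances of one mechanical refactor, the same one seen earlier for tracing: flat `hs.config.email_*` and `hs.config.push_*` attributes move under `hs.config.email` and `hs.config.push`. A sketch of the post-1.44 access pattern for out-of-tree code, with `hs` standing in for a homeserver instance:

def read_pusher_settings(hs):
    # Namespaced (1.44) spellings; the flat pre-1.44 equivalents appear in
    # the removed lines above (e.g. hs.config.email_app_name).
    app_name = hs.config.email.email_app_name
    include_content = hs.config.push.push_include_content
    group_by_room = hs.config.push.push_group_unread_count_by_room
    return app_name, include_content, group_by_room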