Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-08-16 08:20:19 -04:00)

Merge remote-tracking branch 'upstream/release-v1.40'

Commit 7359964d9f: 124 changed files with 3989 additions and 904 deletions
.github/workflows/release-artifacts.yml (43 lines changed)

@@ -12,6 +12,10 @@ on:
     # we do the full build on tags.
     tags: ["v*"]
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 permissions:
   contents: write

@@ -44,12 +48,43 @@ jobs:
       distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
 
     steps:
-      - uses: actions/checkout@v2
+      - name: Checkout
+        uses: actions/checkout@v2
         with:
           path: src
-      - uses: actions/setup-python@v2
-      - run: ./src/scripts-dev/build_debian_packages "${{ matrix.distro }}"
-      - uses: actions/upload-artifact@v2
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          install: true
+
+      - name: Set up docker layer caching
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+
+      - name: Set up python
+        uses: actions/setup-python@v2
+
+      - name: Build the packages
+        # see https://github.com/docker/build-push-action/issues/252
+        # for the cache magic here
+        run: |
+          ./src/scripts-dev/build_debian_packages \
+            --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
+            --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
+            --docker-build-arg=--progress=plain \
+            --docker-build-arg=--load \
+            "${{ matrix.distro }}"
+          rm -rf /tmp/.buildx-cache
+          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+
+      - name: Upload debs as artifacts
+        uses: actions/upload-artifact@v2
         with:
           name: debs
           path: debs/*
.github/workflows/tests.yml (28 lines changed)

@@ -5,6 +5,10 @@ on:
     branches: ["develop", "release-*"]
   pull_request:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   lint:
     runs-on: ubuntu-latest

@@ -340,14 +344,19 @@ jobs:
         working-directory: complement/dockerfiles
 
       # Run Complement
-      - run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests
+      - run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests/...
         env:
           COMPLEMENT_BASE_IMAGE: complement-synapse:latest
         working-directory: complement
 
   # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
   tests-done:
+    if: ${{ always() }}
     needs:
+      - lint
+      - lint-crlf
+      - lint-newsfile
+      - lint-sdist
       - trial
       - trial-olddeps
       - sytest

@@ -355,4 +364,19 @@ jobs:
       - complement
     runs-on: ubuntu-latest
     steps:
-      - run: "true"
+      - name: Set build result
+        env:
+          NEEDS_CONTEXT: ${{ toJSON(needs) }}
+        # the `jq` incantation dumps out a series of "<job> <result>" lines.
+        # we set it to an intermediate variable to avoid a pipe, which makes it
+        # hard to set $rc.
+        run: |
+          rc=0
+          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
+          while read job result ; do
+            if [ "$result" != "success" ]; then
+              echo "::set-failed ::Job $job returned $result"
+              rc=1
+            fi
+          done <<< $results
+          exit $rc
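The new `tests-done` step dumps the `needs` context to JSON and fails the job if any prerequisite job did not succeed. As a rough illustration of the same check outside of GitHub Actions, here is a hedged Python sketch; the `needs_context` payload below is invented, but mirrors the shape of `${{ toJSON(needs) }}`:

```python
import json
import sys

# Hypothetical example payload, mirroring ${{ toJSON(needs) }}:
# each prerequisite job name maps to an object carrying its "result".
needs_context = """
{
  "lint": {"result": "success"},
  "trial": {"result": "success"},
  "sytest": {"result": "failure"}
}
"""


def check_needs(needs_json: str) -> int:
    """Return 0 if every job succeeded, 1 otherwise (like $rc in the workflow)."""
    rc = 0
    for job, info in json.loads(needs_json).items():
        if info.get("result") != "success":
            print(f"Job {job} returned {info.get('result')}")
            rc = 1
    return rc


if __name__ == "__main__":
    sys.exit(check_needs(needs_context))
```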
CHANGES.md (58 lines changed)

@@ -1,3 +1,61 @@
+Synapse 1.40.0rc1 (2021-08-03)
+==============================
+
+Features
+--------
+
+- Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. ([\#9918](https://github.com/matrix-org/synapse/issues/9918))
+- Update support for [MSC2716 - Incrementally importing history into existing rooms](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#10245](https://github.com/matrix-org/synapse/issues/10245), [\#10432](https://github.com/matrix-org/synapse/issues/10432), [\#10463](https://github.com/matrix-org/synapse/issues/10463))
+- Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. ([\#10254](https://github.com/matrix-org/synapse/issues/10254), [\#10447](https://github.com/matrix-org/synapse/issues/10447), [\#10489](https://github.com/matrix-org/synapse/issues/10489))
+- Initial support for [MSC3244](https://github.com/matrix-org/matrix-doc/pull/3244), Room version capabilities over the /capabilities API. ([\#10283](https://github.com/matrix-org/synapse/issues/10283))
+- Add a buffered logging handler which periodically flushes itself. ([\#10407](https://github.com/matrix-org/synapse/issues/10407), [\#10515](https://github.com/matrix-org/synapse/issues/10515))
+- Add support for https connections to a proxy server. Contributed by @Bubu and @dklimpel. ([\#10411](https://github.com/matrix-org/synapse/issues/10411))
+- Support for [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-doc/pull/2285). Contributed by @SimonBrandner. ([\#10413](https://github.com/matrix-org/synapse/issues/10413))
+- Email notifications now state whether an invitation is to a room or a space. ([\#10426](https://github.com/matrix-org/synapse/issues/10426))
+- Allow setting transaction limit for database connections. ([\#10440](https://github.com/matrix-org/synapse/issues/10440), [\#10511](https://github.com/matrix-org/synapse/issues/10511))
+- Add `creation_ts` to "list users" admin API. ([\#10448](https://github.com/matrix-org/synapse/issues/10448))
+
+
+Bugfixes
+--------
+
+- Improve character set detection in URL previews by supporting underscores (in addition to hyphens). Contributed by @srividyut. ([\#10410](https://github.com/matrix-org/synapse/issues/10410))
+- Fix events being incorrectly rejected over federation if they reference auth events that the server needed to fetch. ([\#10439](https://github.com/matrix-org/synapse/issues/10439))
+- Fix `synapse_federation_server_oldest_inbound_pdu_in_staging` Prometheus metric to not report a max age of 51 years when the queue is empty. ([\#10455](https://github.com/matrix-org/synapse/issues/10455))
+- Fix a bug which caused an explicit assignment of power-level 0 to a user to be misinterpreted in rare circumstances. ([\#10499](https://github.com/matrix-org/synapse/issues/10499))
+
+
+Improved Documentation
+----------------------
+
+- Fix hierarchy of providers on the OpenID page. ([\#10445](https://github.com/matrix-org/synapse/issues/10445))
+- Consolidate development documentation to `docs/development/`. ([\#10453](https://github.com/matrix-org/synapse/issues/10453))
+- Add some developer docs to explain room DAG concepts like `outliers`, `state_groups`, `depth`, etc. ([\#10464](https://github.com/matrix-org/synapse/issues/10464))
+- Document how to use Complement while developing a new Synapse feature. ([\#10483](https://github.com/matrix-org/synapse/issues/10483))
+
+
+Internal Changes
+----------------
+
+- Prune inbound federation queues for a room if they get too large. ([\#10390](https://github.com/matrix-org/synapse/issues/10390))
+- Add type hints to `synapse.federation.transport.client` module. ([\#10408](https://github.com/matrix-org/synapse/issues/10408))
+- Remove shebang line from module files. ([\#10415](https://github.com/matrix-org/synapse/issues/10415))
+- Drop backwards-compatibility code that was required to support Ubuntu Xenial. ([\#10429](https://github.com/matrix-org/synapse/issues/10429))
+- Use a docker image cache for the prerequisites for the debian package build. ([\#10431](https://github.com/matrix-org/synapse/issues/10431))
+- Improve servlet type hints. ([\#10437](https://github.com/matrix-org/synapse/issues/10437), [\#10438](https://github.com/matrix-org/synapse/issues/10438))
+- Replace usage of `or_ignore` in `simple_insert` with `simple_upsert` usage, to stop spamming postgres logs with spurious ERROR messages. ([\#10442](https://github.com/matrix-org/synapse/issues/10442))
+- Update the `tests-done` Github Actions status. ([\#10444](https://github.com/matrix-org/synapse/issues/10444), [\#10512](https://github.com/matrix-org/synapse/issues/10512))
+- Update type annotations to work with forthcoming Twisted 21.7.0 release. ([\#10446](https://github.com/matrix-org/synapse/issues/10446), [\#10450](https://github.com/matrix-org/synapse/issues/10450))
+- Cancel redundant GHA workflows when a new commit is pushed. ([\#10451](https://github.com/matrix-org/synapse/issues/10451))
+- Mitigate media repo XSS attacks on IE11 via the non-standard X-Content-Security-Policy header. ([\#10468](https://github.com/matrix-org/synapse/issues/10468))
+- Additional type hints in the state handler. ([\#10482](https://github.com/matrix-org/synapse/issues/10482))
+- Update syntax used to run complement tests. ([\#10488](https://github.com/matrix-org/synapse/issues/10488))
+- Fix up type annotations to work with Twisted 21.7. ([\#10490](https://github.com/matrix-org/synapse/issues/10490))
+- Improve type annotations for `ObservableDeferred`. ([\#10491](https://github.com/matrix-org/synapse/issues/10491))
+- Extend release script to also tag and create GitHub releases. ([\#10496](https://github.com/matrix-org/synapse/issues/10496))
+- Fix a bug which caused production debian packages to be incorrectly marked as 'prerelease'. ([\#10500](https://github.com/matrix-org/synapse/issues/10500))
+
+
 Synapse 1.39.0 (2021-07-29)
 ===========================
@@ -155,7 +155,7 @@ source ./env/bin/activate
 ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
 ```
 
-## Run the unit tests.
+## Run the unit tests (Twisted trial).
 
 The unit tests run parts of Synapse, including your changes, to see if anything
 was broken. They are slower than the linters but will typically catch more errors.

@@ -186,7 +186,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
 ```
 
 
-## Run the integration tests.
+## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
 
 The integration tests are a more comprehensive suite of tests. They
 run a full version of Synapse, including your changes, to check if

@@ -203,6 +203,43 @@ $ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v
 This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
 
+
+## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).
+
+[Complement](https://github.com/matrix-org/complement) is a suite of black box tests that can be run on any homeserver implementation. It can also be thought of as end-to-end (e2e) tests.
+
+It's often nice to develop on Synapse and write Complement tests at the same time.
+Here is how to run your local Synapse checkout against your local Complement checkout.
+
+(checkout [`complement`](https://github.com/matrix-org/complement) alongside your `synapse` checkout)
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
+```
+
+To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:
+
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
+```
+
+To run a specific test, you can specify the whole name structure:
+
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
+```
+
+
+### Access database for homeserver after Complement test runs.
+
+If you're curious what the database looks like after you run some tests, here are some steps to get you going in Synapse:
+
+1. In your Complement test comment out `defer deployment.Destroy(t)` and replace with `defer time.Sleep(2 * time.Hour)` to keep the homeserver running after the tests complete
+1. Start the Complement tests
+1. Find the name of the container, `docker ps -f name=complement_` (this will filter for just the Complement related Docker containers)
+1. Access the container replacing the name with what you found in the previous step: `docker exec -it complement_1_hs_with_application_service.hs1_2 /bin/bash`
+1. Install sqlite (database driver), `apt-get update && apt-get install -y sqlite3`
+1. Then run `sqlite3` and open the database `.open /conf/homeserver.db` (this db path comes from the Synapse homeserver.yaml)
+
+
 # 9. Submit your patch.
 
 Once you're happy with your patch, it's time to prepare a Pull Request.

@@ -392,7 +429,7 @@ By now, you know the drill!
 # Notes for maintainers on merging PRs etc
 
 There are some notes for those with commit access to the project on how we
-manage git [here](docs/dev/git.md).
+manage git [here](docs/development/git.md).
 
 # Conclusion
debian/build_virtualenv (4 lines changed)

@@ -33,13 +33,11 @@ esac
 # Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
 # than the 2/3 compatible `virtualenv`.
 
-# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)
-
 dh_virtualenv \
     --install-suffix "matrix-synapse" \
     --builtin-venv \
     --python "$SNAKE" \
-    --upgrade-pip-to="20.3.4" \
+    --upgrade-pip \
     --preinstall="lxml" \
     --preinstall="mock" \
     --extra-pip-arg="--no-cache-dir" \
debian/changelog (10 lines changed)

@@ -1,3 +1,13 @@
+matrix-synapse-py3 (1.40.0~rc1) stable; urgency=medium
+
+  [ Richard van der Hoff ]
+  * Drop backwards-compatibility code that was required to support Ubuntu Xenial.
+
+  [ Synapse Packaging team ]
+  * New synapse release 1.40.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 03 Aug 2021 11:31:49 +0100
+
 matrix-synapse-py3 (1.39.0) stable; urgency=medium
 
   * New synapse release 1.39.0.
debian/compat (2 lines changed)

@@ -1 +1 @@
-9
+10
debian/control (5 lines changed)

@@ -3,11 +3,8 @@ Section: contrib/python
 Priority: extra
 Maintainer: Synapse Packaging team <packages@matrix.org>
 # keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
-# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
-# On all other supported releases, it's merely a transitional package which
-# does nothing but depends on debhelper (> 9.20160709)
 Build-Depends:
- debhelper (>= 9.20160709) | dh-systemd,
+ debhelper (>= 10),
  dh-virtualenv (>= 1.1),
  libsystemd-dev,
  libpq-dev,
debian/rules (4 lines changed)

@@ -51,7 +51,5 @@ override_dh_shlibdeps:
 override_dh_virtualenv:
 	./debian/build_virtualenv
 
-# We are restricted to compat level 9 (because xenial), so have to
-# enable the systemd bits manually.
 %:
-	dh $@ --with python-virtualenv --with systemd
+	dh $@ --with python-virtualenv
@@ -15,6 +15,15 @@ ARG distro=""
 ###
 ### Stage 0: build a dh-virtualenv
 ###
 
+# This is only really needed on bionic and focal, since other distributions we
+# care about have a recent version of dh-virtualenv by default. Unfortunately,
+# it looks like focal is going to be with us for a while.
+#
+# (focal doesn't have a dh-virtualenv package at all. There is a PPA at
+# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
+# it's not obviously easier to use that than to build our own.)
+
 FROM ${distro} as builder
 
 RUN apt-get update -qq -o Acquire::Languages=none

@@ -27,7 +36,7 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
         wget
 
 # fetch and unpack the package
-# TODO: Upgrade to 1.2.2 once xenial is dropped
+# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
 RUN mkdir /dh-virtualenv
 RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
 RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz

@@ -59,8 +68,6 @@ ENV LANG C.UTF-8
 #
 # NB: keep this list in sync with the list of build-deps in debian/control
 # TODO: it would be nice to do that automatically.
-# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
-# it's a transitional package on all other, more recent releases
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
         -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \

@@ -76,10 +83,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
         python3-venv \
         sqlite3 \
         libpq-dev \
-        xmlsec1 \
-    && ( env DEBIAN_FRONTEND=noninteractive apt-get install \
-        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
-        dh-systemd || true )
+        xmlsec1
 
 COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
@@ -11,10 +11,6 @@ DIST=`cut -d ':' -f2 <<< $distro`
 cp -aT /synapse/source /synapse/build
 cd /synapse/build
 
-# add an entry to the changelog for this distribution
-dch -M -l "+$DIST" "build for $DIST"
-dch -M -r "" --force-distribution --distribution "$DIST"
-
 # if this is a prerelease, set the Section accordingly.
 #
 # When the package is later added to the package repo, reprepro will use the

@@ -23,11 +19,14 @@ dch -M -r "" --force-distribution --distribution "$DIST"
 
 DEB_VERSION=`dpkg-parsechangelog -SVersion`
 case $DEB_VERSION in
-    *rc*|*a*|*b*|*c*)
+    *~rc*|*~a*|*~b*|*~c*)
         sed -ie '/^Section:/c\Section: prerelease' debian/control
         ;;
 esac
 
+# add an entry to the changelog for this distribution
+dch -M -l "+$DIST" "build for $DIST"
+dch -M -r "" --force-distribution --distribution "$DIST"
+
 dpkg-buildpackage -us -uc
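The second hunk above both tightens the prerelease glob (a tilde is now required before `rc`/`a`/`b`/`c`) and moves the per-distribution `dch` entries after the check, so the version string being inspected no longer carries a `+<distro>` suffix. A small Python sketch of the tightened check, offered only as an illustration of the shell `case` logic; the version strings below are invented:

```python
import re

# Mirrors the new `case` patterns: a prerelease must contain "~rc", "~a", "~b" or "~c".
PRERELEASE_RE = re.compile(r"~(rc|a|b|c)")


def is_prerelease(deb_version: str) -> bool:
    """Return True if a Debian version string looks like a prerelease."""
    return bool(PRERELEASE_RE.search(deb_version))


assert is_prerelease("1.40.0~rc1")          # matches *~rc*
assert not is_prerelease("1.39.0+buster1")  # the old *b* glob would have matched this
```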
@@ -67,7 +67,7 @@
 # Development
   - [Contributing Guide](development/contributing_guide.md)
   - [Code Style](code_style.md)
-  - [Git Usage](dev/git.md)
+  - [Git Usage](development/git.md)
   - [Testing]()
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)

@@ -77,8 +77,9 @@
   - [TCP Replication](tcp_replication.md)
   - [Internal Documentation](development/internal_documentation/README.md)
   - [Single Sign-On]()
-    - [SAML](dev/saml.md)
-    - [CAS](dev/cas.md)
+    - [SAML](development/saml.md)
+    - [CAS](development/cas.md)
+  - [Room DAG concepts](development/room-dag-concepts.md)
   - [State Resolution]()
     - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
   - [Media Repository](media_repository.md)
@@ -144,7 +144,8 @@ A response body like the following is returned:
         "deactivated": 0,
         "shadow_banned": 0,
         "displayname": "<User One>",
-        "avatar_url": null
+        "avatar_url": null,
+        "creation_ts": 1560432668000
     }, {
         "name": "<user_id2>",
         "is_guest": 0,

@@ -153,7 +154,8 @@ A response body like the following is returned:
         "deactivated": 0,
         "shadow_banned": 0,
         "displayname": "<User Two>",
-        "avatar_url": "<avatar_url>"
+        "avatar_url": "<avatar_url>",
+        "creation_ts": 1561550621000
     }
   ],
   "next_token": "100",

@@ -197,11 +199,12 @@ The following parameters should be set in the URL:
   - `shadow_banned` - Users are ordered by `shadow_banned` status.
   - `displayname` - Users are ordered alphabetically by `displayname`.
   - `avatar_url` - Users are ordered alphabetically by avatar URL.
+  - `creation_ts` - Users are ordered by when the user was created, in ms.
 
 - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
   Setting this value to `b` will reverse the above sort order. Defaults to `f`.
 
-Caution. The database only has indexes on the columns `name` and `created_ts`.
+Caution. The database only has indexes on the columns `name` and `creation_ts`.
 This means that if a different sort order is used (`is_guest`, `admin`,
 `user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`),
 this can cause a large load on the database, especially for large environments.

@@ -222,6 +225,7 @@ The following fields are returned in the JSON response body:
 - `shadow_banned` - bool - Status if that user has been marked as shadow banned.
 - `displayname` - string - The user's display name if they have set one.
 - `avatar_url` - string - The user's avatar URL if they have set one.
+- `creation_ts` - integer - The user's creation timestamp in ms.
 
 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of media.
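A quick way to see the new field in practice is to query the list-users admin API ordered by `creation_ts`. A minimal sketch, assuming the `/_synapse/admin/v2/users` endpoint this documentation describes and an admin access token; `SYNAPSE_URL` and `ADMIN_TOKEN` are placeholders:

```python
import requests  # third-party; `pip install requests`

SYNAPSE_URL = "https://homeserver.example.com"  # placeholder
ADMIN_TOKEN = "syt_admin_token"                 # placeholder


def list_users_by_creation(limit: int = 10) -> list:
    # Sort newest-first by creation time, per the `order_by` and `dir` parameters above.
    resp = requests.get(
        f"{SYNAPSE_URL}/_synapse/admin/v2/users",
        params={"order_by": "creation_ts", "dir": "b", "limit": limit},
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    )
    resp.raise_for_status()
    # The user list sits alongside `next_token`/`total` in the response body.
    return resp.json()["users"]


for user in list_users_by_creation():
    # Each entry now includes `creation_ts` (milliseconds since the epoch).
    print(user["name"], user.get("creation_ts"))
```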
@@ -9,7 +9,7 @@ commits each of which contains a single change building on what came
 before. Here, by way of an arbitrary example, is the top of `git log --graph
 b2dba0607`:
 
-<img src="git/clean.png" alt="clean git graph" width="500px">
+<img src="img/git/clean.png" alt="clean git graph" width="500px">
 
 Note how the commit comment explains clearly what is changing and why. Also
 note the *absence* of merge commits, as well as the absence of commits called

@@ -61,7 +61,7 @@ Ok, so that's what we'd like to achieve. How do we achieve it?
 The TL;DR is: when you come to merge a pull request, you *probably* want to
 “squash and merge”:
 
-.
+.
 
 (This applies whether you are merging your own PR, or that of another
 contributor.)

@@ -105,7 +105,7 @@ complicated. Here's how we do it.
 
 Let's start with a picture:
 
-
+
 
 It looks complicated, but it's really not. There's one basic rule: *anyone* is
 free to merge from *any* more-stable branch to *any* less-stable branch at

Three images are moved along with these path changes; their dimensions and sizes are unchanged (70 KiB, 108 KiB, 29 KiB).
docs/development/room-dag-concepts.md (79 lines, new file)

# Room DAG concepts

## Edges

The word "edge" comes from graph theory lingo. An edge is just a connection
between two events. In Synapse, we connect events by specifying their
`prev_events`. A subsequent event points back at a previous event.

```
A (oldest) <---- B <---- C (most recent)
```


## Depth and stream ordering

Events are normally sorted by `(topological_ordering, stream_ordering)` where
`topological_ordering` is just `depth`. In other words, we first sort by `depth`
and then tie-break based on `stream_ordering`. `depth` is incremented as new
messages are added to the DAG. Normally, `stream_ordering` is an auto
incrementing integer, but backfilled events start with `stream_ordering=-1` and decrement.

---

- `/sync` returns things in the order they arrive at the server (`stream_ordering`).
- `/messages` (and `/backfill` in the federation API) return them in the order determined by the event graph `(topological_ordering, stream_ordering)`.

The general idea is that, if you're following a room in real-time (i.e.
`/sync`), you probably want to see the messages as they arrive at your server,
rather than skipping any that arrived late; whereas if you're looking at a
historical section of timeline (i.e. `/messages`), you want to see the best
representation of the state of the room as others were seeing it at the time.


## Forward extremity

Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.

The forward extremities of a room are used as the `prev_events` when the next event is sent.


## Backwards extremity

The current marker of where we have backfilled up to and will generally be the
oldest-in-time events we know of in the DAG.

These are events for which we haven't fetched all of the `prev_events`.

Once we have fetched all of its `prev_events`, it's unmarked as a backwards
extremity (although we may have formed new backwards extremities from the prev
events during the backfilling process).


## Outliers

We mark an event as an `outlier` when we haven't figured out the state for the
room at that point in the DAG yet.

We won't *necessarily* have the `prev_events` of an `outlier` in the database,
but it's entirely possible that we *might*. The status of whether we have all of
the `prev_events` is marked as a [backwards extremity](#backwards-extremity).

For example, when we fetch the event auth chain or state for a given event, we
mark all of those claimed auth events as outliers because we haven't done the
state calculation ourselves.


## State groups

For every non-outlier event we need to know the state at that event. Instead of
storing the full state for each event in the DB (i.e. an `event_id -> state`
mapping), which is *very* space inefficient when state doesn't change, we
instead assign each different set of state a "state group" and then have
mappings of `event_id -> state_group` and `state_group -> state`.


### State group edges

TODO: `state_group_edges` is a further optimization...
notes from @Azrenbeth, https://pastebin.com/seUGVGeT
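To make the ordering rules above concrete, here is a small illustrative sketch (the event tuples are invented): `/messages`-style ordering sorts by `(depth, stream_ordering)`, `/sync`-style ordering uses arrival order, and backfilled events carry negative stream orderings.

```python
from typing import List, Tuple

# (event_id, depth, stream_ordering) - illustrative values only.
events: List[Tuple[str, int, int]] = [
    ("$live_1", 3, 7),
    ("$live_2", 4, 8),
    ("$backfilled_old", 1, -2),    # backfilled events count down from -1
    ("$backfilled_older", 1, -3),
]

# /messages-style ordering: topological ordering (depth) first, stream ordering as tie-break.
topological = sorted(events, key=lambda e: (e[1], e[2]))

# /sync-style ordering: purely by the order events arrived at this server.
stream = sorted(events, key=lambda e: e[2])

print([e[0] for e in topological])
print([e[0] for e in stream])
```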
@@ -410,7 +410,7 @@ oidc_providers:
     display_name_template: "{{ user.name }}"
 ```
 
-## Apple
+### Apple
 
 Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
@@ -720,6 +720,9 @@ caches:
 # 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or
 # 'psycopg2' (for PostgreSQL).
 #
+# 'txn_limit' gives the maximum number of transactions to run per connection
+# before reconnecting. Defaults to 0, which means no limit.
+#
 # 'args' gives options which are passed through to the database engine,
 # except for options starting 'cp_', which are used to configure the Twisted
 # connection pool. For a reference to valid arguments, see:

@@ -740,6 +743,7 @@ caches:
 #
 #database:
 #  name: psycopg2
+#  txn_limit: 10000
 #  args:
 #    user: synapse_user
 #    password: secretpassword
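The semantics of the new `txn_limit` option can be pictured with a toy wrapper that swaps in a fresh connection after a fixed number of transactions. This is only a sketch of the documented behaviour, not Synapse's actual connection-pool code; the sqlite usage below is purely illustrative.

```python
import sqlite3


class TxnLimitedConnection:
    """Runs transactions on one connection, reconnecting after txn_limit of them."""

    def __init__(self, connect, txn_limit: int = 0):
        self._connect = connect        # factory returning a new DB connection
        self._txn_limit = txn_limit    # 0 means "no limit", as in the config above
        self._conn = connect()
        self._txn_count = 0

    def run_txn(self, func):
        if self._txn_limit and self._txn_count >= self._txn_limit:
            self._conn.close()
            self._conn = self._connect()  # start a fresh connection
            self._txn_count = 0
        self._txn_count += 1
        return func(self._conn)


wrapper = TxnLimitedConnection(lambda: sqlite3.connect(":memory:"), txn_limit=2)
for _ in range(5):  # the third and fifth transactions each run on a fresh connection
    print(wrapper.run_txn(lambda conn: conn.execute("SELECT 1").fetchone()))
```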
@@ -17,6 +17,7 @@ import subprocess
 import sys
 import threading
 from concurrent.futures import ThreadPoolExecutor
+from typing import Optional, Sequence
 
 DISTS = (
     "debian:buster",

@@ -39,8 +40,11 @@ projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 
 
 class Builder(object):
-    def __init__(self, redirect_stdout=False):
+    def __init__(
+        self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None
+    ):
         self.redirect_stdout = redirect_stdout
+        self._docker_build_args = tuple(docker_build_args or ())
         self.active_containers = set()
         self._lock = threading.Lock()
         self._failed = False

@@ -79,8 +83,8 @@ class Builder(object):
             stdout = None
 
         # first build a docker image for the build environment
-        subprocess.check_call(
-            [
+        build_args = (
+            (
                 "docker",
                 "build",
                 "--tag",

@@ -89,8 +93,13 @@ class Builder(object):
                 "distro=" + dist,
                 "-f",
                 "docker/Dockerfile-dhvirtualenv",
-                "docker",
-            ],
+            )
+            + self._docker_build_args
+            + ("docker",)
+        )
+
+        subprocess.check_call(
+            build_args,
             stdout=stdout,
             stderr=subprocess.STDOUT,
             cwd=projdir,

@@ -147,9 +156,7 @@ class Builder(object):
             self.active_containers.remove(c)
 
 
-def run_builds(dists, jobs=1, skip_tests=False):
-    builder = Builder(redirect_stdout=(jobs > 1))
-
+def run_builds(builder, dists, jobs=1, skip_tests=False):
     def sig(signum, _frame):
         print("Caught SIGINT")
         builder.kill_containers()

@@ -180,6 +187,11 @@ if __name__ == "__main__":
         action="store_true",
         help="skip running tests after building",
     )
+    parser.add_argument(
+        "--docker-build-arg",
+        action="append",
+        help="specify an argument to pass to docker build",
+    )
     parser.add_argument(
         "--show-dists-json",
         action="store_true",

@@ -195,4 +207,12 @@ if __name__ == "__main__":
     if args.show_dists_json:
         print(json.dumps(DISTS))
     else:
-        run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
+        builder = Builder(
+            redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg
+        )
+        run_builds(
+            builder,
+            dists=args.dist,
+            jobs=args.jobs,
+            skip_tests=args.no_check,
+        )
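Because `--docker-build-arg` is declared with `action="append"`, the flag can be repeated (as the release-artifacts workflow above does) and each value is spliced verbatim into the `docker build` command line ahead of the build context. A condensed, self-contained sketch of that flow; the image tag and cache path here are placeholders rather than the script's real values:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--docker-build-arg",
    action="append",
    default=[],
    help="specify an argument to pass to docker build",
)
parser.add_argument("dist", nargs="+")

args = parser.parse_args(
    [
        "--docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache",
        "--docker-build-arg=--load",
        "debian:buster",
    ]
)

# Mirrors how the patched script assembles its command: fixed arguments, then the
# user-supplied docker args, then the "docker" build-context directory.
build_args = (
    ("docker", "build", "--tag", "example-builder:buster")  # placeholder tag
    + tuple(args.docker_build_arg)
    + ("docker",)
)
print(build_args)
```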
@@ -65,4 +65,4 @@ if [[ -n "$1" ]]; then
 fi
 
 # Run the tests!
-go test -v -tags synapse_blacklist,msc2946,msc3083,msc2716,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
+go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
@@ -14,29 +14,57 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""An interactive script for doing a release. See `run()` below.
+"""An interactive script for doing a release. See `cli()` below.
 """
 
+import re
 import subprocess
 import sys
-from typing import Optional
+import urllib.request
+from os import path
+from tempfile import TemporaryDirectory
+from typing import List, Optional, Tuple
 
+import attr
 import click
+import commonmark
 import git
+import redbaron
+from click.exceptions import ClickException
+from github import Github
 from packaging import version
-from redbaron import RedBaron
 
 
-@click.command()
-def run():
-    """An interactive script to walk through the initial stages of creating a
-    release, including creating release branch, updating changelog and pushing to
-    GitHub.
+@click.group()
+def cli():
+    """An interactive script to walk through the parts of creating a release.
 
     Requires the dev dependencies be installed, which can be done via:
 
         pip install -e .[dev]
 
+    Then to use:
+
+        ./scripts-dev/release.py prepare
+
+        # ... ask others to look at the changelog ...
+
+        ./scripts-dev/release.py tag
+
+        # ... wait for assets to build ...
+
+        ./scripts-dev/release.py publish
+        ./scripts-dev/release.py upload
+
+    If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
+    `tag`/`publish` command, then a new draft release will be created/published.
+    """
+
+
+@cli.command()
+def prepare():
+    """Do the initial stages of creating a release, including creating release
+    branch, updating changelog and pushing to GitHub.
     """
 
     # Make sure we're in a git repo.

@@ -51,32 +79,8 @@ def run():
     click.secho("Updating git repo...")
     repo.remote().fetch()
 
-    # Parse the AST and load the `__version__` node so that we can edit it
-    # later.
-    with open("synapse/__init__.py") as f:
-        red = RedBaron(f.read())
-
-    version_node = None
-    for node in red:
-        if node.type != "assignment":
-            continue
-
-        if node.target.type != "name":
-            continue
-
-        if node.target.value != "__version__":
-            continue
-
-        version_node = node
-        break
-
-    if not version_node:
-        print("Failed to find '__version__' definition in synapse/__init__.py")
-        sys.exit(1)
-
-    # Parse the current version.
-    current_version = version.parse(version_node.value.value.strip('"'))
-    assert isinstance(current_version, version.Version)
+    # Get the current version and AST from root Synapse module.
+    current_version, parsed_synapse_ast, version_node = parse_version_from_module()
 
     # Figure out what sort of release we're doing and calculate the new version.
     rc = click.confirm("RC", default=True)

@@ -190,7 +194,7 @@ def run():
     # Update the `__version__` variable and write it back to the file.
     version_node.value = '"' + new_version + '"'
     with open("synapse/__init__.py", "w") as f:
-        f.write(red.dumps())
+        f.write(parsed_synapse_ast.dumps())
 
     # Generate changelogs
     subprocess.run("python3 -m towncrier", shell=True)

@@ -240,6 +244,180 @@ def run():
     )
 
 
+@cli.command()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
+def tag(gh_token: Optional[str]):
+    """Tags the release and generates a draft GitHub release"""
+
+    # Make sure we're in a git repo.
+    try:
+        repo = git.Repo()
+    except git.InvalidGitRepositoryError:
+        raise click.ClickException("Not in Synapse repo.")
+
+    if repo.is_dirty():
+        raise click.ClickException("Uncommitted changes exist.")
+
+    click.secho("Updating git repo...")
+    repo.remote().fetch()
+
+    # Find out the version and tag name.
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    # Check we haven't released this version.
+    if tag_name in repo.tags:
+        raise click.ClickException(f"Tag {tag_name} already exists!\n")
+
+    # Get the appropriate changelogs and tag.
+    changes = get_changes_for_version(current_version)
+
+    click.echo_via_pager(changes)
+    if click.confirm("Edit text?", default=False):
+        changes = click.edit(changes, require_save=False)
+
+    repo.create_tag(tag_name, message=changes)
+
+    if not click.confirm("Push tag to GitHub?", default=True):
+        print("")
+        print("Run when ready to push:")
+        print("")
+        print(f"\tgit push {repo.remote().name} tag {current_version}")
+        print("")
+        return
+
+    repo.git.push(repo.remote().name, "tag", tag_name)
+
+    # If no token was given, we bail here
+    if not gh_token:
+        click.launch(f"https://github.com/matrix-org/synapse/releases/edit/{tag_name}")
+        return
+
+    # Create a new draft release
+    gh = Github(gh_token)
+    gh_repo = gh.get_repo("matrix-org/synapse")
+    release = gh_repo.create_git_release(
+        tag=tag_name,
+        name=tag_name,
+        message=changes,
+        draft=True,
+        prerelease=current_version.is_prerelease,
+    )
+
+    # Open the release and the actions where we are building the assets.
+    click.launch(release.url)
+    click.launch(
+        f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}"
+    )
+
+    click.echo("Wait for release assets to be built")
+
+
+@cli.command()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
+def publish(gh_token: str):
+    """Publish release."""
+
+    # Make sure we're in a git repo.
+    try:
+        repo = git.Repo()
+    except git.InvalidGitRepositoryError:
+        raise click.ClickException("Not in Synapse repo.")
+
+    if repo.is_dirty():
+        raise click.ClickException("Uncommitted changes exist.")
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    if not click.confirm(f"Publish {tag_name}?", default=True):
+        return
+
+    # Publish the draft release
+    gh = Github(gh_token)
+    gh_repo = gh.get_repo("matrix-org/synapse")
+    for release in gh_repo.get_releases():
+        if release.title == tag_name:
+            break
+    else:
+        raise ClickException(f"Failed to find GitHub release for {tag_name}")
+
+    assert release.title == tag_name
+
+    if not release.draft:
+        click.echo("Release already published.")
+        return
+
+    release = release.update_release(
+        name=release.title,
+        message=release.body,
+        tag_name=release.tag_name,
+        prerelease=release.prerelease,
+        draft=False,
+    )
+
+
+@cli.command()
+def upload():
+    """Upload release to pypi."""
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    pypi_asset_names = [
+        f"matrix_synapse-{current_version}-py3-none-any.whl",
+        f"matrix-synapse-{current_version}.tar.gz",
+    ]
+
+    with TemporaryDirectory(prefix=f"synapse_upload_{tag_name}_") as tmpdir:
+        for name in pypi_asset_names:
+            filename = path.join(tmpdir, name)
+            url = f"https://github.com/matrix-org/synapse/releases/download/{tag_name}/{name}"
+
+            click.echo(f"Downloading {name} into {filename}")
+            urllib.request.urlretrieve(url, filename=filename)
+
+        if click.confirm("Upload to PyPI?", default=True):
+            subprocess.run("twine upload *", shell=True, cwd=tmpdir)
+
+        click.echo(
+            f"Done! Remember to merge the tag {tag_name} into the appropriate branches"
+        )
+
+
+def parse_version_from_module() -> Tuple[
+    version.Version, redbaron.RedBaron, redbaron.Node
+]:
+    # Parse the AST and load the `__version__` node so that we can edit it
+    # later.
+    with open("synapse/__init__.py") as f:
+        red = redbaron.RedBaron(f.read())
+
+    version_node = None
+    for node in red:
+        if node.type != "assignment":
+            continue
+
+        if node.target.type != "name":
+            continue
+
+        if node.target.value != "__version__":
+            continue
+
+        version_node = node
+        break
+
+    if not version_node:
+        print("Failed to find '__version__' definition in synapse/__init__.py")
+        sys.exit(1)
+
+    # Parse the current version.
+    current_version = version.parse(version_node.value.value.strip('"'))
+    assert isinstance(current_version, version.Version)
+
+    return current_version, red, version_node
+
+
 def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
     """Find the branch/ref, looking first locally then in the remote."""
     if ref_name in repo.refs:

@@ -256,5 +434,66 @@ def update_branch(repo: git.Repo):
     repo.git.merge(repo.active_branch.tracking_branch().name)
 
 
+def get_changes_for_version(wanted_version: version.Version) -> str:
+    """Get the changelogs for the given version.
+
+    If an RC then will only get the changelog for that RC version, otherwise if
+    it's a full release will get the changelog for the release and all its RCs.
+    """
+
+    with open("CHANGES.md") as f:
+        changes = f.read()
+
+    # First we parse the changelog so that we can split it into sections based
+    # on the release headings.
+    ast = commonmark.Parser().parse(changes)
+
+    @attr.s(auto_attribs=True)
+    class VersionSection:
+        title: str
+
+        # These are 0-based.
+        start_line: int
+        end_line: Optional[int] = None  # Is none if its the last entry
+
+    headings: List[VersionSection] = []
+    for node, _ in ast.walker():
+        # We look for all text nodes that are in a level 1 heading.
+        if node.t != "text":
+            continue
+
+        if node.parent.t != "heading" or node.parent.level != 1:
+            continue
+
+        # If we have a previous heading then we update its `end_line`.
+        if headings:
+            headings[-1].end_line = node.parent.sourcepos[0][0] - 1
+
+        headings.append(VersionSection(node.literal, node.parent.sourcepos[0][0] - 1))
+
+    changes_by_line = changes.split("\n")
+
+    version_changelog = []  # The lines we want to include in the changelog
+
+    # Go through each section and find any that match the requested version.
+    regex = re.compile(r"^Synapse v?(\S+)")
+    for section in headings:
+        groups = regex.match(section.title)
+        if not groups:
+            continue
+
+        heading_version = version.parse(groups.group(1))
+        heading_base_version = version.parse(heading_version.base_version)
+
+        # Check if heading version matches the requested version, or if its an
+        # RC of the requested version.
+        if wanted_version not in (heading_version, heading_base_version):
+            continue
+
+        version_changelog.extend(changes_by_line[section.start_line : section.end_line])
+
+    return "\n".join(version_changelog)
+
+
 if __name__ == "__main__":
-    run()
+    cli()
setup.py (2 lines changed)

@@ -108,6 +108,8 @@ CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [
     "click==7.1.2",
     "redbaron==0.9.2",
     "GitPython==3.1.14",
+    "commonmark==0.9.1",
+    "pygithub==1.55",
 ]
 
 CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]
@@ -47,7 +47,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.39.0"
+__version__ = "1.40.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -120,6 +120,7 @@ class EventTypes:
     SpaceParent = "m.space.parent"

     MSC2716_INSERTION = "org.matrix.msc2716.insertion"
+    MSC2716_CHUNK = "org.matrix.msc2716.chunk"
     MSC2716_MARKER = "org.matrix.msc2716.marker"

@@ -198,15 +199,13 @@ class EventContentFields:

     # Used on normal messages to indicate they were historically imported after the fact
     MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
-    # For "insertion" events
+    # For "insertion" events to indicate what the next chunk ID should be in
+    # order to connect to it
     MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id"
-    # Used on normal message events to indicate where the chunk connects to
+    # Used on "chunk" events to indicate which insertion event it connects to
     MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id"
     # For "marker" events
     MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"
-    MSC2716_MARKER_INSERTION_PREV_EVENTS = (
-        "org.matrix.msc2716.marker.insertion_prev_events"
-    )


 class RoomTypes:
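As the updated comments describe, an insertion event advertises the chunk ID that the following chunk should reference, and each chunk event points back at it. A hypothetical pair of event contents (the chunk ID value is invented) showing how the fields line up:

# Illustrative only: hypothetical MSC2716 event contents; the chunk ID is made up.
insertion_event_content = {
    "org.matrix.msc2716.next_chunk_id": "chunk_abc123",
}

chunk_event_content = {
    # A chunk names the ID advertised by the insertion event it connects to.
    "org.matrix.msc2716.chunk_id": "chunk_abc123",
}

assert (
    chunk_event_content["org.matrix.msc2716.chunk_id"]
    == insertion_event_content["org.matrix.msc2716.next_chunk_id"]
)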
@@ -230,3 +229,7 @@ class HistoryVisibility:
     JOINED = "joined"
     SHARED = "shared"
     WORLD_READABLE = "world_readable"
+
+
+class ReadReceiptEventFields:
+    MSC2285_HIDDEN = "org.matrix.msc2285.hidden"
@@ -75,6 +75,9 @@ class Codes:
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
     BAD_ALIAS = "M_BAD_ALIAS"
+    # For restricted join rules.
+    UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN"
+    UNABLE_TO_GRANT_JOIN = "M_UNABLE_TO_GRANT_JOIN"


 class CodeMessageException(RuntimeError):
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Dict
+from typing import Callable, Dict, Optional

 import attr

@@ -73,6 +73,9 @@ class RoomVersion:
     # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
     # m.room.membership event with membership 'knock'.
     msc2403_knocking = attr.ib(type=bool)
+    # MSC2716: Adds m.room.power_levels -> content.historical field to control
+    # whether "insertion", "chunk", "marker" events can be sent
+    msc2716_historical = attr.ib(type=bool)


 class RoomVersions:
@@ -88,6 +91,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     V2 = RoomVersion(
         "2",

@@ -101,6 +105,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     V3 = RoomVersion(
         "3",

@@ -114,6 +119,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     V4 = RoomVersion(
         "4",

@@ -127,6 +133,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     V5 = RoomVersion(
         "5",

@@ -140,6 +147,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     V6 = RoomVersion(
         "6",

@@ -153,6 +161,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     MSC2176 = RoomVersion(
         "org.matrix.msc2176",
@@ -166,9 +175,10 @@ class RoomVersions:
         msc2176_redaction_rules=True,
         msc3083_join_rules=False,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     MSC3083 = RoomVersion(
-        "org.matrix.msc3083",
+        "org.matrix.msc3083.v2",
         RoomDisposition.UNSTABLE,
         EventFormatVersions.V3,
         StateResolutionVersions.V2,

@@ -179,6 +189,7 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=True,
         msc2403_knocking=False,
+        msc2716_historical=False,
     )
     V7 = RoomVersion(
         "7",

@@ -192,6 +203,21 @@ class RoomVersions:
         msc2176_redaction_rules=False,
         msc3083_join_rules=False,
         msc2403_knocking=True,
+        msc2716_historical=False,
+    )
+    MSC2716 = RoomVersion(
+        "org.matrix.msc2716",
+        RoomDisposition.STABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
+        special_case_aliases_auth=False,
+        strict_canonicaljson=True,
+        limit_notifications_power_levels=True,
+        msc2176_redaction_rules=False,
+        msc3083_join_rules=False,
+        msc2403_knocking=True,
+        msc2716_historical=True,
     )
@@ -207,6 +233,41 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.MSC2176,
         RoomVersions.MSC3083,
         RoomVersions.V7,
+        RoomVersions.MSC2716,
+    )
+}
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RoomVersionCapability:
+    """An object which describes the unique attributes of a room version."""
+
+    identifier: str  # the identifier for this capability
+    preferred_version: Optional[RoomVersion]
+    support_check_lambda: Callable[[RoomVersion], bool]
+
+
+MSC3244_CAPABILITIES = {
+    cap.identifier: {
+        "preferred": cap.preferred_version.identifier
+        if cap.preferred_version is not None
+        else None,
+        "support": [
+            v.identifier
+            for v in KNOWN_ROOM_VERSIONS.values()
+            if cap.support_check_lambda(v)
+        ],
+    }
+    for cap in (
+        RoomVersionCapability(
+            "knock",
+            RoomVersions.V7,
+            lambda room_version: room_version.msc2403_knocking,
+        ),
+        RoomVersionCapability(
+            "restricted",
+            None,
+            lambda room_version: room_version.msc3083_join_rules,
+        ),
     )
-    # Note that we do not include MSC2043 here unless it is enabled in the config.
 }
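For illustration, with the room versions defined above the comprehension evaluates to a mapping of roughly this shape (a sketch; the support lists are abbreviated rather than exhaustive):

# Sketch of the shape MSC3244_CAPABILITIES takes; support lists abbreviated.
example_capabilities = {
    "knock": {
        "preferred": "7",  # RoomVersions.V7 is named as the preferred knocking version
        "support": ["7", "org.matrix.msc2716"],  # versions with msc2403_knocking=True
    },
    "restricted": {
        "preferred": None,  # no preferred version is given for restricted rooms
        "support": ["org.matrix.msc3083.v2"],  # versions with msc3083_join_rules=True
    },
}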
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 #

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -33,6 +33,9 @@ DEFAULT_CONFIG = """\
 # 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or
 # 'psycopg2' (for PostgreSQL).
 #
+# 'txn_limit' gives the maximum number of transactions to run per connection
+# before reconnecting. Defaults to 0, which means no limit.
+#
 # 'args' gives options which are passed through to the database engine,
 # except for options starting 'cp_', which are used to configure the Twisted
 # connection pool. For a reference to valid arguments, see:

@@ -53,6 +56,7 @@ DEFAULT_CONFIG = """\
 #
 #database:
 #  name: psycopg2
+#  txn_limit: 10000
 #  args:
 #    user: synapse_user
 #    password: secretpassword
@@ -39,12 +39,13 @@ DEFAULT_SUBJECTS = {
     "messages_from_person_and_others": "[%(app)s] You have messages on %(app)s from %(person)s and others...",
     "invite_from_person": "[%(app)s] %(person)s has invited you to chat on %(app)s...",
     "invite_from_person_to_room": "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s...",
+    "invite_from_person_to_space": "[%(app)s] %(person)s has invited you to join the %(space)s space on %(app)s...",
     "password_reset": "[%(server_name)s] Password reset",
     "email_validation": "[%(server_name)s] Validate your email",
 }


-@attr.s
+@attr.s(slots=True, frozen=True)
 class EmailSubjectConfig:
     message_from_person_in_room = attr.ib(type=str)
     message_from_person = attr.ib(type=str)

@@ -54,6 +55,7 @@ class EmailSubjectConfig:
     messages_from_person_and_others = attr.ib(type=str)
     invite_from_person = attr.ib(type=str)
     invite_from_person_to_room = attr.ib(type=str)
+    invite_from_person_to_space = attr.ib(type=str)
     password_reset = attr.ib(type=str)
     email_validation = attr.ib(type=str)
@@ -32,3 +32,9 @@ class ExperimentalConfig(Config):

         # MSC2716 (backfill existing history)
         self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
+
+        # MSC2285 (hidden read receipts)
+        self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
+
+        # MSC3244 (room version capabilities)
+        self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False)
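A minimal sketch of how these flags behave once read, assuming the usual `experimental_features` section of the homeserver config is what supplies the `experimental` mapping used above:

# Sketch only: the nested dict stands in for the parsed homeserver.yaml, and the
# section name "experimental_features" is an assumption about where the keys live.
config = {
    "experimental_features": {
        "msc2285_enabled": True,  # hidden read receipts
        "msc3244_enabled": True,  # room version capabilities
    }
}

experimental = config.get("experimental_features") or {}

msc2716_enabled = experimental.get("msc2716_enabled", False)
msc2285_enabled = experimental.get("msc2285_enabled", False)
msc3244_enabled = experimental.get("msc3244_enabled", False)

assert msc2285_enabled and msc3244_enabled and not msc2716_enabled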
@@ -106,6 +106,18 @@ def check(
         if not event.signatures.get(event_id_domain):
             raise AuthError(403, "Event not signed by sending server")

+    is_invite_via_allow_rule = (
+        event.type == EventTypes.Member
+        and event.membership == Membership.JOIN
+        and "join_authorised_via_users_server" in event.content
+    )
+    if is_invite_via_allow_rule:
+        authoriser_domain = get_domain_from_id(
+            event.content["join_authorised_via_users_server"]
+        )
+        if not event.signatures.get(authoriser_domain):
+            raise AuthError(403, "Event not signed by authorising server")
+
     # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
     #
     # 1. If type is m.room.create:

@@ -177,7 +189,7 @@ def check(
     # https://github.com/vector-im/vector-web/issues/1208 hopefully
     if event.type == EventTypes.ThirdPartyInvite:
         user_level = get_user_power_level(event.user_id, auth_events)
-        invite_level = _get_named_level(auth_events, "invite", 0)
+        invite_level = get_named_level(auth_events, "invite", 0)

         if user_level < invite_level:
             raise AuthError(403, "You don't have permission to invite users")
@@ -193,6 +205,13 @@ def check(
     if event.type == EventTypes.Redaction:
         check_redaction(room_version_obj, event, auth_events)

+    if (
+        event.type == EventTypes.MSC2716_INSERTION
+        or event.type == EventTypes.MSC2716_CHUNK
+        or event.type == EventTypes.MSC2716_MARKER
+    ):
+        check_historical(room_version_obj, event, auth_events)
+
     logger.debug("Allowing! %s", event)
@@ -285,8 +304,8 @@ def _is_membership_change_allowed(
     user_level = get_user_power_level(event.user_id, auth_events)
     target_level = get_user_power_level(target_user_id, auth_events)

-    # FIXME (erikj): What should we do here as the default?
-    ban_level = _get_named_level(auth_events, "ban", 50)
+    invite_level = get_named_level(auth_events, "invite", 0)
+    ban_level = get_named_level(auth_events, "ban", 50)

     logger.debug(
         "_is_membership_change_allowed: %s",

@@ -336,8 +355,6 @@ def _is_membership_change_allowed(
         elif target_in_room:  # the target is already in the room.
             raise AuthError(403, "%s is already in the room." % target_user_id)
         else:
-            invite_level = _get_named_level(auth_events, "invite", 0)
-
             if user_level < invite_level:
                 raise AuthError(403, "You don't have permission to invite users")
     elif Membership.JOIN == membership:
@@ -345,16 +362,41 @@ def _is_membership_change_allowed(
         # * They are not banned.
         # * They are accepting a previously sent invitation.
         # * They are already joined (it's a NOOP).
-        # * The room is public or restricted.
+        # * The room is public.
+        # * The room is restricted and the user meets the allows rules.
         if event.user_id != target_user_id:
             raise AuthError(403, "Cannot force another user to join.")
         elif target_banned:
             raise AuthError(403, "You are banned from this room")
-        elif join_rule == JoinRules.PUBLIC or (
+        elif join_rule == JoinRules.PUBLIC:
+            pass
+        elif (
             room_version.msc3083_join_rules
             and join_rule == JoinRules.MSC3083_RESTRICTED
         ):
-            pass
+            # This is the same as public, but the event must contain a reference
+            # to the server who authorised the join. If the event does not contain
+            # the proper content it is rejected.
+            #
+            # Note that if the caller is in the room or invited, then they do
+            # not need to meet the allow rules.
+            if not caller_in_room and not caller_invited:
+                authorising_user = event.content.get("join_authorised_via_users_server")
+
+                if authorising_user is None:
+                    raise AuthError(403, "Join event is missing authorising user.")
+
+                # The authorising user must be in the room.
+                key = (EventTypes.Member, authorising_user)
+                member_event = auth_events.get(key)
+                _check_joined_room(member_event, authorising_user, event.room_id)
+
+                authorising_user_level = get_user_power_level(
+                    authorising_user, auth_events
+                )
+                if authorising_user_level < invite_level:
+                    raise AuthError(403, "Join event authorised by invalid server.")
+
         elif join_rule == JoinRules.INVITE or (
             room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
         ):
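For reference, the join events this branch deals with carry the authorising user in their content; a hypothetical content dict (identifiers invented) and the condition that routes such a join into the extra validation:

# Hypothetical content of a join into a restricted room; the MXID is made up.
join_event_content = {
    "membership": "join",
    "join_authorised_via_users_server": "@admin:other.example.com",
}


def routes_to_authoriser_checks(content: dict) -> bool:
    # Only join events that name an authorising user trigger the additional
    # membership and power-level checks described above.
    return (
        content.get("membership") == "join"
        and "join_authorised_via_users_server" in content
    )


assert routes_to_authoriser_checks(join_event_content)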
@@ -369,7 +411,7 @@ def _is_membership_change_allowed(
         if target_banned and user_level < ban_level:
             raise AuthError(403, "You cannot unban user %s." % (target_user_id,))
         elif target_user_id != event.user_id:
-            kick_level = _get_named_level(auth_events, "kick", 50)
+            kick_level = get_named_level(auth_events, "kick", 50)

             if user_level < kick_level or user_level <= target_level:
                 raise AuthError(403, "You cannot kick user %s." % target_user_id)

@@ -445,7 +487,7 @@ def get_send_level(


 def _can_send_event(event: EventBase, auth_events: StateMap[EventBase]) -> bool:
-    power_levels_event = _get_power_level_event(auth_events)
+    power_levels_event = get_power_level_event(auth_events)

     send_level = get_send_level(event.type, event.get("state_key"), power_levels_event)
     user_level = get_user_power_level(event.user_id, auth_events)

@@ -485,7 +527,7 @@ def check_redaction(
     """
     user_level = get_user_power_level(event.user_id, auth_events)

-    redact_level = _get_named_level(auth_events, "redact", 50)
+    redact_level = get_named_level(auth_events, "redact", 50)

     if user_level >= redact_level:
         return False
@@ -504,6 +546,37 @@ def check_redaction(
     raise AuthError(403, "You don't have permission to redact events")


+def check_historical(
+    room_version_obj: RoomVersion,
+    event: EventBase,
+    auth_events: StateMap[EventBase],
+) -> None:
+    """Check whether the event sender is allowed to send historical related
+    events like "insertion", "chunk", and "marker".
+
+    Returns:
+        None
+
+    Raises:
+        AuthError if the event sender is not allowed to send historical related events
+        ("insertion", "chunk", and "marker").
+    """
+    # Ignore the auth checks in room versions that do not support historical
+    # events
+    if not room_version_obj.msc2716_historical:
+        return
+
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    historical_level = get_named_level(auth_events, "historical", 100)
+
+    if user_level < historical_level:
+        raise AuthError(
+            403,
+            'You don\'t have permission to send send historical related events ("insertion", "chunk", and "marker")',
+        )
+
+
 def _check_power_levels(
     room_version_obj: RoomVersion,
     event: EventBase,
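`check_historical` compares the sender's power level against a named level read from the `m.room.power_levels` content (defaulting to 100); a small standalone sketch with invented values:

# Illustrative power_levels content: the "historical" key gates who may send the
# MSC2716 "insertion", "chunk" and "marker" events. All values are invented.
power_levels_content = {
    "users": {"@creator:example.com": 100, "@member:example.com": 0},
    "users_default": 0,
    "historical": 100,
}


def may_send_historical(user_id: str, content: dict) -> bool:
    user_level = content.get("users", {}).get(user_id, content.get("users_default", 0))
    historical_level = content.get("historical", 100)
    return user_level >= historical_level


assert may_send_historical("@creator:example.com", power_levels_content)
assert not may_send_historical("@member:example.com", power_levels_content)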
@@ -600,7 +673,7 @@ def _check_power_levels(
     )


-def _get_power_level_event(auth_events: StateMap[EventBase]) -> Optional[EventBase]:
+def get_power_level_event(auth_events: StateMap[EventBase]) -> Optional[EventBase]:
     return auth_events.get((EventTypes.PowerLevels, ""))


@@ -616,10 +689,10 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int:
     Returns:
         the user's power level in this room.
     """
-    power_level_event = _get_power_level_event(auth_events)
+    power_level_event = get_power_level_event(auth_events)
     if power_level_event:
         level = power_level_event.content.get("users", {}).get(user_id)
-        if not level:
+        if level is None:
             level = power_level_event.content.get("users_default", 0)

         if level is None:

@@ -640,8 +713,8 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int:
         return 0


-def _get_named_level(auth_events: StateMap[EventBase], name: str, default: int) -> int:
-    power_level_event = _get_power_level_event(auth_events)
+def get_named_level(auth_events: StateMap[EventBase], name: str, default: int) -> int:
+    power_level_event = get_power_level_event(auth_events)

     if not power_level_event:
         return default

@@ -728,7 +801,9 @@ def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
     return public_keys


-def auth_types_for_event(event: Union[EventBase, EventBuilder]) -> Set[Tuple[str, str]]:
+def auth_types_for_event(
+    room_version: RoomVersion, event: Union[EventBase, EventBuilder]
+) -> Set[Tuple[str, str]]:
     """Given an event, return a list of (EventType, StateKey) that may be
     needed to auth the event. The returned list may be a superset of what
     would actually be required depending on the full state of the room.

@@ -760,4 +835,12 @@ def auth_types_for_event(
             )
             auth_types.add(key)

+    if room_version.msc3083_join_rules and membership == Membership.JOIN:
+        if "join_authorised_via_users_server" in event.content:
+            key = (
+                EventTypes.Member,
+                event.content["join_authorised_via_users_server"],
+            )
+            auth_types.add(key)
+
     return auth_types
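The switch from `if not level:` to `if level is None:` in `get_user_power_level` matters when a user is explicitly listed with power level 0, because `0` is falsy; a tiny standalone sketch of the difference:

# Demonstrates why the explicit None check is needed: a user listed with an
# explicit power level of 0 should keep 0, not fall back to users_default.
users = {"@muted:example.com": 0}
users_default = 50

level = users.get("@muted:example.com")

old_behaviour = users_default if not level else level       # -> 50 (wrong)
new_behaviour = users_default if level is None else level   # -> 0 (correct)

assert old_behaviour == 50 and new_behaviour == 0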
@@ -109,6 +109,8 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
         add_fields("creator")
     elif event_type == EventTypes.JoinRules:
         add_fields("join_rule")
+        if room_version.msc3083_join_rules:
+            add_fields("allow")
     elif event_type == EventTypes.PowerLevels:
         add_fields(
             "users",

@@ -124,6 +126,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
         if room_version.msc2176_redaction_rules:
             add_fields("invite")

+        if room_version.msc2716_historical:
+            add_fields("historical")
+
     elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
         add_fields("aliases")
     elif event_type == EventTypes.RoomHistoryVisibility:
@@ -178,6 +178,34 @@ async def _check_sigs_on_pdu(
             )
             raise SynapseError(403, errmsg, Codes.FORBIDDEN)

+    # If this is a join event for a restricted room it may have been authorised
+    # via a different server from the sending server. Check those signatures.
+    if (
+        room_version.msc3083_join_rules
+        and pdu.type == EventTypes.Member
+        and pdu.membership == Membership.JOIN
+        and "join_authorised_via_users_server" in pdu.content
+    ):
+        authorising_server = get_domain_from_id(
+            pdu.content["join_authorised_via_users_server"]
+        )
+        try:
+            await keyring.verify_event_for_server(
+                authorising_server,
+                pdu,
+                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
+            )
+        except Exception as e:
+            errmsg = (
+                "event id %s: unable to verify signature for authorising server %s: %s"
+                % (
+                    pdu.event_id,
+                    authorising_server,
+                    e,
+                )
+            )
+            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
+

 def _is_invite_via_3pid(event: EventBase) -> bool:
     return (
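The authorising server checked above is simply the domain part of the user named in `join_authorised_via_users_server`; a tiny sketch of that extraction (the MXID is invented, and the helper is only a stand-in for `get_domain_from_id`):

# Stand-in for get_domain_from_id: a Matrix user ID is "@localpart:domain", so
# everything after the first colon is the server whose signature is also required.
def domain_from_user_id(user_id: str) -> str:
    return user_id.split(":", 1)[1]


content = {"join_authorised_via_users_server": "@admin:other.example.com"}
authorising_server = domain_from_user_id(content["join_authorised_via_users_server"])

assert authorising_server == "other.example.com"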
@@ -19,10 +19,10 @@ import itertools
 import logging
 from typing import (
     TYPE_CHECKING,
-    Any,
     Awaitable,
     Callable,
     Collection,
+    Container,
     Dict,
     Iterable,
     List,

@@ -79,7 +79,15 @@ class InvalidResponseError(RuntimeError):
     we couldn't parse
     """

-    pass
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SendJoinResult:
+    # The event to persist.
+    event: EventBase
+    # A string giving the server the event was sent to.
+    origin: str
+    state: List[EventBase]
+    auth_chain: List[EventBase]


 class FederationClient(FederationBase):
@@ -506,6 +514,7 @@ class FederationClient(FederationBase):
         description: str,
         destinations: Iterable[str],
         callback: Callable[[str], Awaitable[T]],
+        failover_errcodes: Optional[Container[str]] = None,
         failover_on_unknown_endpoint: bool = False,
     ) -> T:
         """Try an operation on a series of servers, until it succeeds

@@ -526,6 +535,9 @@ class FederationClient(FederationBase):
                 next server tried. Normally the stacktrace is logged but this is
                 suppressed if the exception is an InvalidResponseError.

+            failover_errcodes: Error codes (specific to this endpoint) which should
+                cause a failover when received as part of an HTTP 400 error.
+
             failover_on_unknown_endpoint: if True, we will try other servers if it looks
                 like a server doesn't support the endpoint. This is typically useful
                 if the endpoint in question is new or experimental.

@@ -537,6 +549,9 @@ class FederationClient(FederationBase):
             SynapseError if the chosen remote server returns a 300/400 code, or
                 no servers were reachable.
         """
+        if failover_errcodes is None:
+            failover_errcodes = ()
+
         for destination in destinations:
             if destination == self.server_name:
                 continue

@@ -551,11 +566,17 @@ class FederationClient(FederationBase):
                 synapse_error = e.to_synapse_error()
                 failover = False

-                # Failover on an internal server error, or if the destination
-                # doesn't implemented the endpoint for some reason.
+                # Failover should occur:
+                #
+                # * On internal server errors.
+                # * If the destination responds that it cannot complete the request.
+                # * If the destination doesn't implemented the endpoint for some reason.
                 if 500 <= e.code < 600:
                     failover = True

+                elif e.code == 400 and synapse_error.errcode in failover_errcodes:
+                    failover = True
+
                 elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
                     e, synapse_error
                 ):

@@ -671,13 +692,25 @@ class FederationClient(FederationBase):

             return destination, ev, room_version

+        # MSC3083 defines additional error codes for room joins. Unfortunately
+        # we do not yet know the room version, assume these will only be returned
+        # by valid room versions.
+        failover_errcodes = (
+            (Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN)
+            if membership == Membership.JOIN
+            else None
+        )
+
         return await self._try_destination_list(
-            "make_" + membership, destinations, send_request
+            "make_" + membership,
+            destinations,
+            send_request,
+            failover_errcodes=failover_errcodes,
         )

     async def send_join(
         self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
-    ) -> Dict[str, Any]:
+    ) -> SendJoinResult:
         """Sends a join event to one of a list of homeservers.

         Doing so will cause the remote server to add the event to the graph,
@@ -691,18 +724,38 @@ class FederationClient(FederationBase):
             did the make_join)

         Returns:
-            a dict with members ``origin`` (a string
-            giving the server the event was sent to, ``state`` (?) and
-            ``auth_chain``.
+            The result of the send join request.

         Raises:
             SynapseError: if the chosen remote server returns a 300/400 code, or
                 no servers successfully handle the request.
         """

-        async def send_request(destination) -> Dict[str, Any]:
+        async def send_request(destination) -> SendJoinResult:
             response = await self._do_send_join(room_version, destination, pdu)

+            # If an event was returned (and expected to be returned):
+            #
+            # * Ensure it has the same event ID (note that the event ID is a hash
+            #   of the event fields for versions which support MSC3083).
+            # * Ensure the signatures are good.
+            #
+            # Otherwise, fallback to the provided event.
+            if room_version.msc3083_join_rules and response.event:
+                event = response.event
+
+                valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
+                    pdu=event,
+                    origin=destination,
+                    outlier=True,
+                    room_version=room_version,
+                )
+
+                if valid_pdu is None or event.event_id != pdu.event_id:
+                    raise InvalidResponseError("Returned an invalid join event")
+            else:
+                event = pdu
+
             state = response.state
             auth_chain = response.auth_events

@@ -784,13 +837,32 @@ class FederationClient(FederationBase):
                     % (auth_chain_create_events,)
                 )

-            return {
-                "state": signed_state,
-                "auth_chain": signed_auth,
-                "origin": destination,
-            }
+            return SendJoinResult(
+                event=event,
+                state=signed_state,
+                auth_chain=signed_auth,
+                origin=destination,
+            )

-        return await self._try_destination_list("send_join", destinations, send_request)
+        # MSC3083 defines additional error codes for room joins.
+        failover_errcodes = None
+        if room_version.msc3083_join_rules:
+            failover_errcodes = (
+                Codes.UNABLE_AUTHORISE_JOIN,
+                Codes.UNABLE_TO_GRANT_JOIN,
+            )
+
+            # If the join is being authorised via allow rules, we need to send
+            # the /send_join back to the same server that was originally used
+            # with /make_join.
+            if "join_authorised_via_users_server" in pdu.content:
+                destinations = [
+                    get_domain_from_id(pdu.content["join_authorised_via_users_server"])
+                ]
+
+        return await self._try_destination_list(
+            "send_join", destinations, send_request, failover_errcodes=failover_errcodes
+        )

     async def _do_send_join(
         self, room_version: RoomVersion, destination: str, pdu: EventBase
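With `send_join` now returning a `SendJoinResult` instead of a dict, callers read attributes rather than string keys; a minimal standalone sketch of the consuming side (the dataclass here is only a stand-in for the attrs class defined earlier in this diff):

# Sketch only: stand-in for SendJoinResult showing the attribute-style access
# that replaces ret["origin"] / ret["state"] / ret["auth_chain"].
from dataclasses import dataclass, field
from typing import List


@dataclass(frozen=True)
class SendJoinResultSketch:
    event: str  # stand-in for the EventBase to persist
    origin: str
    state: List[str] = field(default_factory=list)
    auth_chain: List[str] = field(default_factory=list)


ret = SendJoinResultSketch(event="$join_event_id", origin="other.example.com")

event = ret.event
origin = ret.origin
state = ret.state
auth_chain = ret.auth_chain

assert origin == "other.example.com"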
@@ -45,6 +45,7 @@ from synapse.api.errors import (
     UnsupportedRoomVersionError,
 )
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.crypto.event_signing import compute_event_signature
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json

@@ -64,7 +65,7 @@ from synapse.replication.http.federation import (
     ReplicationGetQueryRestServlet,
 )
 from synapse.storage.databases.main.lock import Lock
-from synapse.types import JsonDict
+from synapse.types import JsonDict, get_domain_from_id
 from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache

@@ -586,7 +587,7 @@ class FederationServer(FederationBase):
     async def on_send_join_request(
         self, origin: str, content: JsonDict, room_id: str
     ) -> Dict[str, Any]:
-        context = await self._on_send_membership_event(
+        event, context = await self._on_send_membership_event(
             origin, content, Membership.JOIN, room_id
         )

@@ -597,6 +598,7 @@ class FederationServer(FederationBase):

         time_now = self._clock.time_msec()
         return {
+            "org.matrix.msc3083.v2.event": event.get_pdu_json(),
             "state": [p.get_pdu_json(time_now) for p in state.values()],
             "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain],
         }
@@ -681,7 +683,7 @@ class FederationServer(FederationBase):
         Returns:
             The stripped room state.
         """
-        event_context = await self._on_send_membership_event(
+        _, context = await self._on_send_membership_event(
             origin, content, Membership.KNOCK, room_id
         )

@@ -690,14 +692,14 @@ class FederationServer(FederationBase):
         # related to the room while the knock request is pending.
         stripped_room_state = (
             await self.store.get_stripped_room_state_from_event_context(
-                event_context, self._room_prejoin_state_types
+                context, self._room_prejoin_state_types
             )
         )
         return {"knock_state_events": stripped_room_state}

     async def _on_send_membership_event(
         self, origin: str, content: JsonDict, membership_type: str, room_id: str
-    ) -> EventContext:
+    ) -> Tuple[EventBase, EventContext]:
         """Handle an on_send_{join,leave,knock} request

         Does some preliminary validation before passing the request on to the

@@ -712,7 +714,7 @@ class FederationServer(FederationBase):
                 in the event

         Returns:
-            The context of the event after inserting it into the room graph.
+            The event and context of the event after inserting it into the room graph.

         Raises:
             SynapseError if there is a problem with the request, including things like

@@ -748,6 +750,33 @@ class FederationServer(FederationBase):

         logger.debug("_on_send_membership_event: pdu sigs: %s", event.signatures)

+        # Sign the event since we're vouching on behalf of the remote server that
+        # the event is valid to be sent into the room. Currently this is only done
+        # if the user is being joined via restricted join rules.
+        if (
+            room_version.msc3083_join_rules
+            and event.membership == Membership.JOIN
+            and "join_authorised_via_users_server" in event.content
+        ):
+            # We can only authorise our own users.
+            authorising_server = get_domain_from_id(
+                event.content["join_authorised_via_users_server"]
+            )
+            if authorising_server != self.server_name:
+                raise SynapseError(
+                    400,
+                    f"Cannot authorise request from resident server: {authorising_server}",
+                )
+
+            event.signatures.update(
+                compute_event_signature(
+                    room_version,
+                    event.get_pdu_json(),
+                    self.hs.hostname,
+                    self.hs.signing_key,
+                )
+            )
+
         event = await self._check_sigs_and_hash(room_version, event)

         return await self.handler.on_send_membership_event(origin, event)
@@ -995,6 +1024,23 @@ class FederationServer(FederationBase):

             origin, event = next

+            # Prune the event queue if it's getting large.
+            #
+            # We do this *after* handling the first event as the common case is
+            # that the queue is empty (/has the single event in), and so there's
+            # no need to do this check.
+            pruned = await self.store.prune_staged_events_in_room(room_id, room_version)
+            if pruned:
+                # If we have pruned the queue check we need to refetch the next
+                # event to handle.
+                next = await self.store.get_next_staged_event_for_room(
+                    room_id, room_version
+                )
+                if not next:
+                    break
+
+                origin, event = next
+
             lock = await self.store.try_acquire_lock(
                 _INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
             )

File diff suppressed because it is too large
@@ -984,7 +984,7 @@ class PublicRoomList(BaseFederationServlet):
         limit = parse_integer_from_args(query, "limit", 0)
         since_token = parse_string_from_args(query, "since", None)
         include_all_networks = parse_boolean_from_args(
-            query, "include_all_networks", False
+            query, "include_all_networks", default=False
         )
         third_party_instance_id = parse_string_from_args(
             query, "third_party_instance_id", None

@@ -1908,16 +1908,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
         suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
         max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")

-        exclude_rooms = []
-        if b"exclude_rooms" in query:
-            try:
-                exclude_rooms = [
-                    room_id.decode("ascii") for room_id in query[b"exclude_rooms"]
-                ]
-            except Exception:
-                raise SynapseError(
-                    400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM
-                )
+        exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[])

         return 200, await self.handler.federation_space_summary(
             origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
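The hand-rolled decoding of `exclude_rooms` is replaced by the shared `parse_strings_from_args` helper; a rough standalone sketch of the behaviour relied on here (the helper's full semantics, such as its error handling, are assumed rather than shown):

# Rough sketch approximating parse_strings_from_args(query, "exclude_rooms", default=[])
# for a Twisted-style query dict with bytes keys and values.
from typing import Dict, List


def parse_strings_from_args_sketch(
    query: Dict[bytes, List[bytes]], name: str, default: List[str]
) -> List[str]:
    values = query.get(name.encode("ascii"))
    if values is None:
        return default
    return [v.decode("ascii") for v in values]


query = {b"exclude_rooms": [b"!a:example.com", b"!b:example.com"]}

assert parse_strings_from_args_sketch(query, "exclude_rooms", default=[]) == [
    "!a:example.com",
    "!b:example.com",
]
assert parse_strings_from_args_sketch({}, "exclude_rooms", default=[]) == []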
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 from typing import TYPE_CHECKING, Collection, List, Optional, Union

 from synapse import event_auth

@@ -20,16 +21,18 @@ from synapse.api.constants import (
     Membership,
     RestrictedJoinRuleTypes,
 )
-from synapse.api.errors import AuthError
+from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
 from synapse.events.builder import EventBuilder
-from synapse.types import StateMap
+from synapse.types import StateMap, get_domain_from_id
 from synapse.util.metrics import Measure

 if TYPE_CHECKING:
     from synapse.server import HomeServer

+logger = logging.getLogger(__name__)
+

 class EventAuthHandler:
     """

@@ -39,6 +42,7 @@ class EventAuthHandler:
     def __init__(self, hs: "HomeServer"):
         self._clock = hs.get_clock()
         self._store = hs.get_datastore()
+        self._server_name = hs.hostname

     async def check_from_context(
         self, room_version: str, event, context, do_sig_check=True
@@ -81,15 +85,76 @@ class EventAuthHandler:
         # introduce undesirable "state reset" behaviour.
         #
         # All of which sounds a bit tricky so we don't bother for now.

         auth_ids = []
-        for etype, state_key in event_auth.auth_types_for_event(event):
+        for etype, state_key in event_auth.auth_types_for_event(
+            event.room_version, event
+        ):
             auth_ev_id = current_state_ids.get((etype, state_key))
             if auth_ev_id:
                 auth_ids.append(auth_ev_id)

         return auth_ids

+    async def get_user_which_could_invite(
+        self, room_id: str, current_state_ids: StateMap[str]
+    ) -> str:
+        """
+        Searches the room state for a local user who has the power level necessary
+        to invite other users.
+
+        Args:
+            room_id: The room ID under search.
+            current_state_ids: The current state of the room.
+
+        Returns:
+            The MXID of the user which could issue an invite.
+
+        Raises:
+            SynapseError if no appropriate user is found.
+        """
+        power_level_event_id = current_state_ids.get((EventTypes.PowerLevels, ""))
+        invite_level = 0
+        users_default_level = 0
+        if power_level_event_id:
+            power_level_event = await self._store.get_event(power_level_event_id)
+            invite_level = power_level_event.content.get("invite", invite_level)
+            users_default_level = power_level_event.content.get(
+                "users_default", users_default_level
+            )
+            users = power_level_event.content.get("users", {})
+        else:
+            users = {}
+
+        # Find the user with the highest power level.
+        users_in_room = await self._store.get_users_in_room(room_id)
+        # Only interested in local users.
+        local_users_in_room = [
+            u for u in users_in_room if get_domain_from_id(u) == self._server_name
+        ]
+        chosen_user = max(
+            local_users_in_room,
+            key=lambda user: users.get(user, users_default_level),
+            default=None,
+        )
+
+        # Return the chosen if they can issue invites.
+        user_power_level = users.get(chosen_user, users_default_level)
+        if chosen_user and user_power_level >= invite_level:
+            logger.debug(
+                "Found a user who can issue invites %s with power level %d >= invite level %d",
+                chosen_user,
+                user_power_level,
+                invite_level,
+            )
+            return chosen_user
+
+        # No user was found.
+        raise SynapseError(
+            400,
+            "Unable to find a user which could issue an invite",
+            Codes.UNABLE_TO_GRANT_JOIN,
+        )
+
     async def check_host_in_room(self, room_id: str, host: str) -> bool:
         with Measure(self._clock, "check_host_in_room"):
             return await self._store.is_host_joined(room_id, host)
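A small worked example of the selection above: among local users, the one with the highest effective power level is picked, and only returned if that level reaches the room's invite level (all identifiers and numbers invented):

# Worked example of the max()-based selection in get_user_which_could_invite;
# all user IDs and levels are invented.
users = {"@alice:example.com": 100, "@bob:example.com": 0}
users_default_level = 0
invite_level = 50

local_users_in_room = ["@alice:example.com", "@bob:example.com", "@carol:example.com"]

chosen_user = max(
    local_users_in_room,
    key=lambda user: users.get(user, users_default_level),
    default=None,
)
user_power_level = users.get(chosen_user, users_default_level)

assert chosen_user == "@alice:example.com"
assert user_power_level >= invite_level  # so this user can authorise the join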
@@ -134,6 +199,18 @@ class EventAuthHandler:
         # in any of them.
         allowed_rooms = await self.get_rooms_that_allow_join(state_ids)
         if not await self.is_user_in_rooms(allowed_rooms, user_id):
+
+            # If this is a remote request, the user might be in an allowed room
+            # that we do not know about.
+            if get_domain_from_id(user_id) != self._server_name:
+                for room_id in allowed_rooms:
+                    if not await self._store.is_host_joined(room_id, self._server_name):
+                        raise SynapseError(
+                            400,
+                            f"Unable to check if {user_id} is in allowed rooms.",
+                            Codes.UNABLE_AUTHORISE_JOIN,
+                        )
+
             raise AuthError(
                 403,
                 "You do not belong to any of the required rooms to join this room.",
|
@ -1494,9 +1494,10 @@ class FederationHandler(BaseHandler):
             host_list, event, room_version_obj
         )

-        origin = ret["origin"]
-        state = ret["state"]
-        auth_chain = ret["auth_chain"]
+        event = ret.event
+        origin = ret.origin
+        state = ret.state
+        auth_chain = ret.auth_chain
         auth_chain.sort(key=lambda e: e.depth)

         logger.debug("do_invite_join auth_chain: %s", auth_chain)
@ -1676,7 +1677,7 @@ class FederationHandler(BaseHandler):

         # checking the room version will check that we've actually heard of the room
         # (and return a 404 otherwise)
-        room_version = await self.store.get_room_version_id(room_id)
+        room_version = await self.store.get_room_version(room_id)

         # now check that we are *still* in the room
         is_in_room = await self._event_auth_handler.check_host_in_room(
@ -1691,8 +1692,38 @@ class FederationHandler(BaseHandler):
         event_content = {"membership": Membership.JOIN}

+        # If the current room is using restricted join rules, additional information
+        # may need to be included in the event content in order to efficiently
+        # validate the event.
+        #
+        # Note that this requires the /send_join request to come back to the
+        # same server.
+        if room_version.msc3083_join_rules:
+            state_ids = await self.store.get_current_state_ids(room_id)
+            if await self._event_auth_handler.has_restricted_join_rules(
+                state_ids, room_version
+            ):
+                prev_member_event_id = state_ids.get((EventTypes.Member, user_id), None)
+                # If the user is invited or joined to the room already, then
+                # no additional info is needed.
+                include_auth_user_id = True
+                if prev_member_event_id:
+                    prev_member_event = await self.store.get_event(prev_member_event_id)
+                    include_auth_user_id = prev_member_event.membership not in (
+                        Membership.JOIN,
+                        Membership.INVITE,
+                    )
+
+                if include_auth_user_id:
+                    event_content[
+                        "join_authorised_via_users_server"
+                    ] = await self._event_auth_handler.get_user_which_could_invite(
+                        room_id,
+                        state_ids,
+                    )
+
         builder = self.event_builder_factory.new(
-            room_version,
+            room_version.identifier,
             {
                 "type": EventTypes.Member,
                 "content": event_content,
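For a restricted room where the joining user is neither joined nor invited, the block above ends up building a membership event whose content carries the extra authorising field. An illustrative value (only the key name comes from the diff; the user ID is invented):

    event_content = {
        "membership": "join",
        "join_authorised_via_users_server": "@admin:example.com",  # hypothetical authorising local user
    }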
@ -1710,10 +1741,13 @@ class FederationHandler(BaseHandler):
             logger.warning("Failed to create join to %s because %s", room_id, e)
             raise

+        # Ensure the user can even join the room.
+        await self._check_join_restrictions(context, event)
+
         # The remote hasn't signed it yet, obviously. We'll do the full checks
         # when we get the event back in `on_send_join_request`
         await self._event_auth_handler.check_from_context(
-            room_version, event, context, do_sig_check=False
+            room_version.identifier, event, context, do_sig_check=False
         )

         return event
@ -1958,7 +1992,7 @@ class FederationHandler(BaseHandler):
     @log_function
     async def on_send_membership_event(
         self, origin: str, event: EventBase
-    ) -> EventContext:
+    ) -> Tuple[EventBase, EventContext]:
         """
         We have received a join/leave/knock event for a room via send_join/leave/knock.

@ -1981,7 +2015,7 @@ class FederationHandler(BaseHandler):
             event: The member event that has been signed by the remote homeserver.

         Returns:
-            The context of the event after inserting it into the room graph.
+            The event and context of the event after inserting it into the room graph.

         Raises:
             SynapseError if the event is not accepted into the room
@ -2037,7 +2071,7 @@ class FederationHandler(BaseHandler):

         # all looks good, we can persist the event.
         await self._run_push_actions_and_persist_event(event, context)
-        return context
+        return event, context

     async def _check_join_restrictions(
         self, context: EventContext, event: EventBase
@ -2473,7 +2507,7 @@ class FederationHandler(BaseHandler):
         )

         # Now check if event pass auth against said current state
-        auth_types = auth_types_for_event(event)
+        auth_types = auth_types_for_event(room_version_obj, event)
         current_state_ids_list = [
             e for k, e in current_state_ids.items() if k in auth_types
         ]
@ -2714,9 +2748,11 @@ class FederationHandler(BaseHandler):
                     event.event_id,
                     e.event_id,
                 )
-                context = await self.state_handler.compute_event_context(e)
+                missing_auth_event_context = (
+                    await self.state_handler.compute_event_context(e)
+                )
                 await self._auth_and_persist_event(
-                    origin, e, context, auth_events=auth
+                    origin, e, missing_auth_event_context, auth_events=auth
                 )

                 if e.event_id in event_auth_events:
@ -21,6 +21,7 @@ from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
+from synapse.handlers.receipts import ReceiptEventSource
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.storage.roommember import RoomsForUser
 from synapse.streams.config import PaginationConfig
@ -134,6 +135,8 @@ class InitialSyncHandler(BaseHandler):
             joined_rooms,
             to_key=int(now_token.receipt_key),
         )
+        if self.hs.config.experimental.msc2285_enabled:
+            receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id)

         tags_by_room = await self.store.get_tags_for_user(user_id)

@ -430,7 +433,9 @@ class InitialSyncHandler(BaseHandler):
                 room_id, to_key=now_token.receipt_key
             )
             if not receipts:
-                receipts = []
+                return []
+            if self.hs.config.experimental.msc2285_enabled:
+                receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id)
             return receipts

         presence, receipts, (messages, token) = await make_deferred_yieldable(
@ -14,9 +14,10 @@
 import logging
 from typing import TYPE_CHECKING, List, Optional, Tuple

+from synapse.api.constants import ReadReceiptEventFields
 from synapse.appservice import ApplicationService
 from synapse.handlers._base import BaseHandler
-from synapse.types import JsonDict, ReadReceipt, get_domain_from_id
+from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@ -137,7 +138,7 @@ class ReceiptsHandler(BaseHandler):
         return True

     async def received_client_receipt(
-        self, room_id: str, receipt_type: str, user_id: str, event_id: str,
+        self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool,
         extra_content: Optional[JsonDict] = None,
     ) -> None:
         """Called when a client tells us a local user has read up to the given
@ -148,23 +149,67 @@ class ReceiptsHandler(BaseHandler):
             receipt_type=receipt_type,
             user_id=user_id,
             event_ids=[event_id],
-            data={"ts": int(self.clock.time_msec()), **(extra_content or {})},
+            data={"ts": int(self.clock.time_msec()), "hidden": hidden, **(extra_content or {})},
         )

         is_new = await self._handle_new_receipts([receipt])
         if not is_new:
             return

-        if self.federation_sender:
+        if self.federation_sender and not (
+            self.hs.config.experimental.msc2285_enabled and hidden
+        ):
             await self.federation_sender.send_read_receipt(receipt)


 class ReceiptEventSource:
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
+        self.config = hs.config
+
+    @staticmethod
+    def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]:
+        visible_events = []
+
+        # filter out hidden receipts the user shouldn't see
+        for event in events:
+            content = event.get("content", {})
+            new_event = event.copy()
+            new_event["content"] = {}
+
+            for event_id in content.keys():
+                event_content = content.get(event_id, {})
+                m_read = event_content.get("m.read", {})
+
+                # If m_read is missing copy over the original event_content as there is nothing to process here
+                if not m_read:
+                    new_event["content"][event_id] = event_content.copy()
+                    continue
+
+                new_users = {}
+                for rr_user_id, user_rr in m_read.items():
+                    hidden = user_rr.get("hidden", None)
+                    if hidden is not True or rr_user_id == user_id:
+                        new_users[rr_user_id] = user_rr.copy()
+                        # If hidden has a value replace hidden with the correct prefixed key
+                        if hidden is not None:
+                            new_users[rr_user_id].pop("hidden")
+                            new_users[rr_user_id][
+                                ReadReceiptEventFields.MSC2285_HIDDEN
+                            ] = hidden
+
+                # Set new users unless empty
+                if len(new_users.keys()) > 0:
+                    new_event["content"][event_id] = {"m.read": new_users}
+
+            # Append new_event to visible_events unless empty
+            if len(new_event["content"].keys()) > 0:
+                visible_events.append(new_event)
+
+        return visible_events

     async def get_new_events(
-        self, from_key: int, room_ids: List[str], **kwargs
+        self, from_key: int, room_ids: List[str], user: UserID, **kwargs
     ) -> Tuple[List[JsonDict], int]:
         from_key = int(from_key)
         to_key = self.get_current_key()
@ -176,6 +221,9 @@ class ReceiptEventSource:
             room_ids, from_key=from_key, to_key=to_key
         )

+        if self.config.experimental.msc2285_enabled:
+            events = ReceiptEventSource.filter_out_hidden(events, user.to_string())
+
         return (events, to_key)

     async def get_new_events_as(
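As an illustration of the filtering above, a hidden read receipt is stripped for everyone except its sender (the event and user IDs below are invented):

    events = [
        {
            "type": "m.receipt",
            "room_id": "!room:example.com",
            "content": {
                "$event1:example.com": {
                    "m.read": {
                        "@alice:example.com": {"ts": 1628000000000, "hidden": True},
                        "@bob:example.com": {"ts": 1628000000001},
                    }
                }
            },
        }
    ]

    # Seen by @bob, the hidden receipt from @alice is dropped:
    filtered = ReceiptEventSource.filter_out_hidden(events, "@bob:example.com")
    # filtered[0]["content"]["$event1:example.com"]["m.read"] keeps only @bob's entry.
    # Seen by @alice herself, her own receipt is kept, with "hidden" renamed to the
    # MSC2285-prefixed key (ReadReceiptEventFields.MSC2285_HIDDEN).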
@ -961,6 +961,7 @@ class RoomCreationHandler(BaseHandler):
             "kick": 50,
             "redact": 50,
             "invite": 50,
+            "historical": 100,
         }

         if config["original_invitees_have_ops"]:
@ -16,7 +16,7 @@ import abc
 import logging
 import random
 from http import HTTPStatus
-from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple

 from synapse import types
 from synapse.api.constants import AccountDataTypes, EventTypes, Membership
@ -28,6 +28,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.api.ratelimiting import Ratelimiter
+from synapse.event_auth import get_named_level, get_power_level_event
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.types import (
@ -340,16 +341,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

         if event.membership == Membership.JOIN:
             newly_joined = True
-            prev_member_event = None
             if prev_member_event_id:
                 prev_member_event = await self.store.get_event(prev_member_event_id)
                 newly_joined = prev_member_event.membership != Membership.JOIN
-
-            # Check if the member should be allowed access via membership in a space.
-            await self.event_auth_handler.check_restricted_join_rules(
-                prev_state_ids, event.room_version, user_id, prev_member_event
-            )

         # Only rate-limit if the user actually joined the room, otherwise we'll end
         # up blocking profile updates.
         if newly_joined and ratelimit:
@ -701,7 +696,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     # so don't really fit into the general auth process.
                     raise AuthError(403, "Guest access not allowed")

-            if not is_host_in_room:
+            # Check if a remote join should be performed.
+            remote_join, remote_room_hosts = await self._should_perform_remote_join(
+                target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
+            )
+            if remote_join:
                 if ratelimit:
                     time_now_s = self.clock.time()
                     (
@ -826,6 +825,106 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             outlier=outlier,
         )

+    async def _should_perform_remote_join(
+        self,
+        user_id: str,
+        room_id: str,
+        remote_room_hosts: List[str],
+        content: JsonDict,
+        is_host_in_room: bool,
+    ) -> Tuple[bool, List[str]]:
+        """
+        Check whether the server should do a remote join (as opposed to a local
+        join) for a user.
+
+        Generally a remote join is used if:
+
+        * The server is not yet in the room.
+        * The server is in the room, the room has restricted join rules, the user
+          is not joined or invited to the room, and the server does not have
+          another user who is capable of issuing invites.
+
+        Args:
+            user_id: The user joining the room.
+            room_id: The room being joined.
+            remote_room_hosts: A list of remote room hosts.
+            content: The content to use as the event body of the join. This may
+                be modified.
+            is_host_in_room: True if the host is in the room.
+
+        Returns:
+            A tuple of:
+                True if a remote join should be performed. False if the join can be
+                done locally.
+
+                A list of remote room hosts to use. This is an empty list if a
+                local join is to be done.
+        """
+        # If the host isn't in the room, pass through the prospective hosts.
+        if not is_host_in_room:
+            return True, remote_room_hosts
+
+        # If the host is in the room, but not one of the authorised hosts
+        # for restricted join rules, a remote join must be used.
+        room_version = await self.store.get_room_version(room_id)
+        current_state_ids = await self.store.get_current_state_ids(room_id)
+
+        # If restricted join rules are not being used, a local join can always
+        # be used.
+        if not await self.event_auth_handler.has_restricted_join_rules(
+            current_state_ids, room_version
+        ):
+            return False, []
+
+        # If the user is invited to the room or already joined, the join
+        # event can always be issued locally.
+        prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None)
+        prev_member_event = None
+        if prev_member_event_id:
+            prev_member_event = await self.store.get_event(prev_member_event_id)
+            if prev_member_event.membership in (
+                Membership.JOIN,
+                Membership.INVITE,
+            ):
+                return False, []
+
+        # If the local host has a user who can issue invites, then a local
+        # join can be done.
+        #
+        # If not, generate a new list of remote hosts based on which
+        # can issue invites.
+        event_map = await self.store.get_events(current_state_ids.values())
+        current_state = {
+            state_key: event_map[event_id]
+            for state_key, event_id in current_state_ids.items()
+        }
+        allowed_servers = get_servers_from_users(
+            get_users_which_can_issue_invite(current_state)
+        )
+
+        # If the local server is not one of allowed servers, then a remote
+        # join must be done. Return the list of prospective servers based on
+        # which can issue invites.
+        if self.hs.hostname not in allowed_servers:
+            return True, list(allowed_servers)
+
+        # Ensure the member should be allowed access via membership in a room.
+        await self.event_auth_handler.check_restricted_join_rules(
+            current_state_ids, room_version, user_id, prev_member_event
+        )
+
+        # If this is going to be a local join, additional information must
+        # be included in the event content in order to efficiently validate
+        # the event.
+        content[
+            "join_authorised_via_users_server"
+        ] = await self.event_auth_handler.get_user_which_could_invite(
+            room_id,
+            current_state_ids,
+        )
+
+        return False, []
+
     async def transfer_room_state_on_room_upgrade(
         self, old_room_id: str, room_id: str
     ) -> None:
@ -1514,3 +1613,63 @@ class RoomMemberMasterHandler(RoomMemberHandler):

         if membership:
             await self.store.forget(user_id, room_id)
+
+
+def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
+    """
+    Return the list of users which can issue invites.
+
+    This is done by exploring the joined users and comparing their power levels
+    to the necessary power level to issue an invite.
+
+    Args:
+        auth_events: state in force at this point in the room
+
+    Returns:
+        The users which can issue invites.
+    """
+    invite_level = get_named_level(auth_events, "invite", 0)
+    users_default_level = get_named_level(auth_events, "users_default", 0)
+    power_level_event = get_power_level_event(auth_events)
+
+    # Custom power-levels for users.
+    if power_level_event:
+        users = power_level_event.content.get("users", {})
+    else:
+        users = {}
+
+    result = []
+
+    # Check which members are able to invite by ensuring they're joined and have
+    # the necessary power level.
+    for (event_type, state_key), event in auth_events.items():
+        if event_type != EventTypes.Member:
+            continue
+
+        if event.membership != Membership.JOIN:
+            continue
+
+        # Check if the user has a custom power level.
+        if users.get(state_key, users_default_level) >= invite_level:
+            result.append(state_key)
+
+    return result
+
+
+def get_servers_from_users(users: List[str]) -> Set[str]:
+    """
+    Resolve a list of users into their servers.
+
+    Args:
+        users: A list of users.
+
+    Returns:
+        A set of servers.
+    """
+    servers = set()
+    for user in users:
+        try:
+            servers.add(get_domain_from_id(user))
+        except SynapseError:
+            pass
+    return servers
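A minimal standalone sketch of the idea behind get_servers_from_users (this is not the Synapse implementation, which goes through get_domain_from_id): resolve MXIDs to homeserver names and skip anything malformed.

    def servers_from_users_sketch(users):
        servers = set()
        for user in users:
            # An MXID looks like "@localpart:server"; ignore anything else.
            if user.startswith("@") and ":" in user:
                servers.add(user.split(":", 1)[1])
        return servers

    print(servers_from_users_sketch(["@a:one.example.com", "@b:two.example.com", "not-an-mxid"]))
    # {'one.example.com', 'two.example.com'}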
@ -848,7 +848,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):

 def read_body_with_max_size(
     response: IResponse, stream: ByteWriteable, max_size: Optional[int]
-) -> defer.Deferred:
+) -> "defer.Deferred[int]":
     """
     Read a HTTP response body to a file-object. Optionally enforcing a maximum file size.

@ -863,7 +863,7 @@ def read_body_with_max_size(
     Returns:
         A Deferred which resolves to the length of the read body.
     """
-    d = defer.Deferred()
+    d: "defer.Deferred[int]" = defer.Deferred()

     # If the Content-Length header gives a size larger than the maximum allowed
     # size, do not bother downloading the body.
@ -27,7 +27,7 @@ from twisted.internet.interfaces import (
 )
 from twisted.web.client import URI, Agent, HTTPConnectionPool
 from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer
+from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResponse

 from synapse.crypto.context_factory import FederationPolicyForHTTPS
 from synapse.http.client import BlacklistingAgentWrapper
@ -116,7 +116,7 @@ class MatrixFederationAgent:
         uri: bytes,
         headers: Optional[Headers] = None,
         bodyProducer: Optional[IBodyProducer] = None,
-    ) -> Generator[defer.Deferred, Any, defer.Deferred]:
+    ) -> Generator[defer.Deferred, Any, IResponse]:
         """
         Args:
             method: HTTP method: GET/POST/etc
@ -14,21 +14,32 @@
 import base64
 import logging
 import re
-from typing import Optional, Tuple
-from urllib.request import getproxies_environment, proxy_bypass_environment
+from typing import Any, Dict, Optional, Tuple
+from urllib.parse import urlparse
+from urllib.request import (  # type: ignore[attr-defined]
+    getproxies_environment,
+    proxy_bypass_environment,
+)

 import attr
 from zope.interface import implementer

 from twisted.internet import defer
 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
 from twisted.python.failure import Failure
-from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
+from twisted.web.client import (
+    URI,
+    BrowserLikePolicyForHTTPS,
+    HTTPConnectionPool,
+    _AgentBase,
+)
 from twisted.web.error import SchemeNotSupported
 from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IPolicyForHTTPS
+from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS

 from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
+from synapse.types import ISynapseReactor

 logger = logging.getLogger(__name__)

@ -63,35 +74,38 @@ class ProxyAgent(_AgentBase):
         reactor might have some blacklisting applied (i.e. for DNS queries),
         but we need unblocked access to the proxy.

-        contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
+        contextFactory: A factory for TLS contexts, to control the
             verification parameters of OpenSSL. The default is to use a
             `BrowserLikePolicyForHTTPS`, so unless you have special
             requirements you can leave this as-is.

-        connectTimeout (Optional[float]): The amount of time that this Agent will wait
+        connectTimeout: The amount of time that this Agent will wait
             for the peer to accept a connection, in seconds. If 'None',
             HostnameEndpoint's default (30s) will be used.

             This is used for connections to both proxies and destination servers.

-        bindAddress (bytes): The local address for client sockets to bind to.
+        bindAddress: The local address for client sockets to bind to.

-        pool (HTTPConnectionPool|None): connection pool to be used. If None, a
+        pool: connection pool to be used. If None, a
             non-persistent pool instance will be created.

-        use_proxy (bool): Whether proxy settings should be discovered and used
+        use_proxy: Whether proxy settings should be discovered and used
             from conventional environment variables.
+
+    Raises:
+        ValueError if use_proxy is set and the environment variables
+            contain an invalid proxy specification.
     """

     def __init__(
         self,
-        reactor,
-        proxy_reactor=None,
+        reactor: IReactorCore,
+        proxy_reactor: Optional[ISynapseReactor] = None,
         contextFactory: Optional[IPolicyForHTTPS] = None,
-        connectTimeout=None,
-        bindAddress=None,
-        pool=None,
-        use_proxy=False,
+        connectTimeout: Optional[float] = None,
+        bindAddress: Optional[bytes] = None,
+        pool: Optional[HTTPConnectionPool] = None,
+        use_proxy: bool = False,
     ):
         contextFactory = contextFactory or BrowserLikePolicyForHTTPS()

@ -102,7 +116,7 @@ class ProxyAgent(_AgentBase):
         else:
             self.proxy_reactor = proxy_reactor

-        self._endpoint_kwargs = {}
+        self._endpoint_kwargs: Dict[str, Any] = {}
         if connectTimeout is not None:
             self._endpoint_kwargs["timeout"] = connectTimeout
         if bindAddress is not None:
@ -117,16 +131,12 @@ class ProxyAgent(_AgentBase):
             https_proxy = proxies["https"].encode() if "https" in proxies else None
             no_proxy = proxies["no"] if "no" in proxies else None

-            # Parse credentials from http and https proxy connection string if present
-            self.http_proxy_creds, http_proxy = parse_username_password(http_proxy)
-            self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)
-
-        self.http_proxy_endpoint = _http_proxy_endpoint(
-            http_proxy, self.proxy_reactor, **self._endpoint_kwargs
+        self.http_proxy_endpoint, self.http_proxy_creds = _http_proxy_endpoint(
+            http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
         )

-        self.https_proxy_endpoint = _http_proxy_endpoint(
-            https_proxy, self.proxy_reactor, **self._endpoint_kwargs
+        self.https_proxy_endpoint, self.https_proxy_creds = _http_proxy_endpoint(
+            https_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
         )

         self.no_proxy = no_proxy
@ -134,7 +144,13 @@ class ProxyAgent(_AgentBase):
         self._policy_for_https = contextFactory
         self._reactor = reactor

-    def request(self, method, uri, headers=None, bodyProducer=None):
+    def request(
+        self,
+        method: bytes,
+        uri: bytes,
+        headers: Optional[Headers] = None,
+        bodyProducer: Optional[IBodyProducer] = None,
+    ) -> defer.Deferred:
         """
         Issue a request to the server indicated by the given uri.

@ -146,16 +162,15 @@ class ProxyAgent(_AgentBase):
         See also: twisted.web.iweb.IAgent.request

         Args:
-            method (bytes): The request method to use, such as `GET`, `POST`, etc
+            method: The request method to use, such as `GET`, `POST`, etc

-            uri (bytes): The location of the resource to request.
+            uri: The location of the resource to request.

-            headers (Headers|None): Extra headers to send with the request
+            headers: Extra headers to send with the request

-            bodyProducer (IBodyProducer|None): An object which can generate bytes to
-                make up the body of this request (for example, the properly encoded
-                contents of a file for a file upload). Or, None if the request is to
-                have no body.
+            bodyProducer: An object which can generate bytes to make up the body of
+                this request (for example, the properly encoded contents of a file for
+                a file upload). Or, None if the request is to have no body.

         Returns:
             Deferred[IResponse]: completes when the header of the response has
@ -253,70 +268,89 @@ class ProxyAgent(_AgentBase):
         )


-def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs):
+def _http_proxy_endpoint(
+    proxy: Optional[bytes],
+    reactor: IReactorCore,
+    tls_options_factory: IPolicyForHTTPS,
+    **kwargs,
+) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]:
     """Parses an http proxy setting and returns an endpoint for the proxy

     Args:
-        proxy: the proxy setting in the form: [<username>:<password>@]<host>[:<port>]
-            Note that compared to other apps, this function currently lacks support
-            for specifying a protocol schema (i.e. protocol://...).
+        proxy: the proxy setting in the form: [scheme://][<username>:<password>@]<host>[:<port>]
+            This currently supports http:// and https:// proxies.
+            A hostname without scheme is assumed to be http.

         reactor: reactor to be used to connect to the proxy

+        tls_options_factory: the TLS options to use when connecting through a https proxy
+
         kwargs: other args to be passed to HostnameEndpoint

     Returns:
-        interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
-        or None
+        a tuple of
+            endpoint to use to connect to the proxy, or None
+            ProxyCredentials or if no credentials were found, or None
+
+    Raise:
+        ValueError if proxy has no hostname or unsupported scheme.
     """
     if proxy is None:
-        return None
+        return None, None

-    # Parse the connection string
-    host, port = parse_host_port(proxy, default_port=1080)
-    return HostnameEndpoint(reactor, host, port, **kwargs)
+    # Note: urlsplit/urlparse cannot be used here as that does not work (for Python
+    # 3.9+) on scheme-less proxies, e.g. host:port.
+    scheme, host, port, credentials = parse_proxy(proxy)
+
+    proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs)
+
+    if scheme == b"https":
+        tls_options = tls_options_factory.creatorForNetloc(host, port)
+        proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
+
+    return proxy_endpoint, credentials


-def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]:
+def parse_proxy(
+    proxy: bytes, default_scheme: bytes = b"http", default_port: int = 1080
+) -> Tuple[bytes, bytes, int, Optional[ProxyCredentials]]:
     """
-    Parses the username and password from a proxy declaration e.g
-    username:password@hostname:port.
+    Parse a proxy connection string.
+
+    Given a HTTP proxy URL, breaks it down into components and checks that it
+    has a hostname (otherwise it is not useful to us when trying to find a
+    proxy) and asserts that the URL has a scheme we support.

     Args:
-        proxy: The proxy connection string.
+        proxy: The proxy connection string. Must be in the form '[scheme://][<username>:<password>@]host[:port]'.
+        default_scheme: The default scheme to return if one is not found in `proxy`. Defaults to http
+        default_port: The default port to return if one is not found in `proxy`. Defaults to 1080

-    Returns
-        An instance of ProxyCredentials and the proxy connection string with any credentials
-        stripped, i.e u:p@host:port -> host:port. If no credentials were found, the
-        ProxyCredentials instance is replaced with None.
-    """
-    if proxy and b"@" in proxy:
-        # We use rsplit here as the password could contain an @ character
-        credentials, proxy_without_credentials = proxy.rsplit(b"@", 1)
-        return ProxyCredentials(credentials), proxy_without_credentials
-
-    return None, proxy
-
-
-def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]:
-    """
-    Parse the hostname and port from a proxy connection byte string.
-
-    Args:
-        hostport: The proxy connection string. Must be in the form 'host[:port]'.
-        default_port: The default port to return if one is not found in `hostport`.
-
     Returns:
-        A tuple containing the hostname and port. Uses `default_port` if one was not found.
-    """
-    if b":" in hostport:
-        host, port = hostport.rsplit(b":", 1)
-        try:
-            port = int(port)
-            return host, port
-        except ValueError:
-            # the thing after the : wasn't a valid port; presumably this is an
-            # IPv6 address.
-            pass
-
-    return hostport, default_port
+        A tuple containing the scheme, hostname, port and ProxyCredentials.
+            If no credentials were found, the ProxyCredentials instance is replaced with None.
+
+    Raise:
+        ValueError if proxy has no hostname or unsupported scheme.
+    """
+    # First check if we have a scheme present
+    # Note: urlsplit/urlparse cannot be used (for Python # 3.9+) on scheme-less proxies, e.g. host:port.
+    if b"://" not in proxy:
+        proxy = b"".join([default_scheme, b"://", proxy])
+
+    url = urlparse(proxy)
+
+    if not url.hostname:
+        raise ValueError("Proxy URL did not contain a hostname! Please specify one.")
+
+    if url.scheme not in (b"http", b"https"):
+        raise ValueError(
+            f"Unknown proxy scheme {url.scheme!s}; only 'http' and 'https' is supported."
+        )
+
+    credentials = None
+    if url.username and url.password:
+        credentials = ProxyCredentials(b"".join([url.username, b":", url.password]))
+
+    return url.scheme, url.hostname, url.port or default_port, credentials
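Illustrative inputs and the values parse_proxy would produce for them (the proxy hosts and credentials below are invented; the expected tuples follow directly from the code above):

    scheme, host, port, creds = parse_proxy(b"proxy.example.com")
    assert (scheme, host, port, creds) == (b"http", b"proxy.example.com", 1080, None)

    scheme, host, port, creds = parse_proxy(b"https://user:pass@proxy.example.com:8888")
    assert (scheme, host, port) == (b"https", b"proxy.example.com", 8888)
    # creds is a ProxyCredentials wrapping b"user:pass"; it is returned alongside the
    # endpoint from _http_proxy_endpoint so the proxy connection can be authenticated.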
@ -14,47 +14,86 @@

 """ This module contains base REST classes for constructing REST servlets. """
 import logging
-from typing import Dict, Iterable, List, Optional, overload
+from typing import Iterable, List, Mapping, Optional, Sequence, overload

 from typing_extensions import Literal

 from twisted.web.server import Request

 from synapse.api.errors import Codes, SynapseError
+from synapse.types import JsonDict
 from synapse.util import json_decoder

 logger = logging.getLogger(__name__)


-def parse_integer(request, name, default=None, required=False):
+@overload
+def parse_integer(request: Request, name: str, default: int) -> int:
+    ...
+
+
+@overload
+def parse_integer(request: Request, name: str, *, required: Literal[True]) -> int:
+    ...
+
+
+@overload
+def parse_integer(
+    request: Request, name: str, default: Optional[int] = None, required: bool = False
+) -> Optional[int]:
+    ...
+
+
+def parse_integer(
+    request: Request, name: str, default: Optional[int] = None, required: bool = False
+) -> Optional[int]:
     """Parse an integer parameter from the request string

     Args:
         request: the twisted HTTP request.
-        name (bytes/unicode): the name of the query parameter.
-        default (int|None): value to use if the parameter is absent, defaults
-            to None.
-        required (bool): whether to raise a 400 SynapseError if the
-            parameter is absent, defaults to False.
+        name: the name of the query parameter.
+        default: value to use if the parameter is absent, defaults to None.
+        required: whether to raise a 400 SynapseError if the parameter is absent,
+            defaults to False.

     Returns:
-        int|None: An int value or the default.
+        An int value or the default.

     Raises:
         SynapseError: if the parameter is absent and required, or if the
             parameter is present and not an integer.
     """
-    return parse_integer_from_args(request.args, name, default, required)
+    args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
+    return parse_integer_from_args(args, name, default, required)


-def parse_integer_from_args(args, name, default=None, required=False):
+def parse_integer_from_args(
+    args: Mapping[bytes, Sequence[bytes]],
+    name: str,
+    default: Optional[int] = None,
+    required: bool = False,
+) -> Optional[int]:
+    """Parse an integer parameter from the request string
+
+    Args:
+        args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
+        name: the name of the query parameter.
+        default: value to use if the parameter is absent, defaults to None.
+        required: whether to raise a 400 SynapseError if the parameter is absent,
+            defaults to False.
+
+    Returns:
+        An int value or the default.
+
+    Raises:
+        SynapseError: if the parameter is absent and required, or if the
+            parameter is present and not an integer.
+    """
+    name_bytes = name.encode("ascii")

-    if not isinstance(name, bytes):
-        name = name.encode("ascii")
-
-    if name in args:
+    if name_bytes in args:
         try:
-            return int(args[name][0])
+            return int(args[name_bytes][0])
         except Exception:
             message = "Query parameter %r must be an integer" % (name,)
             raise SynapseError(400, message, errcode=Codes.INVALID_PARAM)
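A small usage sketch for the typed helper above (the endpoint and query parameters are invented):

    # Handling e.g. GET /_example/paginate?limit=10&from=20
    limit = parse_integer(request, "limit", default=50)     # -> 10
    since = parse_integer(request, "from", required=True)    # -> 20, or a 400 SynapseError if absent
    filter_id = parse_integer(request, "filter_id")          # -> None when the parameter is missing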
@ -66,36 +105,102 @@ def parse_integer_from_args(args, name, default=None, required=False):
     return default


-def parse_boolean(request, name, default=None, required=False):
+@overload
+def parse_boolean(request: Request, name: str, default: bool) -> bool:
+    ...
+
+
+@overload
+def parse_boolean(request: Request, name: str, *, required: Literal[True]) -> bool:
+    ...
+
+
+@overload
+def parse_boolean(
+    request: Request, name: str, default: Optional[bool] = None, required: bool = False
+) -> Optional[bool]:
+    ...
+
+
+def parse_boolean(
+    request: Request, name: str, default: Optional[bool] = None, required: bool = False
+) -> Optional[bool]:
     """Parse a boolean parameter from the request query string

     Args:
         request: the twisted HTTP request.
-        name (bytes/unicode): the name of the query parameter.
-        default (bool|None): value to use if the parameter is absent, defaults
-            to None.
-        required (bool): whether to raise a 400 SynapseError if the
-            parameter is absent, defaults to False.
+        name: the name of the query parameter.
+        default: value to use if the parameter is absent, defaults to None.
+        required: whether to raise a 400 SynapseError if the parameter is absent,
+            defaults to False.

     Returns:
-        bool|None: A bool value or the default.
+        A bool value or the default.

     Raises:
         SynapseError: if the parameter is absent and required, or if the
             parameter is present and not one of "true" or "false".
     """
-    return parse_boolean_from_args(request.args, name, default, required)
+    args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
+    return parse_boolean_from_args(args, name, default, required)


-def parse_boolean_from_args(args, name, default=None, required=False):
+@overload
+def parse_boolean_from_args(
+    args: Mapping[bytes, Sequence[bytes]],
+    name: str,
+    default: bool,
+) -> bool:
+    ...
+
+
+@overload
+def parse_boolean_from_args(
+    args: Mapping[bytes, Sequence[bytes]],
+    name: str,
+    *,
+    required: Literal[True],
+) -> bool:
+    ...
+
+
+@overload
+def parse_boolean_from_args(
+    args: Mapping[bytes, Sequence[bytes]],
+    name: str,
+    default: Optional[bool] = None,
+    required: bool = False,
+) -> Optional[bool]:
+    ...
+
+
+def parse_boolean_from_args(
+    args: Mapping[bytes, Sequence[bytes]],
+    name: str,
+    default: Optional[bool] = None,
+    required: bool = False,
+) -> Optional[bool]:
+    """Parse a boolean parameter from the request query string
+
+    Args:
+        args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
+        name: the name of the query parameter.
+        default: value to use if the parameter is absent, defaults to None.
+        required: whether to raise a 400 SynapseError if the parameter is absent,
+            defaults to False.
+
+    Returns:
+        A bool value or the default.
+
+    Raises:
+        SynapseError: if the parameter is absent and required, or if the
+            parameter is present and not one of "true" or "false".
+    """
+    name_bytes = name.encode("ascii")

-    if not isinstance(name, bytes):
-        name = name.encode("ascii")
-
-    if name in args:
+    if name_bytes in args:
         try:
-            return {b"true": True, b"false": False}[args[name][0]]
+            return {b"true": True, b"false": False}[args[name_bytes][0]]
         except Exception:
             message = (
                 "Boolean query parameter %r must be one of ['true', 'false']"
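The *_from_args variants work directly on the raw args mapping, where both keys and values are bytes. A short illustration (the parameter names are invented):

    args = {b"dir": [b"b"], b"suppress_edits": [b"true"]}
    assert parse_boolean_from_args(args, "suppress_edits", default=False) is True
    assert parse_boolean_from_args(args, "missing", default=False) is False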
@ -111,7 +216,7 @@ def parse_boolean_from_args(args, name, default=None, required=False):

 @overload
 def parse_bytes_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
     default: Optional[bytes] = None,
 ) -> Optional[bytes]:
@ -120,7 +225,7 @@ def parse_bytes_from_args(

 @overload
 def parse_bytes_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
     default: Literal[None] = None,
     *,
@ -131,7 +236,7 @@ def parse_bytes_from_args(

 @overload
 def parse_bytes_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
     default: Optional[bytes] = None,
     required: bool = False,
@ -140,7 +245,7 @@ def parse_bytes_from_args(


 def parse_bytes_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
     default: Optional[bytes] = None,
     required: bool = False,
@ -172,6 +277,42 @@ def parse_bytes_from_args(
     return default


+@overload
+def parse_string(
+    request: Request,
+    name: str,
+    default: str,
+    *,
+    allowed_values: Optional[Iterable[str]] = None,
+    encoding: str = "ascii",
+) -> str:
+    ...
+
+
+@overload
+def parse_string(
+    request: Request,
+    name: str,
+    *,
+    required: Literal[True],
+    allowed_values: Optional[Iterable[str]] = None,
+    encoding: str = "ascii",
+) -> str:
+    ...
+
+
+@overload
+def parse_string(
+    request: Request,
+    name: str,
+    *,
+    required: bool = False,
+    allowed_values: Optional[Iterable[str]] = None,
+    encoding: str = "ascii",
+) -> Optional[str]:
+    ...
+
+
 def parse_string(
     request: Request,
     name: str,
@ -179,7 +320,7 @@ def parse_string(
     required: bool = False,
     allowed_values: Optional[Iterable[str]] = None,
     encoding: str = "ascii",
-):
+) -> Optional[str]:
     """
     Parse a string parameter from the request query string.

@ -205,7 +346,7 @@ def parse_string(
         parameter is present, must be one of a list of allowed values and
         is not one of those allowed values.
     """
-    args: Dict[bytes, List[bytes]] = request.args  # type: ignore
+    args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
     return parse_string_from_args(
         args,
         name,
@ -239,9 +380,8 @@ def _parse_string_value(

 @overload
 def parse_strings_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
-    default: Optional[List[str]] = None,
     *,
     allowed_values: Optional[Iterable[str]] = None,
     encoding: str = "ascii",
@ -251,9 +391,20 @@ def parse_strings_from_args(

 @overload
 def parse_strings_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
+    name: str,
+    default: List[str],
+    *,
+    allowed_values: Optional[Iterable[str]] = None,
+    encoding: str = "ascii",
+) -> List[str]:
+    ...
+
+
+@overload
+def parse_strings_from_args(
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
-    default: Optional[List[str]] = None,
     *,
     required: Literal[True],
     allowed_values: Optional[Iterable[str]] = None,
@ -264,7 +415,7 @@ def parse_strings_from_args(

 @overload
 def parse_strings_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
     default: Optional[List[str]] = None,
     *,
@ -276,7 +427,7 @@ def parse_strings_from_args(


 def parse_strings_from_args(
-    args: Dict[bytes, List[bytes]],
+    args: Mapping[bytes, Sequence[bytes]],
     name: str,
     default: Optional[List[str]] = None,
     required: bool = False,
@ -325,7 +476,7 @@ def parse_strings_from_args(
|
||||||
|
|
||||||
@overload
|
@overload
|
||||||
def parse_string_from_args(
|
def parse_string_from_args(
|
||||||
args: Dict[bytes, List[bytes]],
|
args: Mapping[bytes, Sequence[bytes]],
|
||||||
name: str,
|
name: str,
|
||||||
default: Optional[str] = None,
|
default: Optional[str] = None,
|
||||||
*,
|
*,
|
||||||
|
@ -337,7 +488,7 @@ def parse_string_from_args(
|
||||||
|
|
||||||
@overload
|
@overload
|
||||||
def parse_string_from_args(
|
def parse_string_from_args(
|
||||||
args: Dict[bytes, List[bytes]],
|
args: Mapping[bytes, Sequence[bytes]],
|
||||||
name: str,
|
name: str,
|
||||||
default: Optional[str] = None,
|
default: Optional[str] = None,
|
||||||
*,
|
*,
|
||||||
|
@ -350,7 +501,7 @@ def parse_string_from_args(
|
||||||
|
|
||||||
@overload
|
@overload
|
||||||
def parse_string_from_args(
|
def parse_string_from_args(
|
||||||
args: Dict[bytes, List[bytes]],
|
args: Mapping[bytes, Sequence[bytes]],
|
||||||
name: str,
|
name: str,
|
||||||
default: Optional[str] = None,
|
default: Optional[str] = None,
|
||||||
required: bool = False,
|
required: bool = False,
|
||||||
|
@ -361,7 +512,7 @@ def parse_string_from_args(
|
||||||
|
|
||||||
|
|
||||||
def parse_string_from_args(
|
def parse_string_from_args(
|
||||||
args: Dict[bytes, List[bytes]],
|
args: Mapping[bytes, Sequence[bytes]],
|
||||||
name: str,
|
name: str,
|
||||||
default: Optional[str] = None,
|
default: Optional[str] = None,
|
||||||
required: bool = False,
|
required: bool = False,
|
||||||
|
@ -409,13 +560,14 @@ def parse_string_from_args(
|
||||||
return strings[0]
|
return strings[0]
|
||||||
|
|
||||||
|
|
||||||
def parse_json_value_from_request(request, allow_empty_body=False):
|
def parse_json_value_from_request(
|
||||||
|
request: Request, allow_empty_body: bool = False
|
||||||
|
) -> Optional[JsonDict]:
|
||||||
"""Parse a JSON value from the body of a twisted HTTP request.
|
"""Parse a JSON value from the body of a twisted HTTP request.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
request: the twisted HTTP request.
|
request: the twisted HTTP request.
|
||||||
allow_empty_body (bool): if True, an empty body will be accepted and
|
allow_empty_body: if True, an empty body will be accepted and turned into None
|
||||||
turned into None
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
The JSON value.
|
The JSON value.
|
||||||
|
@ -424,7 +576,7 @@ def parse_json_value_from_request(request, allow_empty_body=False):
|
||||||
SynapseError if the request body couldn't be decoded as JSON.
|
SynapseError if the request body couldn't be decoded as JSON.
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
content_bytes = request.content.read()
|
content_bytes = request.content.read() # type: ignore
|
||||||
except Exception:
|
except Exception:
|
||||||
raise SynapseError(400, "Error reading JSON content.")
|
raise SynapseError(400, "Error reading JSON content.")
|
||||||
|
|
||||||
|
@ -440,13 +592,15 @@ def parse_json_value_from_request(request, allow_empty_body=False):
|
||||||
return content
|
return content
|
||||||
|
|
||||||
|
|
||||||
def parse_json_object_from_request(request, allow_empty_body=False):
|
def parse_json_object_from_request(
|
||||||
|
request: Request, allow_empty_body: bool = False
|
||||||
|
) -> JsonDict:
|
||||||
"""Parse a JSON object from the body of a twisted HTTP request.
|
"""Parse a JSON object from the body of a twisted HTTP request.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
request: the twisted HTTP request.
|
request: the twisted HTTP request.
|
||||||
allow_empty_body (bool): if True, an empty body will be accepted and
|
allow_empty_body: if True, an empty body will be accepted and turned into
|
||||||
turned into an empty dict.
|
an empty dict.
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
SynapseError if the request body couldn't be decoded as JSON or
|
SynapseError if the request body couldn't be decoded as JSON or
|
||||||
|
@ -457,14 +611,14 @@ def parse_json_object_from_request(request, allow_empty_body=False):
|
||||||
if allow_empty_body and content is None:
|
if allow_empty_body and content is None:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
if type(content) != dict:
|
if not isinstance(content, dict):
|
||||||
message = "Content must be a JSON object."
|
message = "Content must be a JSON object."
|
||||||
raise SynapseError(400, message, errcode=Codes.BAD_JSON)
|
raise SynapseError(400, message, errcode=Codes.BAD_JSON)
|
||||||
|
|
||||||
return content
|
return content
|
||||||
|
|
||||||
|
|
||||||
def assert_params_in_dict(body, required):
|
def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None:
|
||||||
absent = []
|
absent = []
|
||||||
for k in required:
|
for k in required:
|
||||||
if k not in body:
|
if k not in body:
|
||||||
|
|
|
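The overloads above exist so that a type checker can narrow the return type at the call site. A minimal sketch of how a servlet might rely on that (the handler and parameter names are illustrative, not part of this commit; the helpers are assumed to come from synapse.http.servlet):

from typing import List, Optional

from synapse.http.servlet import parse_string, parse_strings_from_args


def example_handler(request) -> None:
    # required=True narrows the return type to str (or raises a 400 if absent).
    room_id: str = parse_string(request, "room_id", required=True)
    # Without required=True the result may be None and must be checked before use.
    via: Optional[str] = parse_string(request, "via")
    # Supplying a list default selects the overload that returns List[str].
    servers: List[str] = parse_strings_from_args(request.args, "server_name", default=[])
    if via is not None:
        servers = servers + [via]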
@@ -25,7 +25,7 @@ See doc/log_contexts.rst for details on how this works.
 import inspect
 import logging
 import threading
-import types
+import typing
 import warnings
 from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union

@@ -745,7 +745,7 @@ def run_in_background(f, *args, **kwargs) -> defer.Deferred:
         # by synchronous exceptions, so let's turn them into Failures.
         return defer.fail()

-    if isinstance(res, types.CoroutineType):
+    if isinstance(res, typing.Coroutine):
         res = defer.ensureDeferred(res)

     # At this point we should have a Deferred, if not then f was a synchronous
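For context on why the isinstance check moved from types.CoroutineType to typing.Coroutine: the latter delegates to collections.abc.Coroutine, so it also matches coroutine-like wrappers that implement the send/throw/close protocol, not only native coroutine objects. A small self-contained illustration (not from this commit):

import types
import typing


async def work() -> int:
    return 1


class WrappedCoroutine:
    """Forwards the coroutine protocol to an inner native coroutine."""

    def __init__(self, inner):
        self._inner = inner

    def send(self, value):
        return self._inner.send(value)

    def throw(self, *args):
        return self._inner.throw(*args)

    def close(self):
        return self._inner.close()

    def __await__(self):
        return self._inner.__await__()


coro = work()
wrapped = WrappedCoroutine(work())
print(isinstance(coro, types.CoroutineType))     # True: a native coroutine
print(isinstance(coro, typing.Coroutine))        # True: satisfies the ABC too
print(isinstance(wrapped, types.CoroutineType))  # False: not a native coroutine
print(isinstance(wrapped, typing.Coroutine))     # True: caught by the ABC check
coro.close()
wrapped.close()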
 88  synapse/logging/handlers.py  (new file)
@@ -0,0 +1,88 @@
+import logging
+import time
+from logging import Handler, LogRecord
+from logging.handlers import MemoryHandler
+from threading import Thread
+from typing import Optional
+
+from twisted.internet.interfaces import IReactorCore
+
+
+class PeriodicallyFlushingMemoryHandler(MemoryHandler):
+    """
+    This is a subclass of MemoryHandler that additionally spawns a background
+    thread to periodically flush the buffer.
+
+    This prevents messages from being buffered for too long.
+
+    Additionally, all messages will be immediately flushed if the reactor has
+    not yet been started.
+    """
+
+    def __init__(
+        self,
+        capacity: int,
+        flushLevel: int = logging.ERROR,
+        target: Optional[Handler] = None,
+        flushOnClose: bool = True,
+        period: float = 5.0,
+        reactor: Optional[IReactorCore] = None,
+    ) -> None:
+        """
+        period: the period between automatic flushes
+
+        reactor: if specified, a custom reactor to use. If not specifies,
+            defaults to the globally-installed reactor.
+            Log entries will be flushed immediately until this reactor has
+            started.
+        """
+        super().__init__(capacity, flushLevel, target, flushOnClose)
+
+        self._flush_period: float = period
+        self._active: bool = True
+        self._reactor_started = False
+
+        self._flushing_thread: Thread = Thread(
+            name="PeriodicallyFlushingMemoryHandler flushing thread",
+            target=self._flush_periodically,
+        )
+        self._flushing_thread.start()
+
+        def on_reactor_running():
+            self._reactor_started = True
+
+        reactor_to_use: IReactorCore
+        if reactor is None:
+            from twisted.internet import reactor as global_reactor
+
+            reactor_to_use = global_reactor  # type: ignore[assignment]
+        else:
+            reactor_to_use = reactor
+
+        # call our hook when the reactor start up
+        reactor_to_use.callWhenRunning(on_reactor_running)
+
+    def shouldFlush(self, record: LogRecord) -> bool:
+        """
+        Before reactor start-up, log everything immediately.
+        Otherwise, fall back to original behaviour of waiting for the buffer to fill.
+        """
+
+        if self._reactor_started:
+            return super().shouldFlush(record)
+        else:
+            return True
+
+    def _flush_periodically(self):
+        """
+        Whilst this handler is active, flush the handler periodically.
+        """
+
+        while self._active:
+            # flush is thread-safe; it acquires and releases the lock internally
+            self.flush()
+            time.sleep(self._flush_period)
+
+    def close(self) -> None:
+        self._active = False
+        super().close()
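A minimal sketch of wiring the new handler up directly from Python. The target handler, capacity and period below are illustrative; Synapse itself would normally construct this from its logging configuration rather than by hand:

import logging

from synapse.logging.handlers import PeriodicallyFlushingMemoryHandler

root = logging.getLogger()
target = logging.StreamHandler()  # where buffered records are eventually written

handler = PeriodicallyFlushingMemoryHandler(
    capacity=10,               # flush once 10 records are buffered...
    flushLevel=logging.ERROR,  # ...or immediately when an ERROR arrives...
    target=target,
    period=5.0,                # ...or at the latest every 5 seconds
)
root.addHandler(handler)

# Before the Twisted reactor has started (e.g. in a plain script like this one),
# shouldFlush() returns True for every record, so nothing is held back.
root.warning("this is written straight through to the target handler")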
@@ -484,7 +484,7 @@ class ModuleApi:
     @defer.inlineCallbacks
     def get_state_events_in_room(
         self, room_id: str, types: Iterable[Tuple[str, Optional[str]]]
-    ) -> Generator[defer.Deferred, Any, defer.Deferred]:
+    ) -> Generator[defer.Deferred, Any, Iterable[EventBase]]:
         """Gets current state events for the given room.

         (This is exposed for compatibility with the old SpamCheckerApi. We should
@@ -111,8 +111,9 @@ class _NotifierUserStream:
         self.last_notified_token = current_token
         self.last_notified_ms = time_now_ms

-        with PreserveLoggingContext():
-            self.notify_deferred = ObservableDeferred(defer.Deferred())
+        self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred(
+            defer.Deferred()
+        )

     def notify(
         self,
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, TypeVar
 import bleach
 import jinja2

-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomTypes
 from synapse.api.errors import StoreError
 from synapse.config.emailconfig import EmailSubjectConfig
 from synapse.events import EventBase
@@ -600,6 +600,22 @@ class Mailer:
                 "app": self.app_name,
             }

+        # If the room is a space, it gets a slightly different topic.
+        create_event_id = room_state_ids.get(("m.room.create", ""))
+        if create_event_id:
+            create_event = await self.store.get_event(
+                create_event_id, allow_none=True
+            )
+            if (
+                create_event
+                and create_event.content.get("room_type") == RoomTypes.SPACE
+            ):
+                return self.email_subjects.invite_from_person_to_space % {
+                    "person": inviter_name,
+                    "space": room_name,
+                    "app": self.app_name,
+                }
+
         return self.email_subjects.invite_from_person_to_room % {
             "person": inviter_name,
             "room": room_name,
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -285,7 +285,7 @@ class ReplicationDataHandler:

         # Create a new deferred that times out after N seconds, as we don't want
         # to wedge here forever.
-        deferred = Deferred()
+        deferred: "Deferred[None]" = Deferred()
         deferred = timeout_deferred(
             deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor
         )
@@ -393,6 +393,11 @@ class FederationSenderHandler:
                 # we only want to send on receipts for our own users
                 if not self._is_mine_id(receipt.user_id):
                     continue
+                if (
+                    receipt.data.get("hidden", False)
+                    and self._hs.config.experimental.msc2285_enabled
+                ):
+                    continue
                 receipt_info = ReadReceipt(
                     receipt.room_id,
                     receipt.receipt_type,
@@ -62,6 +62,7 @@ class UsersRestServletV2(RestServlet):
     The parameter `name` can be used to filter by user id or display name.
     The parameter `guests` can be used to exclude guest users.
     The parameter `deactivated` can be used to include deactivated users.
+    The parameter `order_by` can be used to order the result.
     """

     def __init__(self, hs: "HomeServer"):
@@ -90,8 +91,8 @@ class UsersRestServletV2(RestServlet):
                 errcode=Codes.INVALID_PARAM,
             )

-        user_id = parse_string(request, "user_id", default=None)
-        name = parse_string(request, "name", default=None)
+        user_id = parse_string(request, "user_id")
+        name = parse_string(request, "name")
         guests = parse_boolean(request, "guests", default=True)
         deactivated = parse_boolean(request, "deactivated", default=False)

@@ -108,6 +109,7 @@ class UsersRestServletV2(RestServlet):
                 UserSortOrder.USER_TYPE.value,
                 UserSortOrder.AVATAR_URL.value,
                 UserSortOrder.SHADOW_BANNED.value,
+                UserSortOrder.CREATION_TS.value,
             ),
         )

@@ -416,7 +416,7 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
         assert_params_in_dict(body, ["state_events_at_start", "events"])

         prev_events_from_query = parse_strings_from_args(request.args, "prev_event")
-        chunk_id_from_query = parse_string(request, "chunk_id", default=None)
+        chunk_id_from_query = parse_string(request, "chunk_id")

         if prev_events_from_query is None:
             raise SynapseError(
@@ -507,7 +507,6 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):

         events_to_create = body["events"]

-        prev_event_ids = prev_events_from_query
         inherited_depth = await self._inherit_depth_from_prev_ids(
             prev_events_from_query
         )
@@ -519,6 +518,10 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
         chunk_id_to_connect_to = chunk_id_from_query
         base_insertion_event = None
         if chunk_id_from_query:
+            # All but the first base insertion event should point at a fake
+            # event, which causes the HS to ask for the state at the start of
+            # the chunk later.
+            prev_event_ids = [fake_prev_event_id]
             # TODO: Verify the chunk_id_from_query corresponds to an insertion event
             pass
         # Otherwise, create an insertion event to act as a starting point.
@@ -529,6 +532,8 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
         # an insertion event), in which case we just create a new insertion event
         # that can then get pointed to by a "marker" event later.
         else:
+            prev_event_ids = prev_events_from_query
+
             base_insertion_event_dict = self._create_insertion_event_dict(
                 sender=requester.user.to_string(),
                 room_id=room_id,
@@ -556,9 +561,18 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
         ]

         # Connect this current chunk to the insertion event from the previous chunk
-        last_event_in_chunk["content"][
-            EventContentFields.MSC2716_CHUNK_ID
-        ] = chunk_id_to_connect_to
+        chunk_event = {
+            "type": EventTypes.MSC2716_CHUNK,
+            "sender": requester.user.to_string(),
+            "room_id": room_id,
+            "content": {EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to},
+            # Since the chunk event is put at the end of the chunk,
+            # where the newest-in-time event is, copy the origin_server_ts from
+            # the last event we're inserting
+            "origin_server_ts": last_event_in_chunk["origin_server_ts"],
+        }
+        # Add the chunk event to the end of the chunk (newest-in-time)
+        events_to_create.append(chunk_event)

         # Add an "insertion" event to the start of each chunk (next to the oldest-in-time
         # event in the chunk) so the next chunk can be connected to this one.
@@ -570,7 +584,7 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
             # the first event we're inserting
             origin_server_ts=events_to_create[0]["origin_server_ts"],
         )
-        # Prepend the insertion event to the start of the chunk
+        # Prepend the insertion event to the start of the chunk (oldest-in-time)
         events_to_create = [insertion_event] + events_to_create

         event_ids = []
@@ -729,7 +743,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
         self.auth = hs.get_auth()

     async def on_GET(self, request):
-        server = parse_string(request, "server", default=None)
+        server = parse_string(request, "server")

         try:
             await self.auth.get_user_by_req(request, allow_guest=True)
@@ -748,8 +762,8 @@ class PublicRoomListRestServlet(TransactionRestServlet):
             if server:
                 raise e

-        limit = parse_integer(request, "limit", 0)
-        since_token = parse_string(request, "since", None)
+        limit: Optional[int] = parse_integer(request, "limit", 0)
+        since_token = parse_string(request, "since")

         if limit == 0:
             # zero is a special value which corresponds to no limit.
@@ -783,7 +797,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
     async def on_POST(self, request):
         await self.auth.get_user_by_req(request, allow_guest=True)

-        server = parse_string(request, "server", default=None)
+        server = parse_string(request, "server")
         content = parse_json_object_from_request(request)

         limit: Optional[int] = int(content.get("limit", 100))
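To make the ordering in the batch-send path above concrete, here is a rough, self-contained sketch of how each chunk's event list ends up arranged. The event dicts and type strings are placeholders, not the actual MSC2716 constants used in the diff:

client_events = [
    {"type": "m.room.message", "body": "first historical event"},
    {"type": "m.room.message", "body": "second historical event"},
]
insertion_event = {"type": "example.insertion"}
chunk_event = {"type": "example.chunk"}

events_to_create = list(client_events)
# The chunk event sits at the newest-in-time end and points back at the
# previous chunk's insertion event (chunk_id_to_connect_to in the diff).
events_to_create.append(chunk_event)
# The insertion event sits at the oldest-in-time end so the next chunk can
# connect to this one.
events_to_create = [insertion_event] + events_to_create

print([e["type"] for e in events_to_create])
# ['example.insertion', 'm.room.message', 'm.room.message', 'example.chunk']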
@@ -884,7 +884,14 @@ class WhoamiRestServlet(RestServlet):
     async def on_GET(self, request):
         requester = await self.auth.get_user_by_req(request)

-        return 200, {"user_id": requester.user.to_string()}
+        response = {"user_id": requester.user.to_string()}
+
+        # Appservices and similar accounts do not have device IDs
+        # that we can report on, so exclude them for compliance.
+        if requester.device_id is not None:
+            response["device_id"] = requester.device_id
+
+        return 200, response


 def register_servlets(hs, http_server):
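The effect of this change is easiest to see from the response body. A hedged sketch of what a client now gets back (the identifiers are illustrative values, not real data):

# Before: only the user ID was returned.
old_response = {"user_id": "@alice:example.com"}

# After: the device ID is included too, but only for requesters that actually
# have one; appservice users, for example, are left out.
new_response = {"user_id": "@alice:example.com", "device_id": "ABCDEFG"}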
@@ -14,7 +14,7 @@
 import logging
 from typing import TYPE_CHECKING, Tuple

-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES
 from synapse.http.servlet import RestServlet
 from synapse.http.site import SynapseRequest
 from synapse.types import JsonDict
@@ -55,6 +55,12 @@ class CapabilitiesRestServlet(RestServlet):
                 "m.change_password": {"enabled": change_password},
             }
         }
+
+        if self.config.experimental.msc3244_enabled:
+            response["capabilities"]["m.room_versions"][
+                "org.matrix.msc3244.room_capabilities"
+            ] = MSC3244_CAPABILITIES
+
         return 200, response

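When the experimental flag is on, the capabilities response gains one extra key nested under m.room_versions. A rough sketch of the resulting shape; the contents of MSC3244_CAPABILITIES are not shown in this diff, so the inner value below is just a placeholder:

response = {
    "capabilities": {
        "m.room_versions": {
            # ... the usual default/available fields ...
            "org.matrix.msc3244.room_capabilities": {},  # MSC3244_CAPABILITIES placeholder
        },
        "m.change_password": {"enabled": True},
    }
}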
@@ -194,7 +194,7 @@ class KeyChangesServlet(RestServlet):
     async def on_GET(self, request):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)

-        from_token_string = parse_string(request, "from")
+        from_token_string = parse_string(request, "from", required=True)
         set_tag("from", from_token_string)

         # We want to enforce they do pass us one, but we ignore it and return
@@ -14,6 +14,8 @@

 import logging

+from synapse.api.constants import ReadReceiptEventFields
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request

 from ._base import client_patterns
@@ -37,14 +39,24 @@ class ReadMarkerRestServlet(RestServlet):
         await self.presence_handler.bump_presence_active_time(requester.user)

         body = parse_json_object_from_request(request)

         read_event_id = body.get("m.read", None)
+        hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False)
+
+        if not isinstance(hidden, bool):
+            raise SynapseError(
+                400,
+                "Param %s must be a boolean, if given"
+                % ReadReceiptEventFields.MSC2285_HIDDEN,
+                Codes.BAD_JSON,
+            )
+
         if read_event_id:
             await self.receipts_handler.received_client_receipt(
                 room_id,
                 "m.read",
                 user_id=requester.user.to_string(),
                 event_id=read_event_id,
+                hidden=hidden,
             )

         read_marker_event_id = body.get("m.fully_read", None)
@@ -14,7 +14,8 @@

 import logging

-from synapse.api.errors import SynapseError
+from synapse.api.constants import ReadReceiptEventFields
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request

 from ._base import client_patterns
@@ -42,11 +43,25 @@ class ReceiptRestServlet(RestServlet):
         if receipt_type != "m.read":
             raise SynapseError(400, "Receipt type must be 'm.read'")

+        body = parse_json_object_from_request(request, allow_empty_body=True)
+        hidden = body.pop(ReadReceiptEventFields.MSC2285_HIDDEN, False)
+
+        if not isinstance(hidden, bool):
+            raise SynapseError(
+                400,
+                "Param %s must be a boolean, if given"
+                % ReadReceiptEventFields.MSC2285_HIDDEN,
+                Codes.BAD_JSON,
+            )
+
         await self.presence_handler.bump_presence_active_time(requester.user)

-        body = parse_json_object_from_request(request, allow_empty_body=True)
         await self.receipts_handler.received_client_receipt(
-            room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id,
+            room_id,
+            receipt_type,
+            user_id=requester.user.to_string(),
+            event_id=event_id,
+            hidden=hidden,
             extra_content=body,
         )

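Both servlets apply the same validation: the hidden flag must be a boolean if present. A small standalone sketch of that check; the literal field name below is a stand-in for ReadReceiptEventFields.MSC2285_HIDDEN, whose string value is not spelled out in this diff:

HIDDEN_FIELD = "hidden"  # placeholder for ReadReceiptEventFields.MSC2285_HIDDEN


def extract_hidden(body: dict) -> bool:
    hidden = body.get(HIDDEN_FIELD, False)
    if not isinstance(hidden, bool):
        # Synapse raises SynapseError(400, ..., Codes.BAD_JSON) at this point.
        raise ValueError("Param %s must be a boolean, if given" % HIDDEN_FIELD)
    return hidden


print(extract_hidden({}))                    # False: absent means not hidden
print(extract_hidden({HIDDEN_FIELD: True}))  # True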
|
@ -158,19 +158,21 @@ class RelationPaginationServlet(RestServlet):
|
||||||
event = await self.event_handler.get_event(requester.user, room_id, parent_id)
|
event = await self.event_handler.get_event(requester.user, room_id, parent_id)
|
||||||
|
|
||||||
limit = parse_integer(request, "limit", default=5)
|
limit = parse_integer(request, "limit", default=5)
|
||||||
from_token = parse_string(request, "from")
|
from_token_str = parse_string(request, "from")
|
||||||
to_token = parse_string(request, "to")
|
to_token_str = parse_string(request, "to")
|
||||||
|
|
||||||
if event.internal_metadata.is_redacted():
|
if event.internal_metadata.is_redacted():
|
||||||
# If the event is redacted, return an empty list of relations
|
# If the event is redacted, return an empty list of relations
|
||||||
pagination_chunk = PaginationChunk(chunk=[])
|
pagination_chunk = PaginationChunk(chunk=[])
|
||||||
else:
|
else:
|
||||||
# Return the relations
|
# Return the relations
|
||||||
if from_token:
|
from_token = None
|
||||||
from_token = RelationPaginationToken.from_string(from_token)
|
if from_token_str:
|
||||||
|
from_token = RelationPaginationToken.from_string(from_token_str)
|
||||||
|
|
||||||
if to_token:
|
to_token = None
|
||||||
to_token = RelationPaginationToken.from_string(to_token)
|
if to_token_str:
|
||||||
|
to_token = RelationPaginationToken.from_string(to_token_str)
|
||||||
|
|
||||||
pagination_chunk = await self.store.get_relations_for_event(
|
pagination_chunk = await self.store.get_relations_for_event(
|
||||||
event_id=parent_id,
|
event_id=parent_id,
|
||||||
|
@ -256,19 +258,21 @@ class RelationAggregationPaginationServlet(RestServlet):
|
||||||
raise SynapseError(400, "Relation type must be 'annotation'")
|
raise SynapseError(400, "Relation type must be 'annotation'")
|
||||||
|
|
||||||
limit = parse_integer(request, "limit", default=5)
|
limit = parse_integer(request, "limit", default=5)
|
||||||
from_token = parse_string(request, "from")
|
from_token_str = parse_string(request, "from")
|
||||||
to_token = parse_string(request, "to")
|
to_token_str = parse_string(request, "to")
|
||||||
|
|
||||||
if event.internal_metadata.is_redacted():
|
if event.internal_metadata.is_redacted():
|
||||||
# If the event is redacted, return an empty list of relations
|
# If the event is redacted, return an empty list of relations
|
||||||
pagination_chunk = PaginationChunk(chunk=[])
|
pagination_chunk = PaginationChunk(chunk=[])
|
||||||
else:
|
else:
|
||||||
# Return the relations
|
# Return the relations
|
||||||
if from_token:
|
from_token = None
|
||||||
from_token = AggregationPaginationToken.from_string(from_token)
|
if from_token_str:
|
||||||
|
from_token = AggregationPaginationToken.from_string(from_token_str)
|
||||||
|
|
||||||
if to_token:
|
to_token = None
|
||||||
to_token = AggregationPaginationToken.from_string(to_token)
|
if to_token_str:
|
||||||
|
to_token = AggregationPaginationToken.from_string(to_token_str)
|
||||||
|
|
||||||
pagination_chunk = await self.store.get_aggregation_groups_for_event(
|
pagination_chunk = await self.store.get_aggregation_groups_for_event(
|
||||||
event_id=parent_id,
|
event_id=parent_id,
|
||||||
|
@ -336,14 +340,16 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
|
||||||
raise SynapseError(400, "Relation type must be 'annotation'")
|
raise SynapseError(400, "Relation type must be 'annotation'")
|
||||||
|
|
||||||
limit = parse_integer(request, "limit", default=5)
|
limit = parse_integer(request, "limit", default=5)
|
||||||
from_token = parse_string(request, "from")
|
from_token_str = parse_string(request, "from")
|
||||||
to_token = parse_string(request, "to")
|
to_token_str = parse_string(request, "to")
|
||||||
|
|
||||||
if from_token:
|
from_token = None
|
||||||
from_token = RelationPaginationToken.from_string(from_token)
|
if from_token_str:
|
||||||
|
from_token = RelationPaginationToken.from_string(from_token_str)
|
||||||
|
|
||||||
if to_token:
|
to_token = None
|
||||||
to_token = RelationPaginationToken.from_string(to_token)
|
if to_token_str:
|
||||||
|
to_token = RelationPaginationToken.from_string(to_token_str)
|
||||||
|
|
||||||
result = await self.store.get_relations_for_event(
|
result = await self.store.get_relations_for_event(
|
||||||
event_id=parent_id,
|
event_id=parent_id,
|
||||||
|
|
|
@ -112,7 +112,7 @@ class SyncRestServlet(RestServlet):
|
||||||
default="online",
|
default="online",
|
||||||
allowed_values=self.ALLOWED_PRESENCE,
|
allowed_values=self.ALLOWED_PRESENCE,
|
||||||
)
|
)
|
||||||
filter_id = parse_string(request, "filter", default=None)
|
filter_id = parse_string(request, "filter")
|
||||||
full_state = parse_boolean(request, "full_state", default=False)
|
full_state = parse_boolean(request, "full_state", default=False)
|
||||||
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
|
|
|
@@ -82,6 +82,8 @@ class VersionsRestServlet(RestServlet):
                     "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
                     # Supports the busy presence state described in MSC3026.
                     "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
+                    # Supports receiving hidden read receipts as per MSC2285
+                    "org.matrix.msc2285": self.config.experimental.msc2285_enabled,
                 },
             },
         )
@@ -112,7 +112,7 @@ class ConsentResource(DirectServeHtmlResource):
             request (twisted.web.http.Request):
         """
         version = parse_string(request, "v", default=self._default_consent_version)
-        username = parse_string(request, "u", required=False, default="")
+        username = parse_string(request, "u", default="")
         userhmac = None
         has_consented = False
         public_version = username == ""
@@ -49,6 +49,8 @@ class DownloadResource(DirectServeJsonResource):
             b" media-src 'self';"
             b" object-src 'self';",
         )
+        # Limited non-standard form of CSP for IE11
+        request.setHeader(b"X-Content-Security-Policy", b"sandbox;")
         request.setHeader(
             b"Referrer-Policy",
             b"no-referrer",
@@ -58,9 +58,11 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

-_charset_match = re.compile(br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9-]+)"?', flags=re.I)
+_charset_match = re.compile(
+    br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9_-]+)"?', flags=re.I
+)
 _xml_encoding_match = re.compile(
-    br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9-]+)"', flags=re.I
+    br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9_-]+)"', flags=re.I
 )
 _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)

@@ -175,15 +177,11 @@ class PreviewUrlResource(DirectServeJsonResource):
         respond_with_json(request, 200, {}, send_cors=True)

     async def _async_render_GET(self, request: SynapseRequest) -> None:
-        # This will always be set by the time Twisted calls us.
-        assert request.args is not None
-
         # XXX: if get_user_by_req fails, what should we do in an async render?
         requester = await self.auth.get_user_by_req(request)
-        url = parse_string(request, "url")
-        if b"ts" in request.args:
-            ts = parse_integer(request, "ts")
-        else:
+        url = parse_string(request, "url", required=True)
+        ts = parse_integer(request, "ts")
+        if ts is None:
             ts = self.clock.time_msec()

         # XXX: we could move this into _do_preview if we wanted.
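The only functional change to these regexes is allowing underscores in the character-set name, which matters for charsets such as Shift_JIS. A quick self-contained check of the new pattern (not part of the commit):

import re

_charset_match = re.compile(
    br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9_-]+)"?', flags=re.I
)

html = b'<html><head><meta charset="Shift_JIS"></head></html>'
match = _charset_match.search(html)
print(match.group(1))  # b'Shift_JIS'; the old pattern stopped at b'Shift'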
@@ -16,6 +16,7 @@ import heapq
 import logging
 from collections import defaultdict, namedtuple
 from typing import (
+    TYPE_CHECKING,
     Any,
     Awaitable,
     Callable,
@@ -52,6 +53,10 @@ from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.metrics import Measure, measure_func

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+    from synapse.storage.databases.main import DataStore
+
 logger = logging.getLogger(__name__)
 metrics_logger = logging.getLogger("synapse.state.metrics")

@@ -74,7 +79,7 @@ _NEXT_STATE_ID = 1
 POWER_KEY = (EventTypes.PowerLevels, "")


-def _gen_state_id():
+def _gen_state_id() -> str:
     global _NEXT_STATE_ID
     s = "X%d" % (_NEXT_STATE_ID,)
     _NEXT_STATE_ID += 1
@@ -109,7 +114,7 @@ class _StateCacheEntry:
         # `state_id` is either a state_group (and so an int) or a string. This
         # ensures we don't accidentally persist a state_id as a stateg_group
         if state_group:
-            self.state_id = state_group
+            self.state_id: Union[str, int] = state_group
         else:
             self.state_id = _gen_state_id()

@@ -122,7 +127,7 @@ class StateHandler:
     where necessary
     """

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         self.state_store = hs.get_storage().state
@@ -507,7 +512,7 @@ class StateResolutionHandler:
     be storage-independent.
     """

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()

         self.resolve_linearizer = Linearizer(name="state_resolve_lock")
@@ -636,16 +641,20 @@ class StateResolutionHandler:
         """
         try:
             with Measure(self.clock, "state._resolve_events") as m:
-                v = KNOWN_ROOM_VERSIONS[room_version]
-                if v.state_res == StateResolutionVersions.V1:
+                room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+                if room_version_obj.state_res == StateResolutionVersions.V1:
                     return await v1.resolve_events_with_store(
-                        room_id, state_sets, event_map, state_res_store.get_events
+                        room_id,
+                        room_version_obj,
+                        state_sets,
+                        event_map,
+                        state_res_store.get_events,
                     )
                 else:
                     return await v2.resolve_events_with_store(
                         self.clock,
                         room_id,
-                        room_version,
+                        room_version_obj,
                         state_sets,
                         event_map,
                         state_res_store,
@@ -653,13 +662,15 @@ class StateResolutionHandler:
         finally:
             self._record_state_res_metrics(room_id, m.get_resource_usage())

-    def _record_state_res_metrics(self, room_id: str, rusage: ContextResourceUsage):
+    def _record_state_res_metrics(
+        self, room_id: str, rusage: ContextResourceUsage
+    ) -> None:
         room_metrics = self._state_res_metrics[room_id]
         room_metrics.cpu_time += rusage.ru_utime + rusage.ru_stime
         room_metrics.db_time += rusage.db_txn_duration_sec
         room_metrics.db_events += rusage.evt_db_fetch_count

-    def _report_metrics(self):
+    def _report_metrics(self) -> None:
         if not self._state_res_metrics:
             # no state res has happened since the last iteration: don't bother logging.
             return
@@ -769,16 +780,13 @@ def _make_state_cache_entry(
     )


-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
 class StateResolutionStore:
     """Interface that allows state resolution algorithms to access the database
     in well defined way.
-
-    Args:
-        store (DataStore)
     """

-    store = attr.ib()
+    store: "DataStore"

     def get_events(
         self, event_ids: Iterable[str], allow_rejected: bool = False
@@ -29,7 +29,7 @@ from typing import (
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
-from synapse.api.room_versions import RoomVersions
+from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.events import EventBase
 from synapse.types import MutableStateMap, StateMap

@@ -41,6 +41,7 @@ POWER_KEY = (EventTypes.PowerLevels, "")

 async def resolve_events_with_store(
     room_id: str,
+    room_version: RoomVersion,
     state_sets: Sequence[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_map_factory: Callable[[Iterable[str]], Awaitable[Dict[str, EventBase]]],
@@ -104,7 +105,7 @@ async def resolve_events_with_store(
     # get the ids of the auth events which allow us to authenticate the
     # conflicted state, picking only from the unconflicting state.
     auth_events = _create_auth_events_from_maps(
-        unconflicted_state, conflicted_state, state_map
+        room_version, unconflicted_state, conflicted_state, state_map
     )

     new_needed_events = set(auth_events.values())
@@ -132,7 +133,7 @@ async def resolve_events_with_store(
     state_map.update(state_map_new)

     return _resolve_with_state(
-        unconflicted_state, conflicted_state, auth_events, state_map
+        room_version, unconflicted_state, conflicted_state, auth_events, state_map
     )


@@ -187,6 +188,7 @@ def _seperate(


 def _create_auth_events_from_maps(
+    room_version: RoomVersion,
     unconflicted_state: StateMap[str],
     conflicted_state: StateMap[Set[str]],
     state_map: Dict[str, EventBase],
@@ -194,6 +196,7 @@ def _create_auth_events_from_maps(
     """

     Args:
+        room_version: The room version.
         unconflicted_state: The unconflicted state map.
         conflicted_state: The conflicted state map.
         state_map:
@@ -205,7 +208,9 @@ def _create_auth_events_from_maps(
     for event_ids in conflicted_state.values():
         for event_id in event_ids:
             if event_id in state_map:
-                keys = event_auth.auth_types_for_event(state_map[event_id])
+                keys = event_auth.auth_types_for_event(
+                    room_version, state_map[event_id]
+                )
                 for key in keys:
                     if key not in auth_events:
                         auth_event_id = unconflicted_state.get(key, None)
@@ -215,6 +220,7 @@ def _create_auth_events_from_maps(


 def _resolve_with_state(
+    room_version: RoomVersion,
     unconflicted_state_ids: MutableStateMap[str],
     conflicted_state_ids: StateMap[Set[str]],
     auth_event_ids: StateMap[str],
@@ -235,7 +241,9 @@ def _resolve_with_state(
     }

     try:
-        resolved_state = _resolve_state_events(conflicted_state, auth_events)
+        resolved_state = _resolve_state_events(
+            room_version, conflicted_state, auth_events
+        )
     except Exception:
         logger.exception("Failed to resolve state")
         raise
@@ -248,7 +256,9 @@ def _resolve_with_state(


 def _resolve_state_events(
-    conflicted_state: StateMap[List[EventBase]], auth_events: MutableStateMap[EventBase]
+    room_version: RoomVersion,
+    conflicted_state: StateMap[List[EventBase]],
+    auth_events: MutableStateMap[EventBase],
 ) -> StateMap[EventBase]:
     """This is where we actually decide which of the conflicted state to
     use.
@@ -263,21 +273,27 @@ def _resolve_state_events(
     if POWER_KEY in conflicted_state:
         events = conflicted_state[POWER_KEY]
         logger.debug("Resolving conflicted power levels %r", events)
-        resolved_state[POWER_KEY] = _resolve_auth_events(events, auth_events)
+        resolved_state[POWER_KEY] = _resolve_auth_events(
+            room_version, events, auth_events
+        )

     auth_events.update(resolved_state)

     for key, events in conflicted_state.items():
         if key[0] == EventTypes.JoinRules:
             logger.debug("Resolving conflicted join rules %r", events)
-            resolved_state[key] = _resolve_auth_events(events, auth_events)
+            resolved_state[key] = _resolve_auth_events(
+                room_version, events, auth_events
+            )

     auth_events.update(resolved_state)

     for key, events in conflicted_state.items():
         if key[0] == EventTypes.Member:
             logger.debug("Resolving conflicted member lists %r", events)
-            resolved_state[key] = _resolve_auth_events(events, auth_events)
+            resolved_state[key] = _resolve_auth_events(
+                room_version, events, auth_events
+            )

     auth_events.update(resolved_state)

@@ -290,12 +306,14 @@ def _resolve_state_events(


 def _resolve_auth_events(
-    events: List[EventBase], auth_events: StateMap[EventBase]
+    room_version: RoomVersion, events: List[EventBase], auth_events: StateMap[EventBase]
 ) -> EventBase:
     reverse = list(reversed(_ordered_events(events)))

     auth_keys = {
-        key for event in events for key in event_auth.auth_types_for_event(event)
+        key
+        for event in events
+        for key in event_auth.auth_types_for_event(room_version, event)
     }

     new_auth_events = {}
@@ -36,7 +36,7 @@ import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import RoomVersion
 from synapse.events import EventBase
 from synapse.types import MutableStateMap, StateMap
 from synapse.util import Clock

@@ -53,7 +53,7 @@ _AWAIT_AFTER_ITERATIONS = 100
 async def resolve_events_with_store(
     clock: Clock,
     room_id: str,
-    room_version: str,
+    room_version: RoomVersion,
     state_sets: Sequence[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_res_store: "synapse.state.StateResolutionStore",
@@ -497,7 +497,7 @@ async def _reverse_topological_power_sort(
 async def _iterative_auth_checks(
     clock: Clock,
     room_id: str,
-    room_version: str,
+    room_version: RoomVersion,
     event_ids: List[str],
     base_state: StateMap[str],
     event_map: Dict[str, EventBase],
@@ -519,7 +519,6 @@ async def _iterative_auth_checks(
         Returns the final updated state
     """
     resolved_state = dict(base_state)
-    room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

     for idx, event_id in enumerate(event_ids, start=1):
         event = event_map[event_id]
@@ -538,7 +537,7 @@ async def _iterative_auth_checks(
             if ev.rejected_reason is None:
                 auth_events[(ev.type, ev.state_key)] = ev

-        for key in event_auth.auth_types_for_event(event):
+        for key in event_auth.auth_types_for_event(room_version, event):
             if key in resolved_state:
                 ev_id = resolved_state[key]
                 ev = await _get_event(room_id, ev_id, event_map, state_res_store)
@@ -548,7 +547,7 @@ async def _iterative_auth_checks(

         try:
             event_auth.check(
-                room_version_obj,
+                room_version,
                 event,
                 auth_events,
                 do_sig_check=False,
@@ -15,6 +15,7 @@
 # limitations under the License.
 import logging
 import time
+from collections import defaultdict
 from sys import intern
 from time import monotonic as monotonic_time
 from typing import (

@@ -397,6 +398,7 @@ class DatabasePool:
     ):
         self.hs = hs
         self._clock = hs.get_clock()
+        self._txn_limit = database_config.config.get("txn_limit", 0)
         self._database_config = database_config
         self._db_pool = make_pool(hs.get_reactor(), database_config, engine)

@@ -406,6 +408,9 @@ class DatabasePool:
         self._current_txn_total_time = 0.0
         self._previous_loop_ts = 0.0

+        # Transaction counter: key is the twisted thread id, value is the current count
+        self._txn_counters: Dict[int, int] = defaultdict(int)
+
         # TODO(paul): These can eventually be removed once the metrics code
         # is running in mainline, and we have some nice monitoring frontends
         # to watch it

@@ -750,10 +755,26 @@ class DatabasePool:
             sql_scheduling_timer.observe(sched_duration_sec)
             context.add_database_scheduled(sched_duration_sec)

+            if self._txn_limit > 0:
+                tid = self._db_pool.threadID()
+                self._txn_counters[tid] += 1
+
+                if self._txn_counters[tid] > self._txn_limit:
+                    logger.debug(
+                        "Reconnecting database connection over transaction limit"
+                    )
+                    conn.reconnect()
+                    opentracing.log_kv(
+                        {"message": "reconnected due to txn limit"}
+                    )
+                    self._txn_counters[tid] = 1
+
             if self.engine.is_connection_closed(conn):
                 logger.debug("Reconnecting closed database connection")
                 conn.reconnect()
                 opentracing.log_kv({"message": "reconnected"})
+                if self._txn_limit > 0:
+                    self._txn_counters[tid] = 1

             try:
                 if db_autocommit:

@@ -832,31 +853,16 @@ class DatabasePool:
         self,
         table: str,
         values: Dict[str, Any],
-        or_ignore: bool = False,
         desc: str = "simple_insert",
-    ) -> bool:
+    ) -> None:
         """Executes an INSERT query on the named table.

         Args:
             table: string giving the table name
             values: dict of new column names and values for them
-            or_ignore: bool stating whether an exception should be raised
-                when a conflicting row already exists. If True, False will be
-                returned by the function instead
             desc: description of the transaction, for logging and metrics
-
-        Returns:
-            Whether the row was inserted or not. Only useful when `or_ignore` is True
         """
-        try:
-            await self.runInteraction(desc, self.simple_insert_txn, table, values)
-        except self.engine.module.IntegrityError:
-            # We have to do or_ignore flag at this layer, since we can't reuse
-            # a cursor after we receive an error from the db.
-            if not or_ignore:
-                raise
-            return False
-        return True
+        await self.runInteraction(desc, self.simple_insert_txn, table, values)

     @staticmethod
     def simple_insert_txn(

@@ -930,7 +936,7 @@ class DatabasePool:
         insertion_values: Optional[Dict[str, Any]] = None,
         desc: str = "simple_upsert",
         lock: bool = True,
-    ) -> Optional[bool]:
+    ) -> bool:
         """

         `lock` should generally be set to True (the default), but can be set

@@ -951,8 +957,8 @@ class DatabasePool:
             desc: description of the transaction, for logging and metrics
             lock: True to lock the table when doing the upsert.
         Returns:
-            Native upserts always return None. Emulated upserts return True if a
-            new entry was created, False if an existing one was updated.
+            Returns True if a row was inserted or updated (i.e. if `values` is
+            not empty then this always returns True)
         """
         insertion_values = insertion_values or {}

@@ -995,7 +1001,7 @@ class DatabasePool:
         values: Dict[str, Any],
         insertion_values: Optional[Dict[str, Any]] = None,
         lock: bool = True,
-    ) -> Optional[bool]:
+    ) -> bool:
         """
         Pick the UPSERT method which works best on the platform. Either the
         native one (Pg9.5+, recent SQLites), or fall back to an emulated method.

@@ -1008,16 +1014,15 @@ class DatabasePool:
             insertion_values: additional key/values to use only when inserting
             lock: True to lock the table when doing the upsert.
         Returns:
-            Native upserts always return None. Emulated upserts return True if a
-            new entry was created, False if an existing one was updated.
+            Returns True if a row was inserted or updated (i.e. if `values` is
+            not empty then this always returns True)
         """
         insertion_values = insertion_values or {}

         if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
-            self.simple_upsert_txn_native_upsert(
+            return self.simple_upsert_txn_native_upsert(
                 txn, table, keyvalues, values, insertion_values=insertion_values
             )
-            return None
         else:
             return self.simple_upsert_txn_emulated(
                 txn,

@@ -1045,8 +1050,8 @@ class DatabasePool:
             insertion_values: additional key/values to use only when inserting
             lock: True to lock the table when doing the upsert.
         Returns:
-            Returns True if a new entry was created, False if an existing
-            one was updated.
+            Returns True if a row was inserted or updated (i.e. if `values` is
+            not empty then this always returns True)
         """
         insertion_values = insertion_values or {}

@@ -1086,8 +1091,7 @@ class DatabasePool:

             txn.execute(sql, sqlargs)
             if txn.rowcount > 0:
-                # successfully updated at least one row.
-                return False
+                return True

         # We didn't find any existing rows, so insert a new one
         allvalues: Dict[str, Any] = {}

@@ -1111,15 +1115,19 @@ class DatabasePool:
         keyvalues: Dict[str, Any],
         values: Dict[str, Any],
         insertion_values: Optional[Dict[str, Any]] = None,
-    ) -> None:
+    ) -> bool:
         """
-        Use the native UPSERT functionality in recent PostgreSQL versions.
+        Use the native UPSERT functionality in PostgreSQL.

         Args:
             table: The table to upsert into
             keyvalues: The unique key tables and their new values
             values: The nonunique columns and their new values
             insertion_values: additional key/values to use only when inserting

+        Returns:
+            Returns True if a row was inserted or updated (i.e. if `values` is
+            not empty then this always returns True)
         """
         allvalues: Dict[str, Any] = {}
         allvalues.update(keyvalues)

@@ -1140,6 +1148,8 @@ class DatabasePool:
         )
         txn.execute(sql, list(allvalues.values()))

+        return bool(txn.rowcount)
+
     async def simple_upsert_many(
         self,
         table: str,
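The txn_limit handling added above counts transactions per database thread and recycles the connection once the configured limit is crossed. The following is a rough standalone sketch of that bookkeeping in plain Python, assuming a stand-in Connection class and threading.get_ident() in place of the pool's threadID(); it is illustrative only, not the real DatabasePool.

    import threading
    from collections import defaultdict
    from typing import Dict

    class Connection:
        """Stand-in for a database connection that can be recycled."""
        def reconnect(self) -> None:
            print("reconnecting")

    class PoolSketch:
        def __init__(self, txn_limit: int = 0) -> None:
            self._txn_limit = txn_limit
            # key is the thread id, value is the number of transactions it has run
            self._txn_counters: Dict[int, int] = defaultdict(int)

        def run_transaction(self, conn: Connection) -> None:
            if self._txn_limit > 0:
                tid = threading.get_ident()  # stand-in for self._db_pool.threadID()
                self._txn_counters[tid] += 1
                if self._txn_counters[tid] > self._txn_limit:
                    # Over the limit: recycle the connection and restart the count.
                    conn.reconnect()
                    self._txn_counters[tid] = 1
            # ... the actual transaction would run here ...

    pool = PoolSketch(txn_limit=2)
    conn = Connection()
    for _ in range(5):
        pool.run_transaction(conn)  # prints "reconnecting" on the 3rd and 5th call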
@@ -249,7 +249,7 @@ class DataStore(
         name: Optional[str] = None,
         guests: bool = True,
         deactivated: bool = False,
-        order_by: UserSortOrder = UserSortOrder.USER_ID.value,
+        order_by: str = UserSortOrder.USER_ID.value,
         direction: str = "f",
     ) -> Tuple[List[JsonDict], int]:
         """Function to retrieve a paginated list of users from

@@ -297,27 +297,22 @@ class DataStore(

             where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""

-            sql_base = """
+            sql_base = f"""
                 FROM users as u
                 LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
-                {}
-                """.format(
-                where_clause
-            )
+                {where_clause}
+                """
             sql = "SELECT COUNT(*) as total_users " + sql_base
             txn.execute(sql, args)
             count = txn.fetchone()[0]

-            sql = """
-                SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url
+            sql = f"""
+                SELECT name, user_type, is_guest, admin, deactivated, shadow_banned,
+                displayname, avatar_url, creation_ts * 1000 as creation_ts
                 {sql_base}
                 ORDER BY {order_by_column} {order}, u.name ASC
                 LIMIT ? OFFSET ?
-                """.format(
-                sql_base=sql_base,
-                order_by_column=order_by_column,
-                order=order,
-            )
+                """
             args += [limit, start]
             txn.execute(sql, args)
             users = self.db_pool.cursor_to_dict(txn)
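The hunk above swaps str.format() for f-strings when assembling the paginated-users query and adds creation_ts to the selected columns. A simplified sketch of that query-building pattern follows; the table and column names are trimmed for illustration, and only trusted SQL fragments (built from a fixed allow-list) are interpolated, while user-supplied values stay in the parameter list passed to execute().

    def build_user_query(where_clause: str, order_by_column: str, order: str) -> str:
        # Trusted fragments are interpolated with f-strings; the '?' placeholders
        # are filled in later from the separate args list.
        sql_base = f"""
            FROM users AS u
            LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
            {where_clause}
        """
        return f"""
            SELECT name, displayname, avatar_url, creation_ts * 1000 AS creation_ts
            {sql_base}
            ORDER BY {order_by_column} {order}, u.name ASC
            LIMIT ? OFFSET ?
        """

    print(build_user_query("WHERE deactivated = 0", "creation_ts", "DESC"))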
@@ -1078,16 +1078,18 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             return False

         try:
-            inserted = await self.db_pool.simple_insert(
+            inserted = await self.db_pool.simple_upsert(
                 "devices",
-                values={
+                keyvalues={
                     "user_id": user_id,
                     "device_id": device_id,
+                },
+                values={},
+                insertion_values={
                     "display_name": initial_device_display_name,
                     "hidden": False,
                 },
                 desc="store_device",
-                or_ignore=True,
             )
             if not inserted:
                 # if the device already exists, check if it's a real device, or

@@ -1099,6 +1101,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                 )
                 if hidden:
                     raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN)

             self.device_id_exists_cache.set(key, True)
             return inserted
         except StoreError:
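With or_ignore removed, store_device becomes an upsert keyed on the device's primary key, with an empty values dict and the defaults carried in insertion_values, so an existing row is left untouched and the return value still tells the caller whether a new row appeared. A minimal standalone sketch of what that boils down to in SQL, assuming SQLite 3.24+ for upsert syntax and a deliberately simplified devices table:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE devices ("
        " user_id TEXT, device_id TEXT, display_name TEXT, hidden BOOLEAN,"
        " PRIMARY KEY (user_id, device_id))"
    )

    def store_device(user_id: str, device_id: str, display_name: str) -> bool:
        cur = conn.execute(
            """
            INSERT INTO devices (user_id, device_id, display_name, hidden)
            VALUES (?, ?, ?, 0)
            ON CONFLICT (user_id, device_id) DO NOTHING
            """,
            (user_id, device_id, display_name),
        )
        # rowcount is 1 only when a brand-new row was written, matching the
        # "inserted" flag the calling code checks before probing `hidden`.
        return bool(cur.rowcount)

    print(store_device("@bob:example.org", "DEV1", "laptop"))  # True, newly created
    print(store_device("@bob:example.org", "DEV1", "other"))   # False, already there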
@@ -16,11 +16,11 @@ import logging
 from queue import Empty, PriorityQueue
 from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple

-from prometheus_client import Gauge
+from prometheus_client import Counter, Gauge

 from synapse.api.constants import MAX_DEPTH
 from synapse.api.errors import StoreError
-from synapse.api.room_versions import RoomVersion
+from synapse.api.room_versions import EventFormatVersions, RoomVersion
 from synapse.events import EventBase, make_event_from_dict
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause

@@ -44,6 +44,12 @@ number_pdus_in_federation_queue = Gauge(
     "The total number of events in the inbound federation staging",
 )

+pdus_pruned_from_federation_queue = Counter(
+    "synapse_federation_server_number_inbound_pdu_pruned",
+    "The number of events in the inbound federation staging that have been "
+    "pruned due to the queue getting too long",
+)
+
 logger = logging.getLogger(__name__)


@@ -936,15 +942,46 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore):
         # We want to make sure that we do a breadth-first, "depth" ordered
         # search.

-        query = (
-            "SELECT depth, prev_event_id FROM event_edges"
-            " INNER JOIN events"
-            " ON prev_event_id = events.event_id"
-            " WHERE event_edges.event_id = ?"
-            " AND event_edges.is_state = ?"
-            " LIMIT ?"
-        )
+        # Look for the prev_event_id connected to the given event_id
+        query = """
+            SELECT depth, prev_event_id FROM event_edges
+            /* Get the depth of the prev_event_id from the events table */
+            INNER JOIN events
+            ON prev_event_id = events.event_id
+            /* Find an event which matches the given event_id */
+            WHERE event_edges.event_id = ?
+            AND event_edges.is_state = ?
+            LIMIT ?
+        """
+
+        # Look for the "insertion" events connected to the given event_id
+        connected_insertion_event_query = """
+            SELECT e.depth, i.event_id FROM insertion_event_edges AS i
+            /* Get the depth of the insertion event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which points via prev_events to the given event_id */
+            WHERE i.insertion_prev_event_id = ?
+            LIMIT ?
+        """
+
+        # Find any chunk connections of a given insertion event
+        chunk_connection_query = """
+            SELECT e.depth, c.event_id FROM insertion_events AS i
+            /* Find the chunk that connects to the given insertion event */
+            INNER JOIN chunk_events AS c
+            ON i.next_chunk_id = c.chunk_id
+            /* Get the depth of the chunk start event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which matches the given event_id */
+            WHERE i.event_id = ?
+            LIMIT ?
+        """
+
+        # In a PriorityQueue, the lowest valued entries are retrieved first.
+        # We're using depth as the priority in the queue.
+        # Depth is lowest at the oldest-in-time message and highest and
+        # newest-in-time message. We add events to the queue with a negative depth so that
+        # we process the newest-in-time messages first going backwards in time.
         queue = PriorityQueue()

         for event_id in event_list:

@@ -970,9 +1007,48 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore):

             event_results.add(event_id)

-            txn.execute(query, (event_id, False, limit - len(event_results)))
-
-            for row in txn:
+            # Try and find any potential historical chunks of message history.
+            #
+            # First we look for an insertion event connected to the current
+            # event (by prev_event). If we find any, we need to go and try to
+            # find any chunk events connected to the insertion event (by
+            # chunk_id). If we find any, we'll add them to the queue and
+            # navigate up the DAG like normal in the next iteration of the loop.
+            txn.execute(
+                connected_insertion_event_query, (event_id, limit - len(event_results))
+            )
+            connected_insertion_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: connected_insertion_event_query %s",
+                connected_insertion_event_id_results,
+            )
+            for row in connected_insertion_event_id_results:
+                connected_insertion_event_depth = row[0]
+                connected_insertion_event = row[1]
+                queue.put((-connected_insertion_event_depth, connected_insertion_event))
+
+                # Find any chunk connections for the given insertion event
+                txn.execute(
+                    chunk_connection_query,
+                    (connected_insertion_event, limit - len(event_results)),
+                )
+                chunk_start_event_id_results = txn.fetchall()
+                logger.debug(
+                    "_get_backfill_events: chunk_start_event_id_results %s",
+                    chunk_start_event_id_results,
+                )
+                for row in chunk_start_event_id_results:
+                    if row[1] not in event_results:
+                        queue.put((-row[0], row[1]))
+
+            # Navigate up the DAG by prev_event
+            txn.execute(query, (event_id, False, limit - len(event_results)))
+            prev_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: prev_event_ids %s", prev_event_id_results
+            )
+
+            for row in prev_event_id_results:
                 if row[1] not in event_results:
                     queue.put((-row[0], row[1]))

@@ -1207,6 +1283,100 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore):

         return origin, event

+    async def prune_staged_events_in_room(
+        self,
+        room_id: str,
+        room_version: RoomVersion,
+    ) -> bool:
+        """Checks if there are lots of staged events for the room, and if so
+        prune them down.
+
+        Returns:
+            Whether any events were pruned
+        """
+
+        # First check the size of the queue.
+        count = await self.db_pool.simple_select_one_onecol(
+            table="federation_inbound_events_staging",
+            keyvalues={"room_id": room_id},
+            retcol="COALESCE(COUNT(*), 0)",
+            desc="prune_staged_events_in_room_count",
+        )
+
+        if count < 100:
+            return False
+
+        # If the queue is too large, then we want clear the entire queue,
+        # keeping only the forward extremities (i.e. the events not referenced
+        # by other events in the queue). We do this so that we can always
+        # backpaginate in all the events we have dropped.
+        rows = await self.db_pool.simple_select_list(
+            table="federation_inbound_events_staging",
+            keyvalues={"room_id": room_id},
+            retcols=("event_id", "event_json"),
+            desc="prune_staged_events_in_room_fetch",
+        )
+
+        # Find the set of events referenced by those in the queue, as well as
+        # collecting all the event IDs in the queue.
+        referenced_events: Set[str] = set()
+        seen_events: Set[str] = set()
+        for row in rows:
+            event_id = row["event_id"]
+            seen_events.add(event_id)
+            event_d = db_to_json(row["event_json"])
+
+            # We don't bother parsing the dicts into full blown event objects,
+            # as that is needlessly expensive.
+
+            # We haven't checked that the `prev_events` have the right format
+            # yet, so we check as we go.
+            prev_events = event_d.get("prev_events", [])
+            if not isinstance(prev_events, list):
+                logger.info("Invalid prev_events for %s", event_id)
+                continue
+
+            if room_version.event_format == EventFormatVersions.V1:
+                for prev_event_tuple in prev_events:
+                    if not isinstance(prev_event_tuple, list) or len(prev_events) != 2:
+                        logger.info("Invalid prev_events for %s", event_id)
+                        break
+
+                    prev_event_id = prev_event_tuple[0]
+                    if not isinstance(prev_event_id, str):
+                        logger.info("Invalid prev_events for %s", event_id)
+                        break
+
+                    referenced_events.add(prev_event_id)
+            else:
+                for prev_event_id in prev_events:
+                    if not isinstance(prev_event_id, str):
+                        logger.info("Invalid prev_events for %s", event_id)
+                        break
+
+                    referenced_events.add(prev_event_id)
+
+        to_delete = referenced_events & seen_events
+        if not to_delete:
+            return False
+
+        pdus_pruned_from_federation_queue.inc(len(to_delete))
+        logger.info(
+            "Pruning %d events in room %s from federation queue",
+            len(to_delete),
+            room_id,
+        )
+
+        await self.db_pool.simple_delete_many(
+            table="federation_inbound_events_staging",
+            keyvalues={"room_id": room_id},
+            iterable=to_delete,
+            column="event_id",
+            desc="prune_staged_events_in_room_delete",
+        )
+
+        return True
+
     async def get_all_rooms_with_staged_incoming_events(self) -> List[str]:
         """Get the room IDs of all events currently staged."""
         return await self.db_pool.simple_select_onecol(

@@ -1227,11 +1397,14 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore):
         (count,) = txn.fetchone()

         txn.execute(
-            "SELECT coalesce(min(received_ts), 0) FROM federation_inbound_events_staging"
+            "SELECT min(received_ts) FROM federation_inbound_events_staging"
         )

         (received_ts,) = txn.fetchone()

-        age = self._clock.time_msec() - received_ts
+        # If there is nothing in the staging area default it to 0.
+        age = 0
+        if received_ts is not None:
+            age = self._clock.time_msec() - received_ts

         return count, age
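The pruning rule introduced above keeps only the forward extremities of the staged queue: any staged event that some other staged event references as a prev_event gets dropped, since it can still be backfilled later. A standalone sketch of just that set logic, with an invented toy queue structure:

    from typing import Dict, List, Set

    def events_to_prune(staged: Dict[str, List[str]]) -> Set[str]:
        """staged maps event_id -> list of prev_event_ids for queued events."""
        seen_events: Set[str] = set(staged)
        referenced_events: Set[str] = set()
        for prev_event_ids in staged.values():
            referenced_events.update(prev_event_ids)
        # Only staged events that are referenced by another staged event are
        # dropped; the unreferenced ones are the forward extremities we keep.
        return referenced_events & seen_events

    queue = {
        "$A": [],        # oldest, referenced by $B -> pruned
        "$B": ["$A"],    # referenced by $C -> pruned
        "$C": ["$B"],    # forward extremity, kept
    }
    print(events_to_prune(queue))  # {'$A', '$B'} (order may vary)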
@@ -1502,6 +1502,9 @@ class PersistEventsStore:

         self._handle_event_relations(txn, event)

+        self._handle_insertion_event(txn, event)
+        self._handle_chunk_event(txn, event)
+
         # Store the labels for this event.
         labels = event.content.get(EventContentFields.LABELS)
         if labels:

@@ -1754,6 +1757,94 @@ class PersistEventsStore:
         if rel_type == RelationTypes.REPLACE:
             txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))

+    def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
+        """Handles keeping track of insertion events and edges/connections.
+        Part of MSC2716.
+
+        Args:
+            txn: The database transaction object
+            event: The event to process
+        """
+
+        if event.type != EventTypes.MSC2716_INSERTION:
+            # Not a insertion event
+            return
+
+        # Skip processing a insertion event if the room version doesn't
+        # support it.
+        room_version = self.store.get_room_version_txn(txn, event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
+        if next_chunk_id is None:
+            # Invalid insertion event without next chunk ID
+            return
+
+        logger.debug(
+            "_handle_insertion_event (next_chunk_id=%s) %s", next_chunk_id, event
+        )
+
+        # Keep track of the insertion event and the chunk ID
+        self.db_pool.simple_insert_txn(
+            txn,
+            table="insertion_events",
+            values={
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "next_chunk_id": next_chunk_id,
+            },
+        )
+
+        # Insert an edge for every prev_event connection
+        for prev_event_id in event.prev_events:
+            self.db_pool.simple_insert_txn(
+                txn,
+                table="insertion_event_edges",
+                values={
+                    "event_id": event.event_id,
+                    "room_id": event.room_id,
+                    "insertion_prev_event_id": prev_event_id,
+                },
+            )
+
+    def _handle_chunk_event(self, txn: LoggingTransaction, event: EventBase):
+        """Handles inserting the chunk edges/connections between the chunk event
+        and an insertion event. Part of MSC2716.
+
+        Args:
+            txn: The database transaction object
+            event: The event to process
+        """
+
+        if event.type != EventTypes.MSC2716_CHUNK:
+            # Not a chunk event
+            return
+
+        # Skip processing a chunk event if the room version doesn't
+        # support it.
+        room_version = self.store.get_room_version_txn(txn, event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
+        if chunk_id is None:
+            # Invalid chunk event without a chunk ID
+            return
+
+        logger.debug("_handle_chunk_event chunk_id=%s %s", chunk_id, event)
+
+        # Keep track of the insertion event and the chunk ID
+        self.db_pool.simple_insert_txn(
+            txn,
+            table="chunk_events",
+            values={
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "chunk_id": chunk_id,
+            },
+        )
+
     def _handle_redaction(self, txn, redacted_event_id):
         """Handles receiving a redaction and checking whether we need to remove
         any redacted relations from the database.
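The two new handlers above record how an MSC2716 history chunk hangs off the room DAG: an insertion event advertises a next_chunk_id and is linked to its prev_events, and a later chunk event carrying the matching chunk_id connects back to it. The sketch below shows the rows those handlers would write for a toy pair of events; the event IDs are invented and the content keys are written out with MSC2716's unstable prefix purely for illustration.

    insertion_event = {
        "event_id": "$insertion1",
        "room_id": "!room:example.org",
        "content": {"org.matrix.msc2716.next_chunk_id": "chunk-abc"},
        "prev_events": ["$live_event"],
    }
    chunk_event = {
        "event_id": "$chunk1",
        "room_id": "!room:example.org",
        "content": {"org.matrix.msc2716.chunk_id": "chunk-abc"},
    }

    # One row per table, linking the history chunk back to the point in the
    # DAG where it should be spliced in.
    insertion_events_row = {
        "event_id": insertion_event["event_id"],
        "room_id": insertion_event["room_id"],
        "next_chunk_id": insertion_event["content"]["org.matrix.msc2716.next_chunk_id"],
    }
    insertion_event_edges_rows = [
        {
            "event_id": insertion_event["event_id"],
            "room_id": insertion_event["room_id"],
            "insertion_prev_event_id": prev,
        }
        for prev in insertion_event["prev_events"]
    ]
    chunk_events_row = {
        "event_id": chunk_event["event_id"],
        "room_id": chunk_event["room_id"],
        "chunk_id": chunk_event["content"]["org.matrix.msc2716.chunk_id"],
    }

    # The chunk joins onto the insertion event via the shared chunk ID.
    print(insertion_events_row["next_chunk_id"] == chunk_events_row["chunk_id"])  # True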
@@ -297,17 +297,13 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         Args:
             txn (cursor):
             user_id (str): user to add/update
-
-        Returns:
-            bool: True if a new entry was created, False if an
-            existing one was updated.
         """

         # Am consciously deciding to lock the table on the basis that is ought
         # never be a big table and alternative approaches (batching multiple
         # upserts into a single txn) introduced a lot of extra complexity.
         # See https://github.com/matrix-org/synapse/issues/3854 for more
-        is_insert = self.db_pool.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             table="monthly_active_users",
             keyvalues={"user_id": user_id},

@@ -322,8 +318,6 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
             txn, self.user_last_seen_monthly_active, (user_id,)
         )

-        return is_insert
-
     async def populate_monthly_active_users(self, user_id):
         """Checks on the state of monthly active user limits and optionally
         add the user to the monthly active tables
@@ -363,7 +363,7 @@ class RoomWorkerStore(SQLBaseStore):
         self,
         start: int,
         limit: int,
-        order_by: RoomSortOrder,
+        order_by: str,
         reverse_order: bool,
         search_term: Optional[str],
     ) -> Tuple[List[Dict[str, Any]], int]:
@@ -22,7 +22,7 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.state import StateFilter

@@ -58,15 +58,32 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

     async def get_room_version(self, room_id: str) -> RoomVersion:
         """Get the room_version of a given room

         Raises:
             NotFoundError: if the room is unknown

             UnsupportedRoomVersionError: if the room uses an unknown room version.
                 Typically this happens if support for the room's version has been
                 removed from Synapse.
         """
-        room_version_id = await self.get_room_version_id(room_id)
+        return await self.db_pool.runInteraction(
+            "get_room_version_txn",
+            self.get_room_version_txn,
+            room_id,
+        )
+
+    def get_room_version_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> RoomVersion:
+        """Get the room_version of a given room
+        Args:
+            txn: Transaction object
+            room_id: The room_id of the room you are trying to get the version for
+        Raises:
+            NotFoundError: if the room is unknown
+            UnsupportedRoomVersionError: if the room uses an unknown room version.
+                Typically this happens if support for the room's version has been
+                removed from Synapse.
+        """
+        room_version_id = self.get_room_version_id_txn(txn, room_id)
         v = KNOWN_ROOM_VERSIONS.get(room_version_id)

         if not v:

@@ -80,7 +97,20 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     @cached(max_entries=10000)
     async def get_room_version_id(self, room_id: str) -> str:
         """Get the room_version of a given room
+        Raises:
+            NotFoundError: if the room is unknown
+        """
+        return await self.db_pool.runInteraction(
+            "get_room_version_id_txn",
+            self.get_room_version_id_txn,
+            room_id,
+        )
+
+    def get_room_version_id_txn(self, txn: LoggingTransaction, room_id: str) -> str:
+        """Get the room_version of a given room
+        Args:
+            txn: Transaction object
+            room_id: The room_id of the room you are trying to get the version for
         Raises:
             NotFoundError: if the room is unknown
         """

@@ -88,24 +118,22 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         # First we try looking up room version from the database, but for old
         # rooms we might not have added the room version to it yet so we fall
         # back to previous behaviour and look in current state events.
+        #
         # We really should have an entry in the rooms table for every room we
         # care about, but let's be a bit paranoid (at least while the background
         # update is happening) to avoid breaking existing rooms.
-        version = await self.db_pool.simple_select_one_onecol(
+        room_version = self.db_pool.simple_select_one_onecol_txn(
+            txn,
             table="rooms",
             keyvalues={"room_id": room_id},
             retcol="room_version",
-            desc="get_room_version",
             allow_none=True,
         )

-        if version is not None:
-            return version
+        if room_version is None:
+            raise NotFoundError("Could not room_version for %s" % (room_id,))

-        # Retrieve the room's create event
-        create_event = await self.get_create_event_for_room(room_id)
-        return create_event.content.get("room_version", "1")
+        return room_version

     async def get_room_predecessor(self, room_id: str) -> Optional[dict]:
         """Get the predecessor of an upgraded room if it exists.
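The refactoring above splits each lookup into a synchronous *_txn method that can run inside an existing transaction, with the async method reduced to a thin runInteraction wrapper around it. A minimal self-contained sketch of that pattern follows; FakePool, FakeTxn and the hard-coded version are stand-ins invented for illustration, not Synapse's real classes.

    import asyncio
    from typing import Callable, TypeVar

    T = TypeVar("T")

    class FakeTxn:
        """Stand-in for a LoggingTransaction."""
        def fetch_version(self, room_id: str) -> str:
            return "6"  # pretend the rooms table says room version 6

    class FakePool:
        """Stand-in for DatabasePool: runs the function inline rather than on a DB thread."""
        async def runInteraction(self, desc: str, func: Callable[[FakeTxn], T]) -> T:
            return func(FakeTxn())

    class StoreSketch:
        def __init__(self) -> None:
            self.db_pool = FakePool()

        async def get_room_version_id(self, room_id: str) -> str:
            # Async wrapper: just schedules the _txn variant.
            return await self.db_pool.runInteraction(
                "get_room_version_id_txn",
                lambda txn: self.get_room_version_id_txn(txn, room_id),
            )

        def get_room_version_id_txn(self, txn: FakeTxn, room_id: str) -> str:
            # Reusable from other *_txn code that already holds a transaction.
            return txn.fetch_version(room_id)

    print(asyncio.run(StoreSketch().get_room_version_id("!room:example.org")))  # prints 6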
@@ -75,6 +75,7 @@ class UserSortOrder(Enum):
     USER_TYPE = ordered alphabetically by `user_type`
     AVATAR_URL = ordered alphabetically by `avatar_url`
     SHADOW_BANNED = ordered by `shadow_banned`
+    CREATION_TS = ordered by `creation_ts`
     """

     MEDIA_LENGTH = "media_length"

@@ -88,6 +89,7 @@ class UserSortOrder(Enum):
     USER_TYPE = "user_type"
     AVATAR_URL = "avatar_url"
     SHADOW_BANNED = "shadow_banned"
+    CREATION_TS = "creation_ts"


 class StatsStore(StateDeltasStore):

@@ -647,10 +649,10 @@ class StatsStore(StateDeltasStore):
         limit: int,
         from_ts: Optional[int] = None,
         until_ts: Optional[int] = None,
-        order_by: Optional[UserSortOrder] = UserSortOrder.USER_ID.value,
+        order_by: Optional[str] = UserSortOrder.USER_ID.value,
         direction: Optional[str] = "f",
         search_term: Optional[str] = None,
-    ) -> Tuple[List[JsonDict], Dict[str, int]]:
+    ) -> Tuple[List[JsonDict], int]:
         """Function to retrieve a paginated list of users and their uploaded local media
         (size and number). This will return a json list of users and the
         total number of users matching the filter criteria.
@@ -134,16 +134,18 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
             response_dict: The response, to be encoded into JSON.
         """

-        await self.db_pool.simple_insert(
+        await self.db_pool.simple_upsert(
             table="received_transactions",
-            values={
+            keyvalues={
                 "transaction_id": transaction_id,
                 "origin": origin,
+            },
+            values={},
+            insertion_values={
                 "response_code": code,
                 "response_json": db_binary_type(encode_canonical_json(response_dict)),
                 "ts": self._clock.time_msec(),
             },
-            or_ignore=True,
             desc="set_received_txn_response",
         )
@@ -377,7 +377,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             avatar_url = None

         def _update_profile_in_user_dir_txn(txn):
-            new_entry = self.db_pool.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="user_directory",
                 keyvalues={"user_id": user_id},

@@ -388,7 +388,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             if isinstance(self.database_engine, PostgresEngine):
                 # We weight the localpart most highly, then display name and finally
                 # server name
-                if self.database_engine.can_native_upsert:
                 sql = """
                     INSERT INTO user_directory_search(user_id, vector)
                     VALUES (?,

@@ -406,49 +405,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                         display_name,
                     ),
                 )
-                else:
-                    # TODO: Remove this code after we've bumped the minimum version
-                    # of postgres to always support upserts, so we can get rid of
-                    # `new_entry` usage
-                    if new_entry is True:
-                        sql = """
-                        INSERT INTO user_directory_search(user_id, vector)
-                        VALUES (?,
-                            setweight(to_tsvector('simple', ?), 'A')
-                            || setweight(to_tsvector('simple', ?), 'D')
-                            || setweight(to_tsvector('simple', COALESCE(?, '')), 'B')
-                        )
-                        """
-                        txn.execute(
-                            sql,
-                            (
-                                user_id,
-                                get_localpart_from_id(user_id),
-                                get_domain_from_id(user_id),
-                                display_name,
-                            ),
-                        )
-                    elif new_entry is False:
-                        sql = """
-                        UPDATE user_directory_search
-                        SET vector = setweight(to_tsvector('simple', ?), 'A')
-                            || setweight(to_tsvector('simple', ?), 'D')
-                            || setweight(to_tsvector('simple', COALESCE(?, '')), 'B')
-                        WHERE user_id = ?
-                        """
-                        txn.execute(
-                            sql,
-                            (
-                                get_localpart_from_id(user_id),
-                                get_domain_from_id(user_id),
-                                display_name,
-                                user_id,
-                            ),
-                        )
-                    else:
-                        raise RuntimeError(
-                            "upsert returned None when 'can_native_upsert' is False"
-                        )
             elif isinstance(self.database_engine, Sqlite3Engine):
                 value = "%s %s" % (user_id, display_name) if display_name else user_id
                 self.db_pool.simple_upsert_txn(
@@ -372,18 +372,23 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         )

     async def store_state_group(
-        self, event_id, room_id, prev_group, delta_ids, current_state_ids
+        self,
+        event_id: str,
+        room_id: str,
+        prev_group: Optional[int],
+        delta_ids: Optional[StateMap[str]],
+        current_state_ids: StateMap[str],
     ) -> int:
         """Store a new set of state, returning a newly assigned state group.

         Args:
-            event_id (str): The event ID for which the state was calculated
-            room_id (str)
-            prev_group (int|None): A previous state group for the room, optional.
-            delta_ids (dict|None): The delta between state at `prev_group` and
+            event_id: The event ID for which the state was calculated
+            room_id
+            prev_group: A previous state group for the room, optional.
+            delta_ids: The delta between state at `prev_group` and
                 `current_state_ids`, if `prev_group` was given. Same format as
                 `current_state_ids`.
-            current_state_ids (dict): The state to store. Map of (type, state_key)
+            current_state_ids: The state to store. Map of (type, state_key)
                 to event_id.

         Returns:
@@ -170,7 +170,9 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
             end_item = queue[-1]
         else:
             # need to make a new queue item
-            deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
+            deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
+                defer.Deferred(), consumeErrors=True
+            )

             end_item = _EventPersistQueueItem(
                 events_and_contexts=[],
Some files were not shown because too many files have changed in this diff.