Merge branch 'develop' into uhoreg/cross_signing_fix_workers_notify

Commit 670972c0e1
.buildkite/postgres-config.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
+# Configuration file used for testing the 'synapse_port_db' script.
+# Tells the script to connect to the postgresql database that will be available in the
+# CI's Docker setup at the point where this file is considered.
+server_name: "test"
+
+signing_key_path: "/src/.buildkite/test.signing.key"
+
+report_stats: false
+
+database:
+  name: "psycopg2"
+  args:
+    user: postgres
+    host: postgres
+    password: postgres
+    database: synapse
+
+# Suppress the key server warning.
+trusted_key_servers:
+  - server_name: "matrix.org"
+    suppress_key_server_warning: true
.buildkite/scripts/create_postgres_db.py (new executable file, 36 lines)
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from synapse.storage.engines import create_engine
+
+logger = logging.getLogger("create_postgres_db")
+
+if __name__ == "__main__":
+    # Create a PostgresEngine.
+    db_engine = create_engine({"name": "psycopg2", "args": {}})
+
+    # Connect to postgres to create the base database.
+    # We use "postgres" as a database because it's bound to exist and the "synapse" one
+    # doesn't exist yet.
+    db_conn = db_engine.module.connect(
+        user="postgres", host="postgres", password="postgres", dbname="postgres"
+    )
+    db_conn.autocommit = True
+    cur = db_conn.cursor()
+    cur.execute("CREATE DATABASE synapse;")
+    cur.close()
+    db_conn.close()
.buildkite/scripts/test_synapse_port_db.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
+# with additional dependencies needed for the test (such as coverage or the PostgreSQL
+# driver), updates the schema of the test SQLite database and runs background updates on
+# it, creates an empty test database in PostgreSQL, then runs the 'synapse_port_db'
+# script to test porting the SQLite database to the PostgreSQL database (with coverage).
+
+set -xe
+cd `dirname $0`/../..
+
+echo "--- Install dependencies"
+
+# Install dependencies for this test.
+pip install psycopg2 coverage coverage-enable-subprocess
+
+# Install Synapse itself. This won't update any libraries.
+pip install -e .
+
+echo "--- Generate the signing key"
+
+# Generate the server's signing key.
+python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
+
+echo "--- Prepare the databases"
+
+# Make sure the SQLite3 database is using the latest schema and has no pending background update.
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
+
+# Create the PostgreSQL database.
+./.buildkite/scripts/create_postgres_db.py
+
+echo "+++ Run synapse_port_db"
+
+# Run the script
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
.buildkite/sqlite-config.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Configuration file used for testing the 'synapse_port_db' script.
+# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
+# schema and run background updates on it.
+server_name: "test"
+
+signing_key_path: "/src/.buildkite/test.signing.key"
+
+report_stats: false
+
+database:
+  name: "sqlite3"
+  args:
+    database: ".buildkite/test_db.db"
+
+# Suppress the key server warning.
+trusted_key_servers:
+  - server_name: "matrix.org"
+    suppress_key_server_warning: true
.buildkite/test_db.db (new binary file; contents not shown)
AUTHORS.rst (41 lines changed)
@@ -1,34 +1,8 @@
-Erik Johnston <erik at matrix.org>
-* HS core
-* Federation API impl
-
-Mark Haines <mark at matrix.org>
-* HS core
-* Crypto
-* Content repository
-* CS v2 API impl
-
-Kegan Dougal <kegan at matrix.org>
-* HS core
-* CS v1 API impl
-* AS API impl
-
-Paul "LeoNerd" Evans <paul at matrix.org>
-* HS core
-* Presence
-* Typing Notifications
-* Performance metrics and caching layer
-
-Dave Baker <dave at matrix.org>
-* Push notifications
-* Auth CS v2 impl
-
-Matthew Hodgson <matthew at matrix.org>
-* General doc & housekeeping
-* Vertobot/vertobridge matrix<->verto PoC
-
-Emmanuel Rohee <manu at matrix.org>
-* Supporting iOS clients (testability and fallback registration)
-
+The following is an incomplete list of people outside the core team who have
+contributed to Synapse. It is no longer maintained: more recent contributions
+are listed in the `changelog <CHANGES.md>`_.
+
+----
+
 Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions
@@ -62,16 +36,13 @@ Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication
 
 Pierre Jaury <pierre at jaury.eu>
 * Docker packaging
 
 Serban Constantin <serban.constantin at gmail dot com>
 * Small bug fix
 
-Jason Robinson <jasonr at matrix.org>
-* Minor fixes
-
 Joseph Weston <joseph at weston.cloud>
-+ Add admin API for querying HS version
+* Add admin API for querying HS version
 
 Benjamin Saunders <ben.e.saunders at gmail dot com>
 * Documentation improvements
CHANGES.md (41 lines changed)
@@ -1,12 +1,37 @@
-Synapse 1.5.0rc1 (2019-10-24)
-==========================
-
-This release includes a database migration step **which may take a long time to complete**:
-
-- Allow devices to be marked as hidden, for use by features such as cross-signing.
-  This adds a new field with a default value to the devices field in the database,
-  and so the database upgrade may take a long time depending on how many devices
-  are in the database. ([\#5759](https://github.com/matrix-org/synapse/issues/5759))
+Synapse 1.5.0 (2019-10-29)
+==========================
+
+Security updates
+----------------
+
+This release includes a security fix ([\#6262](https://github.com/matrix-org/synapse/issues/6262), below). Administrators are encouraged to upgrade as soon as possible.
+
+Bugfixes
+--------
+
+- Fix bug where room directory search was case sensitive. ([\#6268](https://github.com/matrix-org/synapse/issues/6268))
+
+
+Synapse 1.5.0rc2 (2019-10-28)
+=============================
+
+Bugfixes
+--------
+
+- Update list of boolean columns in `synapse_port_db`. ([\#6247](https://github.com/matrix-org/synapse/issues/6247))
+- Fix /keys/query API on workers. ([\#6256](https://github.com/matrix-org/synapse/issues/6256))
+- Improve signature checking on some federation APIs. ([\#6262](https://github.com/matrix-org/synapse/issues/6262))
+
+
+Internal Changes
+----------------
+
+- Move schema delta files to the correct data store. ([\#6248](https://github.com/matrix-org/synapse/issues/6248))
+- Small performance improvement by removing repeated config lookups in room stats calculation. ([\#6255](https://github.com/matrix-org/synapse/issues/6255))
+
+
+Synapse 1.5.0rc1 (2019-10-24)
+=============================
 
 Features
 --------
@@ -51,6 +76,10 @@ Internal Changes
 ----------------
 
 - Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. ([\#1172](https://github.com/matrix-org/synapse/issues/1172), [\#6175](https://github.com/matrix-org/synapse/issues/6175), [\#6184](https://github.com/matrix-org/synapse/issues/6184))
+- Allow devices to be marked as hidden, for use by features such as cross-signing.
+  This adds a new field with a default value to the devices field in the database,
+  and so the database upgrade may take a long time depending on how many devices
+  are in the database. ([\#5759](https://github.com/matrix-org/synapse/issues/5759))
 - Move lookup-related functions from RoomMemberHandler to IdentityHandler. ([\#5978](https://github.com/matrix-org/synapse/issues/5978))
 - Improve performance of the public room list directory. ([\#6019](https://github.com/matrix-org/synapse/issues/6019), [\#6152](https://github.com/matrix-org/synapse/issues/6152), [\#6153](https://github.com/matrix-org/synapse/issues/6153), [\#6154](https://github.com/matrix-org/synapse/issues/6154))
 - Edit header dicts docstrings in `SimpleHttpClient` to note that `str` or `bytes` can be passed as header keys. ([\#6077](https://github.com/matrix-org/synapse/issues/6077))
CONTRIBUTING.rst
@@ -114,17 +114,6 @@ directory, you will need both a regular newsfragment *and* an entry in the
 debian changelog. (Though typically such changes should be submitted as two
 separate pull requests.)
 
-Attribution
-~~~~~~~~~~~
-
-Everyone who contributes anything to Matrix is welcome to be listed in the
-AUTHORS.rst file for the project in question. Please feel free to include a
-change to AUTHORS.rst in your pull request to list yourself and a short
-description of the area(s) you've worked on. Also, we sometimes have swag to
-give away to contributors - if you feel that Matrix-branded apparel is missing
-from your life, please mail us your shipping address to matrix at matrix.org and
-we'll try to fix it :)
-
 Sign off
 ~~~~~~~~
 
UPGRADE.rst (14 lines changed)
@@ -2,7 +2,7 @@ Upgrading Synapse
 =================
 
 Before upgrading check if any special steps are required to upgrade from the
-what you currently have installed to current version of Synapse. The extra
+version you currently have installed to the current version of Synapse. The extra
 instructions that may be required are listed later in this document.
 
 * If Synapse was installed using `prebuilt packages
@@ -29,7 +29,7 @@ instructions that may be required are listed later in this document.
    running:
 
    .. code:: bash
 
       git pull
      pip install --upgrade .
 
@@ -75,6 +75,16 @@ for example:
    wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+
+Upgrading to v1.5.0
+===================
+
+This release includes a database migration which may take several minutes to
+complete if there are a large number (more than a million or so) of entries in
+the ``devices`` table. This is only likely to be a problem on very large
+installations.
+
+
 Upgrading to v1.4.0
 ===================
 
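The upgrade note above ties migration time to the size of the ``devices`` table. As a rough pre-flight check, an administrator on a PostgreSQL-backed homeserver could count the rows first. A minimal sketch follows; the connection parameters are placeholders, not values from this commit.

import psycopg2

# Hypothetical credentials; substitute your homeserver's database settings.
conn = psycopg2.connect(user="synapse_user", host="localhost", dbname="synapse")
cur = conn.cursor()
cur.execute("SELECT COUNT(*) FROM devices;")
(count,) = cur.fetchone()
# Per the upgrade note, counts above a million or so may mean a slow upgrade.
print("devices rows:", count)
cur.close()
conn.close()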
changelog.d (one-line newsfragment files):

  added changelog.d/6140.misc: Add a CI job to test the `synapse_port_db` script.
  added changelog.d/6218.misc: Convert EventContext to an attrs.
  added changelog.d/6240.misc: Move `persist_events` out from main data store.
  removed newsfragment: Update list of boolean columns in `synapse_port_db`.
  removed newsfragment: Move schema delta files to the correct data store.
  added changelog.d/6250.misc: Reduce verbosity of user/room stats.
  added changelog.d/6251.misc: Reduce impact of debug logging.
  added changelog.d/6253.bugfix: Delete keys from key backup when deleting backup versions.
  added changelog.d/6257.doc: Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate.
  added changelog.d/6263.misc: Change cache descriptors to always return deferreds.
  added changelog.d/6269.misc: Fix incorrect comment regarding the functionality of an `if` statement.
  added changelog.d/6270.misc: Update CI to run `isort` over the `scripts` and `scripts-dev` directories.
  added changelog.d/6273.doc: Fix a small typo in the `account_threepid_delegates` configuration option.
  added changelog.d/6274.misc: Port replication http server endpoints to async/await.
  added changelog.d/6275.misc: Port room rest handlers to async/await.
  added changelog.d/6276.misc: Add a CI job to test the `synapse_port_db` script.
  added changelog.d/6277.misc: Remove redundant CLI parameters on CI's `flake8` step.
  added changelog.d/6278.bugfix: Fix exception when remote servers attempt to join a room that they're not allowed to join.
  added changelog.d/6279.misc: Port `federation_server.py` to async/await.
  added changelog.d/6280.misc: Port receipt and read markers to async/await.
  added changelog.d/6284.bugfix: Prevent errors from appearing on Synapse startup if `git` is not installed.
  added changelog.d/6291.misc: Change cache descriptors to always return deferreds.
debian/changelog (vendored, 6 lines changed)
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.5.0) stable; urgency=medium
+
+  * New synapse release 1.5.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Oct 2019 14:28:41 +0000
+
 matrix-synapse-py3 (1.4.1) stable; urgency=medium
 
   * New synapse release 1.4.1.
INSTALL.md
@@ -101,7 +101,7 @@ is suitable for local testing, but for any practical use, you will either need
 to use a reverse proxy, or configure Synapse to expose an HTTPS port.
 
 For documentation on using a reverse proxy, see
-https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
 
 For more information on enabling TLS support in synapse itself, see
 https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
docs/CAPTCHA_SETUP.md
@@ -4,7 +4,7 @@ The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
 
 ## Getting keys
 
-Requires a public/private key pair from:
+Requires a site/secret key pair from:
 
 <https://developers.google.com/recaptcha/>
 
@@ -15,8 +15,8 @@ Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
 The keys are a config option on the home server config. If they are not
 visible, you can generate them via `--generate-config`. Set the following value:
 
-  recaptcha_public_key: YOUR_PUBLIC_KEY
-  recaptcha_private_key: YOUR_PRIVATE_KEY
+  recaptcha_public_key: YOUR_SITE_KEY
+  recaptcha_private_key: YOUR_SECRET_KEY
 
 In addition, you MUST enable captchas via:
 
docs/sample_config.yaml
@@ -955,7 +955,7 @@ uploads_path: "DATADIR/uploads"
 # If a delegate is specified, the config option public_baseurl must also be filled out.
 #
 account_threepid_delegates:
-    #email: https://example.com # Delegate email sending to example.org
+    #email: https://example.com # Delegate email sending to example.com
     #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
 
 # Users who register on this homeserver will automatically be joined
scripts-dev/update_database (new executable file, 124 lines)
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import sys
+
+import yaml
+
+from twisted.internet import defer, reactor
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.server import HomeServer
+from synapse.storage import DataStore
+from synapse.storage.engines import create_engine
+from synapse.storage.prepare_database import prepare_database
+
+logger = logging.getLogger("update_database")
+
+
+class MockHomeserver(HomeServer):
+    DATASTORE_CLASS = DataStore
+
+    def __init__(self, config, database_engine, db_conn, **kwargs):
+        super(MockHomeserver, self).__init__(
+            config.server_name,
+            reactor=reactor,
+            config=config,
+            database_engine=database_engine,
+            **kwargs
+        )
+
+        self.database_engine = database_engine
+        self.db_conn = db_conn
+
+    def get_db_conn(self):
+        return self.db_conn
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=(
+            "Updates a synapse database to the latest schema and runs background updates"
+            " on it."
+        )
+    )
+    parser.add_argument("-v", action='store_true')
+    parser.add_argument(
+        "--database-config",
+        type=argparse.FileType('r'),
+        required=True,
+        help="A database config file for either a SQLite3 database or a PostgreSQL one.",
+    )
+
+    args = parser.parse_args()
+
+    logging_config = {
+        "level": logging.DEBUG if args.v else logging.INFO,
+        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
+    }
+
+    logging.basicConfig(**logging_config)
+
+    # Load, process and sanity-check the config.
+    hs_config = yaml.safe_load(args.database_config)
+
+    if "database" not in hs_config:
+        sys.stderr.write("The configuration file must have a 'database' section.\n")
+        sys.exit(4)
+
+    config = HomeServerConfig()
+    config.parse_config_dict(hs_config, "", "")
+
+    # Create the database engine and a connection to it.
+    database_engine = create_engine(config.database_config)
+    db_conn = database_engine.module.connect(
+        **{
+            k: v
+            for k, v in config.database_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+    )
+
+    # Update the database to the latest schema.
+    prepare_database(db_conn, database_engine, config=config)
+    db_conn.commit()
+
+    # Instantiate and initialise the homeserver object.
+    hs = MockHomeserver(
+        config,
+        database_engine,
+        db_conn,
+        db_config=config.database_config,
+    )
+    # setup instantiates the store within the homeserver object.
+    hs.setup()
+    store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def run_background_updates():
+        yield store.run_background_updates(sleep=False)
+        # Stop the reactor to exit the script once every background update is run.
+        reactor.stop()
+
+    # Apply all background updates on the database.
+    reactor.callWhenRunning(lambda: run_as_background_process(
+        "background_updates", run_background_updates
+    ))
+
+    reactor.run()
synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.5.0rc1"
+__version__ = "1.5.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
synapse/config/registration.py
@@ -300,7 +300,7 @@ class RegistrationConfig(Config):
         # If a delegate is specified, the config option public_baseurl must also be filled out.
         #
         account_threepid_delegates:
-            #email: https://example.com # Delegate email sending to example.org
+            #email: https://example.com # Delegate email sending to example.com
             #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
 
         # Users who register on this homeserver will automatically be joined
synapse/crypto/event_signing.py
@@ -125,9 +125,11 @@ def compute_event_signature(event_dict, signature_name, signing_key):
     redact_json = prune_event_dict(event_dict)
     redact_json.pop("age_ts", None)
     redact_json.pop("unsigned", None)
-    logger.debug("Signing event: %s", encode_canonical_json(redact_json))
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug("Signing event: %s", encode_canonical_json(redact_json))
     redact_json = sign_json(redact_json, signature_name, signing_key)
-    logger.debug("Signed event: %s", encode_canonical_json(redact_json))
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug("Signed event: %s", encode_canonical_json(redact_json))
     return redact_json["signatures"]
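The guard above is a general pattern: without the ``isEnabledFor`` check, the expensive canonical-JSON encoding runs even when DEBUG logging is off, only for its output to be discarded. A standalone sketch of the idea, with illustrative names rather than Synapse's real helpers:

import json
import logging

logger = logging.getLogger(__name__)

def encode_canonical(obj):
    # Stand-in for a costly canonical-JSON encoder.
    return json.dumps(obj, sort_keys=True, separators=(",", ":"))

def sign_sketch(event_dict):
    if logger.isEnabledFor(logging.DEBUG):
        # The encoding runs only when DEBUG output will actually be emitted.
        logger.debug("Signing event: %s", encode_canonical(event_dict))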
synapse/events/snapshot.py
@@ -12,9 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 from six import iteritems
 
+import attr
 from frozendict import frozendict
 
 from twisted.internet import defer
@@ -22,7 +22,8 @@ from twisted.internet import defer
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 
 
-class EventContext(object):
+@attr.s(slots=True)
+class EventContext:
     """
     Attributes:
         state_group (int|None): state group id, if the state has been stored
@@ -31,9 +32,6 @@ class EventContext(object):
         rejected (bool|str): A rejection reason if the event was rejected, else
             False
 
-        push_actions (list[(str, list[object])]): list of (user_id, actions)
-            tuples
-
         prev_group (int): Previously persisted state group. ``None`` for an
             outlier.
         delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
@@ -42,6 +40,8 @@ class EventContext(object):
         prev_state_events (?): XXX: is this ever set to anything other than
             the empty list?
 
+        app_service: FIXME
+
         _current_state_ids (dict[(str, str), str]|None):
             The current state map including the current event. None if outlier
             or we haven't fetched the state from DB yet.
@@ -67,49 +67,33 @@ class EventContext(object):
             Only set when state has not been fetched yet.
     """
 
-    __slots__ = [
-        "state_group",
-        "rejected",
-        "prev_group",
-        "delta_ids",
-        "prev_state_events",
-        "app_service",
-        "_current_state_ids",
-        "_prev_state_ids",
-        "_prev_state_id",
-        "_event_type",
-        "_event_state_key",
-        "_fetching_state_deferred",
-    ]
-
-    def __init__(self):
-        self.prev_state_events = []
-        self.rejected = False
-        self.app_service = None
+    state_group = attr.ib(default=None)
+    rejected = attr.ib(default=False)
+    prev_group = attr.ib(default=None)
+    delta_ids = attr.ib(default=None)
+    prev_state_events = attr.ib(default=attr.Factory(list))
+    app_service = attr.ib(default=None)
+
+    _current_state_ids = attr.ib(default=None)
+    _prev_state_ids = attr.ib(default=None)
+    _prev_state_id = attr.ib(default=None)
+
+    _event_type = attr.ib(default=None)
+    _event_state_key = attr.ib(default=None)
+    _fetching_state_deferred = attr.ib(default=None)
 
     @staticmethod
     def with_state(
         state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
     ):
-        context = EventContext()
-
-        # The current state including the current event
-        context._current_state_ids = current_state_ids
-        # The current state excluding the current event
-        context._prev_state_ids = prev_state_ids
-        context.state_group = state_group
-
-        context._prev_state_id = None
-        context._event_type = None
-        context._event_state_key = None
-        context._fetching_state_deferred = defer.succeed(None)
-
-        # A previously persisted state group and a delta between that
-        # and this state.
-        context.prev_group = prev_group
-        context.delta_ids = delta_ids
-
-        return context
+        return EventContext(
+            current_state_ids=current_state_ids,
+            prev_state_ids=prev_state_ids,
+            state_group=state_group,
+            fetching_state_deferred=defer.succeed(None),
+            prev_group=prev_group,
+            delta_ids=delta_ids,
+        )
 
     @defer.inlineCallbacks
     def serialize(self, event, store):
@@ -157,24 +141,18 @@ class EventContext(object):
         Returns:
             EventContext
         """
-        context = EventContext()
-
-        # We use the state_group and prev_state_id stuff to pull the
-        # current_state_ids out of the DB and construct prev_state_ids.
-        context._prev_state_id = input["prev_state_id"]
-        context._event_type = input["event_type"]
-        context._event_state_key = input["event_state_key"]
-
-        context._current_state_ids = None
-        context._prev_state_ids = None
-        context._fetching_state_deferred = None
-
-        context.state_group = input["state_group"]
-        context.prev_group = input["prev_group"]
-        context.delta_ids = _decode_state_dict(input["delta_ids"])
-
-        context.rejected = input["rejected"]
-        context.prev_state_events = input["prev_state_events"]
+        context = EventContext(
+            # We use the state_group and prev_state_id stuff to pull the
+            # current_state_ids out of the DB and construct prev_state_ids.
+            prev_state_id=input["prev_state_id"],
+            event_type=input["event_type"],
+            event_state_key=input["event_state_key"],
+            state_group=input["state_group"],
+            prev_group=input["prev_group"],
+            delta_ids=_decode_state_dict(input["delta_ids"]),
+            rejected=input["rejected"],
+            prev_state_events=input["prev_state_events"],
+        )
 
         app_service_id = input["app_service_id"]
         if app_service_id:
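One subtlety of the attrs conversion above: underscore-prefixed attributes such as ``_event_type`` are exposed by attrs as ``__init__`` keyword arguments without the underscore, which is why ``deserialize`` can pass ``event_type=...`` while the attribute stays private. A minimal sketch of that behaviour, simplified and not the real class:

import attr

@attr.s(slots=True)
class MiniContext:
    state_group = attr.ib(default=None)
    rejected = attr.ib(default=False)
    _event_type = attr.ib(default=None)  # init argument is `event_type`

ctx = MiniContext(state_group=42, event_type="m.room.message")
assert ctx.state_group == 42
assert ctx._event_type == "m.room.message"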
synapse/federation/federation_base.py
@@ -278,9 +278,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
                 pdu_to_check.sender_domain,
                 e.getErrorMessage(),
             )
-            # XX not really sure if these are the right codes, but they are what
-            # we've done for ages
-            raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
 
         for p, d in zip(pdus_to_check_sender, more_deferreds):
             d.addErrback(sender_err, p)
@@ -314,8 +312,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
                 "event id %s: unable to verify signature for event id domain: %s"
                 % (pdu_to_check.pdu.event_id, e.getErrorMessage())
             )
-            # XX as above: not really sure if these are the right codes
-            raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
 
         for p, d in zip(pdus_to_check_event_id, more_deferreds):
             d.addErrback(event_err, p)
synapse/federation/federation_client.py
@@ -196,7 +196,7 @@ class FederationClient(FederationBase):
             dest, room_id, extremities, limit
         )
 
-        logger.debug("backfill transaction_data=%s", repr(transaction_data))
+        logger.debug("backfill transaction_data=%r", transaction_data)
 
         room_version = yield self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
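The one-line change above swaps an eager ``repr(...)`` call for lazy ``%r`` formatting: the logging framework only renders arguments when the record is actually emitted. An illustrative sketch, not from the codebase:

import logging

logger = logging.getLogger(__name__)
transaction_data = {"origin": "example.org", "pdus": []}

# Eager: repr() runs even when DEBUG is disabled.
logger.debug("backfill transaction_data=%s", repr(transaction_data))
# Lazy: repr() runs only if the message is emitted.
logger.debug("backfill transaction_data=%r", transaction_data)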
synapse/federation/federation_server.py
@@ -21,7 +21,6 @@ from six import iteritems
 from canonicaljson import json
 from prometheus_client import Counter
 
-from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure
 
@@ -86,14 +85,12 @@ class FederationServer(FederationBase):
         # come in waves.
         self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)
 
-    @defer.inlineCallbacks
-    @log_function
-    def on_backfill_request(self, origin, room_id, versions, limit):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+    async def on_backfill_request(self, origin, room_id, versions, limit):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)
 
-            pdus = yield self.handler.on_backfill_request(
+            pdus = await self.handler.on_backfill_request(
                 origin, room_id, versions, limit
             )
 
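This hunk is the first of many in the file applying the same mechanical conversion: drop ``@defer.inlineCallbacks`` (and ``@log_function``), declare the method ``async``, and turn each ``yield deferred`` into ``await deferred``. Twisted Deferreds are directly awaitable from native coroutines, so the sketch below, with illustrative names rather than Synapse code, shows both spellings side by side:

from twisted.internet import defer

@defer.inlineCallbacks
def old_style(fetch):
    result = yield fetch()   # generator-based: yield a Deferred
    return result

async def new_style(fetch):
    result = await fetch()   # native coroutine: await the Deferred
    return result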
@@ -101,9 +98,7 @@ class FederationServer(FederationBase):
 
         return 200, res
 
-    @defer.inlineCallbacks
-    @log_function
-    def on_incoming_transaction(self, origin, transaction_data):
+    async def on_incoming_transaction(self, origin, transaction_data):
         # keep this as early as possible to make the calculated origin ts as
         # accurate as possible.
         request_time = self._clock.time_msec()
@@ -118,18 +113,17 @@ class FederationServer(FederationBase):
         # use a linearizer to ensure that we don't process the same transaction
         # multiple times in parallel.
         with (
-            yield self._transaction_linearizer.queue(
+            await self._transaction_linearizer.queue(
                 (origin, transaction.transaction_id)
             )
         ):
-            result = yield self._handle_incoming_transaction(
+            result = await self._handle_incoming_transaction(
                 origin, transaction, request_time
             )
 
         return result
 
-    @defer.inlineCallbacks
-    def _handle_incoming_transaction(self, origin, transaction, request_time):
+    async def _handle_incoming_transaction(self, origin, transaction, request_time):
         """ Process an incoming transaction and return the HTTP response
 
         Args:
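The ``with (await self._transaction_linearizer.queue(key))`` idiom serialises processing per key: Synapse's Linearizer resolves to a context manager, so transactions sharing ``(origin, transaction_id)`` never run concurrently. A simplified asyncio analogue of the idea follows; it is not the real API, whose shape differs slightly:

import asyncio
from collections import defaultdict
from contextlib import asynccontextmanager

class MiniLinearizer:
    def __init__(self):
        self._locks = defaultdict(asyncio.Lock)

    @asynccontextmanager
    async def queue(self, key):
        # One lock per key: callers with the same key run one at a time.
        async with self._locks[key]:
            yield

async def handle(linearizer, origin, txn_id):
    async with linearizer.queue((origin, txn_id)):
        ...  # at most one handler per (origin, txn_id) at any moment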
@@ -140,7 +134,7 @@ class FederationServer(FederationBase):
         Returns:
             Deferred[(int, object)]: http response code and body
         """
-        response = yield self.transaction_actions.have_responded(origin, transaction)
+        response = await self.transaction_actions.have_responded(origin, transaction)
 
         if response:
             logger.debug(
@@ -151,7 +145,7 @@ class FederationServer(FederationBase):
 
         logger.debug("[%s] Transaction is new", transaction.transaction_id)
 
-        # Reject if PDU count > 50 and EDU count > 100
+        # Reject if PDU count > 50 or EDU count > 100
         if len(transaction.pdus) > 50 or (
             hasattr(transaction, "edus") and len(transaction.edus) > 100
         ):
@@ -159,7 +153,7 @@ class FederationServer(FederationBase):
             logger.info("Transaction PDU or EDU count too large. Returning 400")
 
             response = {}
-            yield self.transaction_actions.set_response(
+            await self.transaction_actions.set_response(
                 origin, transaction, 400, response
             )
             return 400, response
@@ -195,7 +189,7 @@ class FederationServer(FederationBase):
                 continue
 
             try:
-                room_version = yield self.store.get_room_version(room_id)
+                room_version = await self.store.get_room_version(room_id)
             except NotFoundError:
                 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                 continue
@@ -221,11 +215,10 @@ class FederationServer(FederationBase):
         # require callouts to other servers to fetch missing events), but
         # impose a limit to avoid going too crazy with ram/cpu.
 
-        @defer.inlineCallbacks
-        def process_pdus_for_room(room_id):
+        async def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            try:
-                yield self.check_server_matches_acl(origin_host, room_id)
+                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warn("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
@@ -237,7 +230,7 @@ class FederationServer(FederationBase):
                 event_id = pdu.event_id
                 with nested_logging_context(event_id):
                     try:
-                        yield self._handle_received_pdu(origin, pdu)
+                        await self._handle_received_pdu(origin, pdu)
                         pdu_results[event_id] = {}
                     except FederationError as e:
                         logger.warn("Error handling PDU %s: %s", event_id, e)
@@ -251,36 +244,33 @@ class FederationServer(FederationBase):
                         exc_info=(f.type, f.value, f.getTracebackObject()),
                     )
 
-        yield concurrently_execute(
+        await concurrently_execute(
             process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
         )
 
         if hasattr(transaction, "edus"):
             for edu in (Edu(**x) for x in transaction.edus):
-                yield self.received_edu(origin, edu.edu_type, edu.content)
+                await self.received_edu(origin, edu.edu_type, edu.content)
 
         response = {"pdus": pdu_results}
 
         logger.debug("Returning: %s", str(response))
 
-        yield self.transaction_actions.set_response(origin, transaction, 200, response)
+        await self.transaction_actions.set_response(origin, transaction, 200, response)
         return 200, response
 
-    @defer.inlineCallbacks
-    def received_edu(self, origin, edu_type, content):
+    async def received_edu(self, origin, edu_type, content):
         received_edus_counter.inc()
-        yield self.registry.on_edu(edu_type, origin, content)
+        await self.registry.on_edu(edu_type, origin, content)
 
-    @defer.inlineCallbacks
-    @log_function
-    def on_context_state_request(self, origin, room_id, event_id):
+    async def on_context_state_request(self, origin, room_id, event_id):
         if not event_id:
             raise NotImplementedError("Specify an event")
 
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
+        await self.check_server_matches_acl(origin_host, room_id)
 
-        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        in_room = await self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")
 
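``concurrently_execute`` fans ``process_pdus_for_room`` out over every room in the transaction while capping concurrency at ``TRANSACTION_CONCURRENCY_LIMIT``. A rough asyncio analogue of that helper, an assumption about its behaviour rather than Synapse's implementation:

import asyncio

async def concurrently_execute_sketch(func, args, limit):
    sem = asyncio.Semaphore(limit)

    async def run_one(arg):
        async with sem:  # at most `limit` calls in flight
            await func(arg)

    await asyncio.gather(*(run_one(arg) for arg in args))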
@@ -289,8 +279,8 @@ class FederationServer(FederationBase):
         # in the cache so we could return it without waiting for the linearizer
         # - but that's non-trivial to get right, and anyway somewhat defeats
         # the point of the linearizer.
-        with (yield self._server_linearizer.queue((origin, room_id))):
-            resp = yield self._state_resp_cache.wrap(
+        with (await self._server_linearizer.queue((origin, room_id))):
+            resp = await self._state_resp_cache.wrap(
                 (room_id, event_id),
                 self._on_context_state_request_compute,
                 room_id,
@@ -299,65 +289,58 @@ class FederationServer(FederationBase):
 
         return 200, resp
 
-    @defer.inlineCallbacks
-    def on_state_ids_request(self, origin, room_id, event_id):
+    async def on_state_ids_request(self, origin, room_id, event_id):
         if not event_id:
             raise NotImplementedError("Specify an event")
 
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
+        await self.check_server_matches_acl(origin_host, room_id)
 
-        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        in_room = await self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")
 
-        state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
-        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
+        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
+        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
 
         return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
 
-    @defer.inlineCallbacks
-    def _on_context_state_request_compute(self, room_id, event_id):
-        pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
-        auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])
+    async def _on_context_state_request_compute(self, room_id, event_id):
+        pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
 
         return {
             "pdus": [pdu.get_pdu_json() for pdu in pdus],
             "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
         }
 
-    @defer.inlineCallbacks
-    @log_function
-    def on_pdu_request(self, origin, event_id):
-        pdu = yield self.handler.get_persisted_pdu(origin, event_id)
+    async def on_pdu_request(self, origin, event_id):
+        pdu = await self.handler.get_persisted_pdu(origin, event_id)
 
         if pdu:
             return 200, self._transaction_from_pdus([pdu]).get_dict()
         else:
             return 404, ""
 
-    @defer.inlineCallbacks
-    def on_query_request(self, query_type, args):
+    async def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
-        resp = yield self.registry.on_query(query_type, args)
+        resp = await self.registry.on_query(query_type, args)
         return 200, resp
 
-    @defer.inlineCallbacks
-    def on_make_join_request(self, origin, room_id, user_id, supported_versions):
+    async def on_make_join_request(self, origin, room_id, user_id, supported_versions):
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
+        await self.check_server_matches_acl(origin_host, room_id)
 
-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         if room_version not in supported_versions:
             logger.warn("Room version %s not in %s", room_version, supported_versions)
             raise IncompatibleRoomVersionError(room_version=room_version)
 
-        pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
+        pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
         time_now = self._clock.time_msec()
         return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
 
-    @defer.inlineCallbacks
-    def on_invite_request(self, origin, content, room_version):
+    async def on_invite_request(self, origin, content, room_version):
         if room_version not in KNOWN_ROOM_VERSIONS:
             raise SynapseError(
                 400,
@@ -369,24 +352,27 @@ class FederationServer(FederationBase):
 
         pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
-        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
+        ret_pdu = await self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}
 
-    @defer.inlineCallbacks
-    def on_send_join_request(self, origin, content, room_id):
+    async def on_send_join_request(self, origin, content, room_id):
         logger.debug("on_send_join_request: content: %s", content)
 
-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
         pdu = event_from_pdu_json(content, format_ver)
 
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)
 
         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
-        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
+
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+        res_pdus = await self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
         return (
             200,
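Note the ordering the hunks above introduce, which is the substance of the \#6262 security fix: an incoming PDU passes through ``_check_sigs_and_hash`` before the handler acts on it, so an event with a bad signature or hash is rejected (or replaced by a redacted copy) before it can have side effects. Schematically, with hypothetical names:

async def handle_signed_pdu(pdu, room_version, check_sigs_and_hash, handler):
    # Verify first; this may raise, or substitute a redacted copy.
    pdu = await check_sigs_and_hash(room_version, pdu)
    # Only a verified event reaches the handler.
    return await handler(pdu)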
@@ -398,45 +384,44 @@ class FederationServer(FederationBase):
             },
         )

-    @defer.inlineCallbacks
-    def on_make_leave_request(self, origin, room_id, user_id):
+    async def on_make_leave_request(self, origin, room_id, user_id):
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
-        pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
+        await self.check_server_matches_acl(origin_host, room_id)
+        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)

         time_now = self._clock.time_msec()
         return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

-    @defer.inlineCallbacks
-    def on_send_leave_request(self, origin, content, room_id):
+    async def on_send_leave_request(self, origin, content, room_id):
         logger.debug("on_send_leave_request: content: %s", content)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
         pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)

         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
-        yield self.handler.on_send_leave_request(origin, pdu)
+
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+        await self.handler.on_send_leave_request(origin, pdu)
         return 200, {}

-    @defer.inlineCallbacks
-    def on_event_auth(self, origin, room_id, event_id):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+    async def on_event_auth(self, origin, room_id, event_id):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

             time_now = self._clock.time_msec()
-            auth_pdus = yield self.handler.on_event_auth(event_id)
+            auth_pdus = await self.handler.on_event_auth(event_id)
             res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
         return 200, res

-    @defer.inlineCallbacks
-    def on_query_auth_request(self, origin, content, room_id, event_id):
+    async def on_query_auth_request(self, origin, content, room_id, event_id):
         """
         Content is a dict with keys::
             auth_chain (list): A list of events that give the auth chain.
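The `with (await self._server_linearizer.queue(...))` lines above keep the pre-existing pattern: `queue()` resolves, once it is this caller's turn, to a context manager that releases the slot on exit. A toy illustration of that shape (this is not Synapse's `Linearizer`, just a sketch of the idea using asyncio):

import asyncio
from contextlib import contextmanager

class ToyLinearizer:
    def __init__(self):
        self._locks = {}

    async def queue(self, key):
        lock = self._locks.setdefault(key, asyncio.Lock())
        await lock.acquire()  # wait for our turn before entering

        @contextmanager
        def _entry():
            try:
                yield
            finally:
                lock.release()  # let the next queued caller proceed

        return _entry()

Usage mirrors the diff: `with (await linearizer.queue((origin, room_id))): ...`, so only one request per key runs the guarded section at a time.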
@@ -455,22 +440,22 @@ class FederationServer(FederationBase):
         Returns:
             Deferred: Results in `dict` with the same format as `content`
         """
-        with (yield self._server_linearizer.queue((origin, room_id))):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

-            room_version = yield self.store.get_room_version(room_id)
+            room_version = await self.store.get_room_version(room_id)
             format_ver = room_version_to_event_format(room_version)

             auth_chain = [
                 event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
             ]

-            signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            signed_auth = await self._check_sigs_and_hash_and_fetch(
                 origin, auth_chain, outlier=True, room_version=room_version
             )

-            ret = yield self.handler.on_query_auth(
+            ret = await self.handler.on_query_auth(
                 origin,
                 event_id,
                 room_id,
@@ -496,16 +481,14 @@ class FederationServer(FederationBase):
         return self.on_query_request("user_devices", user_id)

     @trace
-    @defer.inlineCallbacks
-    @log_function
-    def on_claim_client_keys(self, origin, content):
+    async def on_claim_client_keys(self, origin, content):
         query = []
         for user_id, device_keys in content.get("one_time_keys", {}).items():
             for device_id, algorithm in device_keys.items():
                 query.append((user_id, device_id, algorithm))

         log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
-        results = yield self.store.claim_e2e_one_time_keys(query)
+        results = await self.store.claim_e2e_one_time_keys(query)

         json_result = {}
         for user_id, device_keys in results.items():
@@ -529,14 +512,12 @@ class FederationServer(FederationBase):

         return {"one_time_keys": json_result}

-    @defer.inlineCallbacks
-    @log_function
-    def on_get_missing_events(
+    async def on_get_missing_events(
         self, origin, room_id, earliest_events, latest_events, limit
     ):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

             logger.info(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
@@ -546,7 +527,7 @@ class FederationServer(FederationBase):
                 limit,
             )

-            missing_events = yield self.handler.on_get_missing_events(
+            missing_events = await self.handler.on_get_missing_events(
                 origin, room_id, earliest_events, latest_events, limit
             )

@@ -579,8 +560,7 @@ class FederationServer(FederationBase):
             destination=None,
         )

-    @defer.inlineCallbacks
-    def _handle_received_pdu(self, origin, pdu):
+    async def _handle_received_pdu(self, origin, pdu):
         """ Process a PDU received in a federation /send/ transaction.

         If the event is invalid, then this method throws a FederationError.
@@ -633,37 +613,34 @@ class FederationServer(FederationBase):
             logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

         # We've already checked that we know the room version by this point
-        room_version = yield self.store.get_room_version(pdu.room_id)
+        room_version = await self.store.get_room_version(pdu.room_id)

         # Check signature.
         try:
-            pdu = yield self._check_sigs_and_hash(room_version, pdu)
+            pdu = await self._check_sigs_and_hash(room_version, pdu)
         except SynapseError as e:
             raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

-        yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
+        await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)

     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name

-    @defer.inlineCallbacks
-    def exchange_third_party_invite(
+    async def exchange_third_party_invite(
         self, sender_user_id, target_user_id, room_id, signed
     ):
-        ret = yield self.handler.exchange_third_party_invite(
+        ret = await self.handler.exchange_third_party_invite(
             sender_user_id, target_user_id, room_id, signed
         )
         return ret

-    @defer.inlineCallbacks
-    def on_exchange_third_party_invite_request(self, room_id, event_dict):
-        ret = yield self.handler.on_exchange_third_party_invite_request(
+    async def on_exchange_third_party_invite_request(self, room_id, event_dict):
+        ret = await self.handler.on_exchange_third_party_invite_request(
             room_id, event_dict
         )
         return ret

-    @defer.inlineCallbacks
-    def check_server_matches_acl(self, server_name, room_id):
+    async def check_server_matches_acl(self, server_name, room_id):
         """Check if the given server is allowed by the server ACLs in the room

         Args:
@@ -673,13 +650,13 @@ class FederationServer(FederationBase):
         Raises:
             AuthError if the server does not match the ACL
         """
-        state_ids = yield self.store.get_current_state_ids(room_id)
+        state_ids = await self.store.get_current_state_ids(room_id)
         acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

         if not acl_event_id:
             return

-        acl_event = yield self.store.get_event(acl_event_id)
+        acl_event = await self.store.get_event(acl_event_id)
         if server_matches_acl_event(server_name, acl_event):
             return

@@ -792,15 +769,14 @@ class FederationHandlerRegistry(object):

         self.query_handlers[query_type] = handler

-    @defer.inlineCallbacks
-    def on_edu(self, edu_type, origin, content):
+    async def on_edu(self, edu_type, origin, content):
         handler = self.edu_handlers.get(edu_type)
         if not handler:
             logger.warn("No handler registered for EDU type %s", edu_type)

         with start_active_span_from_edu(content, "handle_edu"):
             try:
-                yield handler(origin, content)
+                await handler(origin, content)
             except SynapseError as e:
                 logger.info("Failed to handle edu %r: %r", edu_type, e)
             except Exception:
@@ -833,7 +809,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):

         super(ReplicationFederationHandlerRegistry, self).__init__()

-    def on_edu(self, edu_type, origin, content):
+    async def on_edu(self, edu_type, origin, content):
         """Overrides FederationHandlerRegistry
         """
         if not self.config.use_presence and edu_type == "m.presence":
@@ -841,17 +817,17 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):

         handler = self.edu_handlers.get(edu_type)
         if handler:
-            return super(ReplicationFederationHandlerRegistry, self).on_edu(
+            return await super(ReplicationFederationHandlerRegistry, self).on_edu(
                 edu_type, origin, content
             )

-        return self._send_edu(edu_type=edu_type, origin=origin, content=content)
+        return await self._send_edu(edu_type=edu_type, origin=origin, content=content)

-    def on_query(self, query_type, args):
+    async def on_query(self, query_type, args):
         """Overrides FederationHandlerRegistry
         """
         handler = self.query_handlers.get(query_type)
         if handler:
-            return handler(args)
+            return await handler(args)

-        return self._get_query_client(query_type=query_type, args=args)
+        return await self._get_query_client(query_type=query_type, args=args)
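In the worker-aware registry, the overridden `on_edu`/`on_query` become coroutines as well, and the parent implementation must now be awaited: inside an `async def`, a bare `return super().on_query(...)` would hand the caller an un-awaited coroutine object as the "result". A runnable reduction of that detail (handler wiring invented for the demo):

import asyncio

class Registry:
    def __init__(self):
        self.query_handlers = {}

    async def on_query(self, query_type, args):
        handler = self.query_handlers[query_type]
        return await handler(args)

class WorkerRegistry(Registry):
    async def on_query(self, query_type, args):
        if query_type in self.query_handlers:
            # Must await: returning the coroutine itself would make the
            # awaiting caller receive a coroutine, not the query result.
            return await super().on_query(query_type, args)
        raise KeyError("would proxy %r to the main process here" % (query_type,))

async def _lookup(args):
    return {"displayname": "Alice"}

async def _demo():
    reg = WorkerRegistry()
    reg.query_handlers["profile"] = _lookup
    print(await reg.on_query("profile", {"user_id": "@alice:example.org"}))

asyncio.run(_demo())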
@@ -36,6 +36,8 @@ from six import iteritems

 from sortedcontainers import SortedDict

+from twisted.internet import defer
+
 from synapse.metrics import LaterGauge
 from synapse.storage.presence import UserPresenceState
 from synapse.util.metrics import Measure
@@ -212,7 +214,7 @@ class FederationRemoteSendQueue(object):
         receipt (synapse.types.ReadReceipt):
         """
         # nothing to do here: the replication listener will handle it.
-        pass
+        return defer.succeed(None)

     def send_presence(self, states):
         """As per FederationSender
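Returning `defer.succeed(None)` instead of falling off the end keeps the stub's interface aligned with the real federation sender now that callers may `await` the result. An already-fired Deferred is awaitable, so the same calling code works against either implementation; roughly:

from twisted.internet import defer

class NoopSender:
    """Illustrative stub, mirroring the shape of the change above."""

    def send_read_receipt(self, receipt):
        # The replication listener does the real work elsewhere; hand
        # back something awaitable so async callers need no special case.
        return defer.succeed(None)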
@@ -122,10 +122,10 @@ class TransportLayerClient(object):
             Deferred: Results in a dict received from the remote homeserver.
         """
         logger.debug(
-            "backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
+            "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
             destination,
             room_id,
-            repr(event_tuples),
+            event_tuples,
             str(limit),
         )

@@ -109,6 +109,7 @@ class FederationHandler(BaseHandler):
         self.hs = hs

         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -1222,7 +1223,6 @@ class FederationHandler(BaseHandler):
         Returns:
             Deferred[FrozenEvent]
         """
-
         if get_domain_from_id(user_id) != origin:
             logger.info(
                 "Got /make_join request for user %r from different origin %s, ignoring",
@@ -1251,7 +1251,7 @@ class FederationHandler(BaseHandler):
                 builder=builder
             )
         except AuthError as e:
-            logger.warn("Failed to create join %r because %s", event, e)
+            logger.warn("Failed to create join to %s because %s", room_id, e)
             raise e

         event_allowed = yield self.third_party_event_rules.check_event_allowed(
@@ -1280,11 +1280,20 @@ class FederationHandler(BaseHandler):
         event = pdu

         logger.debug(
-            "on_send_join_request: Got event: %s, signatures: %s",
+            "on_send_join_request from %s: Got event: %s, signatures: %s",
+            origin,
             event.event_id,
             event.signatures,
         )

+        if get_domain_from_id(event.sender) != origin:
+            logger.info(
+                "Got /send_join request for user %r from different origin %s",
+                event.sender,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         event.internal_metadata.outlier = False
         # Send this event on behalf of the origin server.
         #
@@ -1503,6 +1512,14 @@ class FederationHandler(BaseHandler):
             event.signatures,
         )

+        if get_domain_from_id(event.sender) != origin:
+            logger.info(
+                "Got /send_leave request for user %r from different origin %s",
+                event.sender,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         event.internal_metadata.outlier = False

         context = yield self._handle_new_event(origin, event)
@@ -2648,7 +2665,7 @@ class FederationHandler(BaseHandler):
                 backfilled=backfilled,
             )
         else:
-            max_stream_id = yield self.store.persist_events(
+            max_stream_id = yield self.storage.persistence.persist_events(
                 event_and_contexts, backfilled=backfilled
             )

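The new `/send_join` and `/send_leave` checks reject events whose sender is not a user of the requesting server, closing off cross-server impersonation of membership events. The essence of the check, sketched with a simplified domain parser rather than Synapse's helpers:

def get_domain_from_id(user_id):
    # Matrix IDs look like "@localpart:domain"; the server name is
    # everything after the first colon.
    return user_id.split(":", 1)[1]

def check_sender_matches_origin(event_sender, origin):
    # A federation request from `origin` may only carry membership
    # events for that server's own users.
    if get_domain_from_id(event_sender) != origin:
        raise PermissionError("User not from origin")

check_sender_matches_origin("@alice:example.org", "example.org")   # passes
# check_sender_matches_origin("@mallory:evil.test", "example.org") # raises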
@@ -234,6 +234,7 @@ class EventCreationHandler(object):
         self.hs = hs
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.state = hs.get_state_handler()
         self.clock = hs.get_clock()
         self.validator = EventValidator()
@@ -868,7 +869,7 @@ class EventCreationHandler(object):
         if prev_state_ids:
             raise AuthError(403, "Changing the room create event is forbidden")

-        (event_stream_id, max_stream_id) = yield self.store.persist_event(
+        event_stream_id, max_stream_id = yield self.storage.persistence.persist_event(
             event, context=context
         )

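Both handlers above gain `self.storage = hs.get_storage()` next to the existing datastore, and event persistence moves to `self.storage.persistence.persist_event(s)`: persistence logic is being split out of the monolithic datastore behind an aggregating handle. A hedged sketch of that indirection (internal names invented):

class Persistence:
    """Owns event persistence, separate from plain data access."""

    def __init__(self, datastore):
        self._datastore = datastore

    async def persist_event(self, event, context=None):
        # Delegate the actual write; returns (event_stream_id, max_stream_id).
        return await self._datastore.write_event(event, context)

class Storage:
    """Aggregates the split-out storage components under one handle."""

    def __init__(self, datastore):
        self.persistence = Persistence(datastore)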
@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.util.async_helpers import Linearizer

 from ._base import BaseHandler
@@ -32,8 +30,7 @@ class ReadMarkerHandler(BaseHandler):
         self.read_marker_linearizer = Linearizer(name="read_marker")
         self.notifier = hs.get_notifier()

-    @defer.inlineCallbacks
-    def received_client_read_marker(self, room_id, user_id, event_id):
+    async def received_client_read_marker(self, room_id, user_id, event_id):
         """Updates the read marker for a given user in a given room if the event ID given
         is ahead in the stream relative to the current read marker.

@@ -41,8 +38,8 @@ class ReadMarkerHandler(BaseHandler):
         the read marker has changed.
         """

-        with (yield self.read_marker_linearizer.queue((room_id, user_id))):
-            existing_read_marker = yield self.store.get_account_data_for_room_and_type(
+        with await self.read_marker_linearizer.queue((room_id, user_id)):
+            existing_read_marker = await self.store.get_account_data_for_room_and_type(
                 user_id, room_id, "m.fully_read"
             )

@@ -50,13 +47,13 @@ class ReadMarkerHandler(BaseHandler):

             if existing_read_marker:
                 # Only update if the new marker is ahead in the stream
-                should_update = yield self.store.is_event_after(
+                should_update = await self.store.is_event_after(
                     event_id, existing_read_marker["event_id"]
                 )

             if should_update:
                 content = {"event_id": event_id}
-                max_id = yield self.store.add_account_data_to_room(
+                max_id = await self.store.add_account_data_to_room(
                     user_id, room_id, "m.fully_read", content
                 )
                 self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
@@ -18,6 +18,7 @@ from twisted.internet import defer

 from synapse.handlers._base import BaseHandler
 from synapse.types import ReadReceipt, get_domain_from_id
+from synapse.util.async_helpers import maybe_awaitable

 logger = logging.getLogger(__name__)

@@ -36,8 +37,7 @@ class ReceiptsHandler(BaseHandler):
         self.clock = self.hs.get_clock()
         self.state = hs.get_state_handler()

-    @defer.inlineCallbacks
-    def _received_remote_receipt(self, origin, content):
+    async def _received_remote_receipt(self, origin, content):
         """Called when we receive an EDU of type m.receipt from a remote HS.
         """
         receipts = []
@@ -62,17 +62,16 @@ class ReceiptsHandler(BaseHandler):
                 )
             )

-        yield self._handle_new_receipts(receipts)
+        await self._handle_new_receipts(receipts)

-    @defer.inlineCallbacks
-    def _handle_new_receipts(self, receipts):
+    async def _handle_new_receipts(self, receipts):
         """Takes a list of receipts, stores them and informs the notifier.
         """
         min_batch_id = None
         max_batch_id = None

         for receipt in receipts:
-            res = yield self.store.insert_receipt(
+            res = await self.store.insert_receipt(
                 receipt.room_id,
                 receipt.receipt_type,
                 receipt.user_id,
@@ -99,14 +98,15 @@ class ReceiptsHandler(BaseHandler):

         self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
         # Note that the min here shouldn't be relied upon to be accurate.
-        yield self.hs.get_pusherpool().on_new_receipts(
-            min_batch_id, max_batch_id, affected_room_ids
+        await maybe_awaitable(
+            self.hs.get_pusherpool().on_new_receipts(
+                min_batch_id, max_batch_id, affected_room_ids
+            )
         )

         return True

-    @defer.inlineCallbacks
-    def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
+    async def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
         """Called when a client tells us a local user has read up to the given
         event_id in the room.
         """
@@ -118,24 +118,11 @@ class ReceiptsHandler(BaseHandler):
             data={"ts": int(self.clock.time_msec())},
         )

-        is_new = yield self._handle_new_receipts([receipt])
+        is_new = await self._handle_new_receipts([receipt])
         if not is_new:
             return

-        yield self.federation.send_read_receipt(receipt)
+        await self.federation.send_read_receipt(receipt)

-    @defer.inlineCallbacks
-    def get_receipts_for_room(self, room_id, to_key):
-        """Gets all receipts for a room, upto the given key.
-        """
-        result = yield self.store.get_linearized_receipts_for_room(
-            room_id, to_key=to_key
-        )
-
-        if not result:
-            return []
-
-        return result
-

 class ReceiptEventSource(object):
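`maybe_awaitable` bridges the two worlds during the migration: the pusher pool's `on_new_receipts` may return a Deferred, a coroutine, or plain `None` depending on deployment, and the handler can await all three uniformly. A simplified stand-in (not the `synapse.util` implementation):

import asyncio
import inspect

async def maybe_awaitable(value):
    # Await the value if it is awaitable (coroutine, Deferred, Future...);
    # otherwise pass it straight through.
    if inspect.isawaitable(value):
        return await value
    return value

async def _demo():
    async def push():
        return "pushed"

    print(await maybe_awaitable(push()))  # awaits the coroutine
    print(await maybe_awaitable(None))    # passes None through

asyncio.run(_demo())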
@@ -45,6 +45,8 @@ class StatsHandler(StateDeltasHandler):
         self.is_mine_id = hs.is_mine_id
         self.stats_bucket_size = hs.config.stats_bucket_size

+        self.stats_enabled = hs.config.stats_enabled
+
         # The current position in the current_state_delta stream
         self.pos = None

@@ -61,7 +63,7 @@ class StatsHandler(StateDeltasHandler):
     def notify_new_event(self):
         """Called when there may be more deltas to process
         """
-        if not self.hs.config.stats_enabled or self._is_processing:
+        if not self.stats_enabled or self._is_processing:
             return

         self._is_processing = True
@@ -79,7 +79,7 @@ class BulkPushRuleEvaluator(object):
             dict of user_id -> push_rules
         """
         room_id = event.room_id
-        rules_for_room = self._get_rules_for_room(room_id)
+        rules_for_room = yield self._get_rules_for_room(room_id)

         rules_by_user = yield rules_for_room.get_rules(event, context)

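The one-character change above (adding `yield`) is a real bugfix pattern: calling a Deferred-returning function without `yield` (or `await`) binds the Deferred itself, and the following code then operates on the wrong object. A toy reproduction, assuming Twisted is installed (names invented):

from twisted.internet import defer

@defer.inlineCallbacks
def get_rules_for_room(room_id):
    rules = yield defer.succeed({"@user:example.org": ["a_rule"]})
    return rules

def broken(room_id):
    rules = get_rules_for_room(room_id)  # forgot to yield
    # `rules` is a Deferred here, not a dict; .items() would fail.
    print(type(rules).__name__)          # -> Deferred

@defer.inlineCallbacks
def fixed(room_id):
    rules = yield get_rules_for_room(room_id)
    print(rules)                         # -> {'@user:example.org': ['a_rule']}

broken("!room:example.org")
fixed("!room:example.org")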
@@ -110,14 +110,14 @@ class ReplicationEndpoint(object):
         return {}

     @abc.abstractmethod
-    def _handle_request(self, request, **kwargs):
+    async def _handle_request(self, request, **kwargs):
         """Handle incoming request.

         This is called with the request object and PATH_ARGS.

         Returns:
-            Deferred[dict]: A JSON serialisable dict to be used as response
-            body of request.
+            tuple[int, dict]: HTTP status code and a JSON serialisable dict
+            to be used as response body of request.
         """
         pass

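After this change every concrete `_handle_request` in the replication servlets below follows one contract: an `async def` that parses the replicated payload and returns an `(http_status, json_body)` tuple. The shape, sketched as a hypothetical subclass of this abstract base (payload fields invented):

class ReplicationPingRestServlet(ReplicationEndpoint):
    """Hypothetical endpoint replicating a no-op "ping" to the master."""

    NAME = "ping"
    PATH_ARGS = ("user_id",)

    @staticmethod
    def _serialize_payload(user_id, payload):
        # What the worker sends across; must be JSON-serialisable.
        return {"payload": payload}

    async def _handle_request(self, request, user_id):
        content = parse_json_object_from_request(request)
        # ... work that must run on the master process goes here ...
        return 200, {"echoed": content["payload"], "user_id": user_id}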
@@ -82,8 +82,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):

         return payload

-    @defer.inlineCallbacks
-    def _handle_request(self, request):
+    async def _handle_request(self, request):
         with Measure(self.clock, "repl_fed_send_events_parse"):
             content = parse_json_object_from_request(request)

@@ -101,15 +100,13 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
                 EventType = event_type_from_format_version(format_ver)
                 event = EventType(event_dict, internal_metadata, rejected_reason)

-                context = yield EventContext.deserialize(
-                    self.store, event_payload["context"]
-                )
+                context = EventContext.deserialize(self.store, event_payload["context"])

                 event_and_contexts.append((event, context))

         logger.info("Got %d events from federation", len(event_and_contexts))

-        yield self.federation_handler.persist_events_and_notify(
+        await self.federation_handler.persist_events_and_notify(
             event_and_contexts, backfilled
         )

@@ -144,8 +141,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
     def _serialize_payload(edu_type, origin, content):
         return {"origin": origin, "content": content}

-    @defer.inlineCallbacks
-    def _handle_request(self, request, edu_type):
+    async def _handle_request(self, request, edu_type):
         with Measure(self.clock, "repl_fed_send_edu_parse"):
             content = parse_json_object_from_request(request)

@@ -154,7 +150,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):

         logger.info("Got %r edu from %s", edu_type, origin)

-        result = yield self.registry.on_edu(edu_type, origin, edu_content)
+        result = await self.registry.on_edu(edu_type, origin, edu_content)

         return 200, result

@@ -193,8 +189,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
         """
         return {"args": args}

-    @defer.inlineCallbacks
-    def _handle_request(self, request, query_type):
+    async def _handle_request(self, request, query_type):
         with Measure(self.clock, "repl_fed_query_parse"):
             content = parse_json_object_from_request(request)

@@ -202,7 +197,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):

         logger.info("Got %r query", query_type)

-        result = yield self.registry.on_query(query_type, args)
+        result = await self.registry.on_query(query_type, args)

         return 200, result

@@ -234,9 +229,8 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
         """
         return {}

-    @defer.inlineCallbacks
-    def _handle_request(self, request, room_id):
-        yield self.store.clean_room_for_join(room_id)
+    async def _handle_request(self, request, room_id):
+        await self.store.clean_room_for_join(room_id)

         return 200, {}

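Note that `EventContext.deserialize` is now called without `yield` or `await`; it has become a plain synchronous call. One way such a change is possible is to defer any database work until the context is first used, along these lines (field and method names invented for the sketch):

class LazyEventContext:
    """Toy model: construct synchronously, fetch state lazily later."""

    def __init__(self, store, state_group):
        self._store = store
        self._state_group = state_group

    @staticmethod
    def deserialize(store, blob):
        # No I/O here, so callers need neither yield nor await.
        return LazyEventContext(store, blob["state_group"])

    async def get_state_ids(self):
        # The expensive lookup happens on first real use instead.
        return await self._store.get_state_ids_for_group(self._state_group)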
@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint

@@ -52,15 +50,14 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
             "is_guest": is_guest,
         }

-    @defer.inlineCallbacks
-    def _handle_request(self, request, user_id):
+    async def _handle_request(self, request, user_id):
         content = parse_json_object_from_request(request)

         device_id = content["device_id"]
         initial_display_name = content["initial_display_name"]
         is_guest = content["is_guest"]

-        device_id, access_token = yield self.registration_handler.register_device(
+        device_id, access_token = await self.registration_handler.register_device(
             user_id, device_id, initial_display_name, is_guest
         )

@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
 from synapse.types import Requester, UserID
@@ -65,8 +63,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
             "content": content,
         }

-    @defer.inlineCallbacks
-    def _handle_request(self, request, room_id, user_id):
+    async def _handle_request(self, request, room_id, user_id):
         content = parse_json_object_from_request(request)

         remote_room_hosts = content["remote_room_hosts"]
@@ -79,7 +76,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):

         logger.info("remote_join: %s into room: %s", user_id, room_id)

-        yield self.federation_handler.do_invite_join(
+        await self.federation_handler.do_invite_join(
             remote_room_hosts, room_id, user_id, event_content
         )

@@ -123,8 +120,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             "remote_room_hosts": remote_room_hosts,
         }

-    @defer.inlineCallbacks
-    def _handle_request(self, request, room_id, user_id):
+    async def _handle_request(self, request, room_id, user_id):
         content = parse_json_object_from_request(request)

         remote_room_hosts = content["remote_room_hosts"]
@@ -137,7 +133,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
         logger.info("remote_reject_invite: %s out of room: %s", user_id, room_id)

         try:
-            event = yield self.federation_handler.do_remotely_reject_invite(
+            event = await self.federation_handler.do_remotely_reject_invite(
                 remote_room_hosts, room_id, user_id
             )
             ret = event.get_pdu_json()
@@ -150,7 +146,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             #
             logger.warn("Failed to reject invite: %s", e)

-            yield self.store.locally_reject_invite(user_id, room_id)
+            await self.store.locally_reject_invite(user_id, room_id)
             ret = {}

         return 200, ret
@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint

@@ -74,11 +72,10 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
             "address": address,
         }

-    @defer.inlineCallbacks
-    def _handle_request(self, request, user_id):
+    async def _handle_request(self, request, user_id):
         content = parse_json_object_from_request(request)

-        yield self.registration_handler.register_with_store(
+        await self.registration_handler.register_with_store(
             user_id=user_id,
             password_hash=content["password_hash"],
             was_guest=content["was_guest"],
@@ -117,14 +114,13 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
         """
         return {"auth_result": auth_result, "access_token": access_token}

-    @defer.inlineCallbacks
-    def _handle_request(self, request, user_id):
+    async def _handle_request(self, request, user_id):
         content = parse_json_object_from_request(request)

         auth_result = content["auth_result"]
         access_token = content["access_token"]

-        yield self.registration_handler.post_registration_actions(
+        await self.registration_handler.post_registration_actions(
             user_id=user_id, auth_result=auth_result, access_token=access_token
         )

@@ -87,8 +87,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):

         return payload

-    @defer.inlineCallbacks
-    def _handle_request(self, request, event_id):
+    async def _handle_request(self, request, event_id):
         with Measure(self.clock, "repl_send_event_parse"):
             content = parse_json_object_from_request(request)

@@ -101,7 +100,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             event = EventType(event_dict, internal_metadata, rejected_reason)

             requester = Requester.deserialize(self.store, content["requester"])
-            context = yield EventContext.deserialize(self.store, content["context"])
+            context = EventContext.deserialize(self.store, content["context"])

             ratelimit = content["ratelimit"]
             extra_users = [UserID.from_string(u) for u in content["extra_users"]]
@@ -113,7 +112,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             "Got event to send with ID: %s into room: %s", event.event_id, event.room_id
         )

-        yield self.event_creation_handler.persist_and_notify_client_event(
+        await self.event_creation_handler.persist_and_notify_client_event(
             requester, event, context, ratelimit=ratelimit, extra_users=extra_users
         )

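The remaining hunks port the client-server REST servlets the same way: `on_GET`/`on_POST`/`on_PUT` become `async def`, authentication and handler calls are awaited, and the `(status, body)` tuple is returned directly. Condensed to its skeleton (a paraphrase of the pattern in the hunks below, not new behaviour):

class ExampleRestServlet(RestServlet):
    async def on_GET(self, request, room_id):
        # 1. authenticate the requester
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        # 2. await the handler doing the real work
        data = await self.handler.fetch(room_id, requester.user.to_string())
        # 3. return HTTP status and JSON body as a tuple
        return 200, data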
@@ -21,8 +21,6 @@ from six.moves.urllib import parse as urlparse

 from canonicaljson import json

-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import (
     AuthError,
@@ -85,11 +83,10 @@ class RoomCreateRestServlet(TransactionRestServlet):
         set_tag("txn_id", txn_id)
         return self.txns.fetch_or_execute_request(request, self.on_POST, request)

-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request):
+        requester = await self.auth.get_user_by_req(request)

-        info = yield self._room_creation_handler.create_room(
+        info = await self._room_creation_handler.create_room(
             requester, self.get_room_config(request)
         )

@@ -154,15 +151,14 @@ class RoomStateEventRestServlet(TransactionRestServlet):
     def on_PUT_no_state_key(self, request, room_id, event_type):
         return self.on_PUT(request, room_id, event_type, "")

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id, event_type, state_key):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id, event_type, state_key):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         format = parse_string(
             request, "format", default="content", allowed_values=["content", "event"]
         )

         msg_handler = self.message_handler
-        data = yield msg_handler.get_room_data(
+        data = await msg_handler.get_room_data(
             user_id=requester.user.to_string(),
             room_id=room_id,
             event_type=event_type,
@@ -179,9 +175,8 @@ class RoomStateEventRestServlet(TransactionRestServlet):
         elif format == "content":
             return 200, data.get_dict()["content"]

-    @defer.inlineCallbacks
-    def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
+        requester = await self.auth.get_user_by_req(request)

         if txn_id:
             set_tag("txn_id", txn_id)
@@ -200,7 +195,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):

         if event_type == EventTypes.Member:
             membership = content.get("membership", None)
-            event = yield self.room_member_handler.update_membership(
+            event = await self.room_member_handler.update_membership(
                 requester,
                 target=UserID.from_string(state_key),
                 room_id=room_id,
@@ -208,7 +203,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
                 content=content,
             )
         else:
-            event = yield self.event_creation_handler.create_and_send_nonmember_event(
+            event = await self.event_creation_handler.create_and_send_nonmember_event(
                 requester, event_dict, txn_id=txn_id
             )

@@ -231,9 +226,8 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
         register_txn_path(self, PATTERNS, http_server, with_get=True)

-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, event_type, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request, room_id, event_type, txn_id=None):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         content = parse_json_object_from_request(request)

         event_dict = {
@@ -246,7 +240,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         if b"ts" in request.args and requester.app_service:
             event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)

-        event = yield self.event_creation_handler.create_and_send_nonmember_event(
+        event = await self.event_creation_handler.create_and_send_nonmember_event(
             requester, event_dict, txn_id=txn_id
         )

@@ -276,9 +270,8 @@ class JoinRoomAliasServlet(TransactionRestServlet):
         PATTERNS = "/join/(?P<room_identifier>[^/]*)"
         register_txn_path(self, PATTERNS, http_server)

-    @defer.inlineCallbacks
-    def on_POST(self, request, room_identifier, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request, room_identifier, txn_id=None):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)

         try:
             content = parse_json_object_from_request(request)
@@ -298,14 +291,14 @@ class JoinRoomAliasServlet(TransactionRestServlet):
         elif RoomAlias.is_valid(room_identifier):
             handler = self.room_member_handler
             room_alias = RoomAlias.from_string(room_identifier)
-            room_id, remote_room_hosts = yield handler.lookup_room_alias(room_alias)
+            room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
             room_id = room_id.to_string()
         else:
             raise SynapseError(
                 400, "%s was not legal room ID or room alias" % (room_identifier,)
             )

-        yield self.room_member_handler.update_membership(
+        await self.room_member_handler.update_membership(
             requester=requester,
             target=requester.user,
             room_id=room_id,
@@ -335,12 +328,11 @@ class PublicRoomListRestServlet(TransactionRestServlet):
         self.hs = hs
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request):
+    async def on_GET(self, request):
         server = parse_string(request, "server", default=None)

         try:
-            yield self.auth.get_user_by_req(request, allow_guest=True)
+            await self.auth.get_user_by_req(request, allow_guest=True)
         except InvalidClientCredentialsError as e:
             # Option to allow servers to require auth when accessing
             # /publicRooms via CS API. This is especially helpful in private
@@ -367,19 +359,18 @@ class PublicRoomListRestServlet(TransactionRestServlet):

         handler = self.hs.get_room_list_handler()
         if server:
-            data = yield handler.get_remote_public_room_list(
+            data = await handler.get_remote_public_room_list(
                 server, limit=limit, since_token=since_token
             )
         else:
-            data = yield handler.get_local_public_room_list(
+            data = await handler.get_local_public_room_list(
                 limit=limit, since_token=since_token
             )

         return 200, data

-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request):
+        await self.auth.get_user_by_req(request, allow_guest=True)

         server = parse_string(request, "server", default=None)
         content = parse_json_object_from_request(request)
@@ -408,7 +399,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):

         handler = self.hs.get_room_list_handler()
         if server:
-            data = yield handler.get_remote_public_room_list(
+            data = await handler.get_remote_public_room_list(
                 server,
                 limit=limit,
                 since_token=since_token,
@@ -417,7 +408,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
                 third_party_instance_id=third_party_instance_id,
             )
         else:
-            data = yield handler.get_local_public_room_list(
+            data = await handler.get_local_public_room_list(
                 limit=limit,
                 since_token=since_token,
                 search_filter=search_filter,
@@ -436,10 +427,9 @@ class RoomMemberListRestServlet(RestServlet):
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
+    async def on_GET(self, request, room_id):
         # TODO support Pagination stream API (limit/tokens)
-        requester = yield self.auth.get_user_by_req(request)
+        requester = await self.auth.get_user_by_req(request)
         handler = self.message_handler

         # request the state as of a given event, as identified by a stream token,
@@ -459,7 +449,7 @@ class RoomMemberListRestServlet(RestServlet):
         membership = parse_string(request, "membership")
         not_membership = parse_string(request, "not_membership")

-        events = yield handler.get_state_events(
+        events = await handler.get_state_events(
             room_id=room_id,
             user_id=requester.user.to_string(),
             at_token=at_token,
@@ -488,11 +478,10 @@ class JoinedRoomMemberListRestServlet(RestServlet):
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request)

-        users_with_profile = yield self.message_handler.get_joined_members(
+        users_with_profile = await self.message_handler.get_joined_members(
             requester, room_id
         )

@@ -508,9 +497,8 @@ class RoomMessageListRestServlet(RestServlet):
         self.pagination_handler = hs.get_pagination_handler()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         pagination_config = PaginationConfig.from_request(request, default_limit=10)
         as_client_event = b"raw" not in request.args
         filter_bytes = parse_string(request, b"filter", encoding=None)
@@ -521,7 +509,7 @@ class RoomMessageListRestServlet(RestServlet):
             as_client_event = False
         else:
             event_filter = None
-        msgs = yield self.pagination_handler.get_messages(
+        msgs = await self.pagination_handler.get_messages(
             room_id=room_id,
             requester=requester,
             pagin_config=pagination_config,
@@ -541,11 +529,10 @@ class RoomStateRestServlet(RestServlet):
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         # Get all the current state for this room
-        events = yield self.message_handler.get_state_events(
+        events = await self.message_handler.get_state_events(
             room_id=room_id,
             user_id=requester.user.to_string(),
             is_guest=requester.is_guest,
@@ -562,11 +549,10 @@ class RoomInitialSyncRestServlet(RestServlet):
         self.initial_sync_handler = hs.get_initial_sync_handler()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         pagination_config = PaginationConfig.from_request(request)
-        content = yield self.initial_sync_handler.room_initial_sync(
+        content = await self.initial_sync_handler.room_initial_sync(
             room_id=room_id, requester=requester, pagin_config=pagination_config
         )
         return 200, content
@@ -584,11 +570,10 @@ class RoomEventServlet(RestServlet):
         self._event_serializer = hs.get_event_client_serializer()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id, event_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         try:
-            event = yield self.event_handler.get_event(
+            event = await self.event_handler.get_event(
                 requester.user, room_id, event_id
             )
         except AuthError:
@@ -599,7 +584,7 @@ class RoomEventServlet(RestServlet):

         time_now = self.clock.time_msec()
         if event:
-            event = yield self._event_serializer.serialize_event(event, time_now)
+            event = await self._event_serializer.serialize_event(event, time_now)
             return 200, event

         return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
@@ -617,9 +602,8 @@ class RoomEventContextServlet(RestServlet):
         self._event_serializer = hs.get_event_client_serializer()
         self.auth = hs.get_auth()

-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id, event_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)

         limit = parse_integer(request, "limit", default=10)

@@ -631,7 +615,7 @@ class RoomEventContextServlet(RestServlet):
         else:
             event_filter = None

-        results = yield self.room_context_handler.get_event_context(
+        results = await self.room_context_handler.get_event_context(
             requester.user, room_id, event_id, limit, event_filter
         )

@@ -639,16 +623,16 @@ class RoomEventContextServlet(RestServlet):
             raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
|
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
|
||||||
|
|
||||||
time_now = self.clock.time_msec()
|
time_now = self.clock.time_msec()
|
||||||
results["events_before"] = yield self._event_serializer.serialize_events(
|
results["events_before"] = await self._event_serializer.serialize_events(
|
||||||
results["events_before"], time_now
|
results["events_before"], time_now
|
||||||
)
|
)
|
||||||
results["event"] = yield self._event_serializer.serialize_event(
|
results["event"] = await self._event_serializer.serialize_event(
|
||||||
results["event"], time_now
|
results["event"], time_now
|
||||||
)
|
)
|
||||||
results["events_after"] = yield self._event_serializer.serialize_events(
|
results["events_after"] = await self._event_serializer.serialize_events(
|
||||||
results["events_after"], time_now
|
results["events_after"], time_now
|
||||||
)
|
)
|
||||||
results["state"] = yield self._event_serializer.serialize_events(
|
results["state"] = await self._event_serializer.serialize_events(
|
||||||
results["state"], time_now
|
results["state"], time_now
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -665,11 +649,10 @@ class RoomForgetRestServlet(TransactionRestServlet):
|
|||||||
PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
|
PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
|
||||||
register_txn_path(self, PATTERNS, http_server)
|
register_txn_path(self, PATTERNS, http_server)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def on_POST(self, request, room_id, txn_id=None):
|
||||||
def on_POST(self, request, room_id, txn_id=None):
|
requester = await self.auth.get_user_by_req(request, allow_guest=False)
|
||||||
requester = yield self.auth.get_user_by_req(request, allow_guest=False)
|
|
||||||
|
|
||||||
yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
|
await self.room_member_handler.forget(user=requester.user, room_id=room_id)
|
||||||
|
|
||||||
return 200, {}
|
return 200, {}
|
||||||
|
|
||||||
@ -696,9 +679,8 @@ class RoomMembershipRestServlet(TransactionRestServlet):
|
|||||||
)
|
)
|
||||||
register_txn_path(self, PATTERNS, http_server)
|
register_txn_path(self, PATTERNS, http_server)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def on_POST(self, request, room_id, membership_action, txn_id=None):
|
||||||
def on_POST(self, request, room_id, membership_action, txn_id=None):
|
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||||
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
|
|
||||||
|
|
||||||
if requester.is_guest and membership_action not in {
|
if requester.is_guest and membership_action not in {
|
||||||
Membership.JOIN,
|
Membership.JOIN,
|
||||||
@ -714,7 +696,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
|
|||||||
content = {}
|
content = {}
|
||||||
|
|
||||||
if membership_action == "invite" and self._has_3pid_invite_keys(content):
|
if membership_action == "invite" and self._has_3pid_invite_keys(content):
|
||||||
yield self.room_member_handler.do_3pid_invite(
|
await self.room_member_handler.do_3pid_invite(
|
||||||
room_id,
|
room_id,
|
||||||
requester.user,
|
requester.user,
|
||||||
content["medium"],
|
content["medium"],
|
||||||
@ -735,7 +717,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
|
|||||||
if "reason" in content and membership_action in ["kick", "ban"]:
|
if "reason" in content and membership_action in ["kick", "ban"]:
|
||||||
event_content = {"reason": content["reason"]}
|
event_content = {"reason": content["reason"]}
|
||||||
|
|
||||||
yield self.room_member_handler.update_membership(
|
await self.room_member_handler.update_membership(
|
||||||
requester=requester,
|
requester=requester,
|
||||||
target=target,
|
target=target,
|
||||||
room_id=room_id,
|
room_id=room_id,
|
||||||
@ -777,12 +759,11 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
|
|||||||
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
|
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
|
||||||
register_txn_path(self, PATTERNS, http_server)
|
register_txn_path(self, PATTERNS, http_server)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def on_POST(self, request, room_id, event_id, txn_id=None):
|
||||||
def on_POST(self, request, room_id, event_id, txn_id=None):
|
requester = await self.auth.get_user_by_req(request)
|
||||||
requester = yield self.auth.get_user_by_req(request)
|
|
||||||
content = parse_json_object_from_request(request)
|
content = parse_json_object_from_request(request)
|
||||||
|
|
||||||
event = yield self.event_creation_handler.create_and_send_nonmember_event(
|
event = await self.event_creation_handler.create_and_send_nonmember_event(
|
||||||
requester,
|
requester,
|
||||||
{
|
{
|
||||||
"type": EventTypes.Redaction,
|
"type": EventTypes.Redaction,
|
||||||
@ -816,29 +797,28 @@ class RoomTypingRestServlet(RestServlet):
|
|||||||
self.typing_handler = hs.get_typing_handler()
|
self.typing_handler = hs.get_typing_handler()
|
||||||
self.auth = hs.get_auth()
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def on_PUT(self, request, room_id, user_id):
|
||||||
def on_PUT(self, request, room_id, user_id):
|
requester = await self.auth.get_user_by_req(request)
|
||||||
requester = yield self.auth.get_user_by_req(request)
|
|
||||||
|
|
||||||
room_id = urlparse.unquote(room_id)
|
room_id = urlparse.unquote(room_id)
|
||||||
target_user = UserID.from_string(urlparse.unquote(user_id))
|
target_user = UserID.from_string(urlparse.unquote(user_id))
|
||||||
|
|
||||||
content = parse_json_object_from_request(request)
|
content = parse_json_object_from_request(request)
|
||||||
|
|
||||||
yield self.presence_handler.bump_presence_active_time(requester.user)
|
await self.presence_handler.bump_presence_active_time(requester.user)
|
||||||
|
|
||||||
# Limit timeout to stop people from setting silly typing timeouts.
|
# Limit timeout to stop people from setting silly typing timeouts.
|
||||||
timeout = min(content.get("timeout", 30000), 120000)
|
timeout = min(content.get("timeout", 30000), 120000)
|
||||||
|
|
||||||
if content["typing"]:
|
if content["typing"]:
|
||||||
yield self.typing_handler.started_typing(
|
await self.typing_handler.started_typing(
|
||||||
target_user=target_user,
|
target_user=target_user,
|
||||||
auth_user=requester.user,
|
auth_user=requester.user,
|
||||||
room_id=room_id,
|
room_id=room_id,
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
yield self.typing_handler.stopped_typing(
|
await self.typing_handler.stopped_typing(
|
||||||
target_user=target_user, auth_user=requester.user, room_id=room_id
|
target_user=target_user, auth_user=requester.user, room_id=room_id
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -853,14 +833,13 @@ class SearchRestServlet(RestServlet):
|
|||||||
self.handlers = hs.get_handlers()
|
self.handlers = hs.get_handlers()
|
||||||
self.auth = hs.get_auth()
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def on_POST(self, request):
|
||||||
def on_POST(self, request):
|
requester = await self.auth.get_user_by_req(request)
|
||||||
requester = yield self.auth.get_user_by_req(request)
|
|
||||||
|
|
||||||
content = parse_json_object_from_request(request)
|
content = parse_json_object_from_request(request)
|
||||||
|
|
||||||
batch = parse_string(request, "next_batch")
|
batch = parse_string(request, "next_batch")
|
||||||
results = yield self.handlers.search_handler.search(
|
results = await self.handlers.search_handler.search(
|
||||||
requester.user, content, batch
|
requester.user, content, batch
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -875,11 +854,10 @@ class JoinedRoomsRestServlet(RestServlet):
|
|||||||
self.store = hs.get_datastore()
|
self.store = hs.get_datastore()
|
||||||
self.auth = hs.get_auth()
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def on_GET(self, request):
|
||||||
def on_GET(self, request):
|
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||||
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
|
|
||||||
|
|
||||||
room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
|
room_ids = await self.store.get_rooms_for_user(requester.user.to_string())
|
||||||
return 200, {"joined_rooms": list(room_ids)}
|
return 200, {"joined_rooms": list(room_ids)}
|
||||||
|
|
||||||
|
|
||||||
|
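Every hunk above applies the same mechanical conversion: drop the @defer.inlineCallbacks decorator, declare the handler as an async def, and turn each yield of a Deferred into an await. A standalone sketch of the before/after shape follows — the servlet and handler below are illustrative stand-ins, not classes from this commit:

from twisted.internet import defer


class FakeHandler(object):
    """Stand-in for a Synapse handler; returns an already-fired Deferred."""

    def get_state_events(self, room_id):
        return defer.succeed([{"type": "m.room.create", "room_id": room_id}])


class OldStyleServlet(object):
    """Before: a generator driven by inlineCallbacks."""

    def __init__(self, handler):
        self.handler = handler

    @defer.inlineCallbacks
    def on_GET(self, room_id):
        events = yield self.handler.get_state_events(room_id)
        return 200, events


class NewStyleServlet(object):
    """After: a native coroutine. Twisted's Deferred implements __await__,
    so the call sites only change yield to await."""

    def __init__(self, handler):
        self.handler = handler

    async def on_GET(self, room_id):
        events = await self.handler.get_state_events(room_id)
        return 200, events


if __name__ == "__main__":
    OldStyleServlet(FakeHandler()).on_GET("!a:example.org").addCallback(print)
    defer.ensureDeferred(
        NewStyleServlet(FakeHandler()).on_GET("!a:example.org")
    ).addCallback(print)
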
@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.http.servlet import RestServlet, parse_json_object_from_request

 from ._base import client_patterns
@@ -34,17 +32,16 @@ class ReadMarkerRestServlet(RestServlet):
         self.read_marker_handler = hs.get_read_marker_handler()
         self.presence_handler = hs.get_presence_handler()

-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request)

-        yield self.presence_handler.bump_presence_active_time(requester.user)
+        await self.presence_handler.bump_presence_active_time(requester.user)

         body = parse_json_object_from_request(request)

         read_event_id = body.get("m.read", None)
         if read_event_id:
-            yield self.receipts_handler.received_client_receipt(
+            await self.receipts_handler.received_client_receipt(
                 room_id,
                 "m.read",
                 user_id=requester.user.to_string(),
@@ -53,7 +50,7 @@ class ReadMarkerRestServlet(RestServlet):

         read_marker_event_id = body.get("m.fully_read", None)
         if read_marker_event_id:
-            yield self.read_marker_handler.received_client_read_marker(
+            await self.read_marker_handler.received_client_read_marker(
                 room_id,
                 user_id=requester.user.to_string(),
                 event_id=read_marker_event_id,
@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet

@@ -39,16 +37,15 @@ class ReceiptRestServlet(RestServlet):
         self.receipts_handler = hs.get_receipts_handler()
         self.presence_handler = hs.get_presence_handler()

-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, receipt_type, event_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request, room_id, receipt_type, event_id):
+        requester = await self.auth.get_user_by_req(request)

         if receipt_type != "m.read":
             raise SynapseError(400, "Receipt type must be 'm.read'")

-        yield self.presence_handler.bump_presence_active_time(requester.user)
+        await self.presence_handler.bump_presence_active_time(requester.user)

-        yield self.receipts_handler.received_client_receipt(
+        await self.receipts_handler.received_client_receipt(
             room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
         )

@@ -112,9 +112,14 @@ class SyncRestServlet(RestServlet):
         full_state = parse_boolean(request, "full_state", default=False)

         logger.debug(
-            "/sync: user=%r, timeout=%r, since=%r,"
-            " set_presence=%r, filter_id=%r, device_id=%r"
-            % (user, timeout, since, set_presence, filter_id, device_id)
+            "/sync: user=%r, timeout=%r, since=%r, "
+            "set_presence=%r, filter_id=%r, device_id=%r",
+            user,
+            timeout,
+            since,
+            set_presence,
+            filter_id,
+            device_id,
         )

         request_key = (user, timeout, since, filter_id, full_state, device_id)
@@ -117,8 +117,10 @@ class PreviewUrlResource(DirectServeResource):
                 pattern = entry[attrib]
                 value = getattr(url_tuple, attrib)
                 logger.debug(
-                    ("Matching attrib '%s' with value '%s' against" " pattern '%s'")
-                    % (attrib, value, pattern)
+                    "Matching attrib '%s' with value '%s' against" " pattern '%s'",
+                    attrib,
+                    value,
+                    pattern,
                 )

                 if value is None:
@@ -186,7 +188,7 @@ class PreviewUrlResource(DirectServeResource):

         media_info = yield self._download_url(url, user)

-        logger.debug("got media_info of '%s'" % media_info)
+        logger.debug("got media_info of '%s'", media_info)

         if _is_media(media_info["media_type"]):
             file_id = media_info["filesystem_id"]
@@ -254,7 +256,7 @@ class PreviewUrlResource(DirectServeResource):
                 og["og:image:width"] = dims["width"]
                 og["og:image:height"] = dims["height"]
             else:
-                logger.warn("Couldn't get dims for %s" % og["og:image"])
+                logger.warn("Couldn't get dims for %s", og["og:image"])

             og["og:image"] = "mxc://%s/%s" % (
                 self.server_name,
@@ -268,7 +270,7 @@ class PreviewUrlResource(DirectServeResource):
             logger.warn("Failed to find any OG data in %s", url)
             og = {}

-        logger.debug("Calculated OG for %s as %s" % (url, og))
+        logger.debug("Calculated OG for %s as %s", url, og)

         jsonog = json.dumps(og)

@@ -297,7 +299,7 @@ class PreviewUrlResource(DirectServeResource):

         with self.media_storage.store_into_file(file_info) as (f, fname, finish):
             try:
-                logger.debug("Trying to get url '%s'" % url)
+                logger.debug("Trying to get url '%s'", url)
                 length, headers, uri, code = yield self.client.get_file(
                     url, output_stream=f, max_size=self.max_spider_size
                 )
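Alongside the async conversion, the sync and URL-preview hunks above switch every log call from eager "%" interpolation to the logging module's lazy argument passing. A minimal illustration of the difference (logger name and values are invented):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

media_info = {"media_type": "image/png"}

# Before: the message string is built eagerly with "%", whether or not a
# handler will ever emit the DEBUG record.
logger.debug("got media_info of '%s'" % media_info)

# After: interpolation is deferred to the logging framework, which only
# formats the message if the record is actually emitted.
logger.debug("got media_info of '%s'", media_info)
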
@@ -95,6 +95,7 @@ from synapse.server_notices.worker_server_notices_sender import (
     WorkerServerNoticesSender,
 )
 from synapse.state import StateHandler, StateResolutionHandler
+from synapse.storage import DataStores, Storage
 from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
@@ -196,6 +197,7 @@ class HomeServer(object):
         "account_validity_handler",
         "saml_handler",
         "event_client_serializer",
+        "storage",
     ]

     REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
@@ -224,7 +226,7 @@ class HomeServer(object):
         self.admin_redaction_ratelimiter = Ratelimiter()
         self.registration_ratelimiter = Ratelimiter()

-        self.datastore = None
+        self.datastores = None

         # Other kwargs are explicit dependencies
         for depname in kwargs:
@@ -233,7 +235,8 @@ class HomeServer(object):
     def setup(self):
         logger.info("Setting up.")
         with self.get_db_conn() as conn:
-            self.datastore = self.DATASTORE_CLASS(conn, self)
+            datastore = self.DATASTORE_CLASS(conn, self)
+            self.datastores = DataStores(datastore, conn, self)
             conn.commit()
         logger.info("Finished setting up.")

@@ -266,7 +269,7 @@ class HomeServer(object):
         return self.clock

     def get_datastore(self):
-        return self.datastore
+        return self.datastores.main

     def get_config(self):
         return self.config
@@ -537,6 +540,9 @@ class HomeServer(object):
     def build_event_client_serializer(self):
         return EventClientSerializer(self)

+    def build_storage(self) -> Storage:
+        return Storage(self, self.datastores)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)

@@ -27,7 +27,24 @@ data stores associated with them (e.g. the schema version tables), which are
 stored in `synapse.storage.schema`.
 """

-from synapse.storage.data_stores.main import DataStore  # noqa: F401
+from synapse.storage.data_stores import DataStores
+from synapse.storage.data_stores.main import DataStore
+from synapse.storage.persist_events import EventsPersistenceStorage
+
+__all__ = ["DataStores", "DataStore"]
+
+
+class Storage(object):
+    """The high level interfaces for talking to various storage layers.
+    """
+
+    def __init__(self, hs, stores: DataStores):
+        # We include the main data store here mainly so that we don't have to
+        # rewrite all the existing code to split it into high vs low level
+        # interfaces.
+        self.main = stores.main
+
+        self.persistence = EventsPersistenceStorage(hs, stores)


 def are_all_users_on_domain(txn, database_engine, domain):
@@ -94,13 +94,16 @@ class BackgroundUpdateStore(SQLBaseStore):
         self._all_done = False

     def start_doing_background_updates(self):
-        run_as_background_process("background_updates", self._run_background_updates)
+        run_as_background_process("background_updates", self.run_background_updates)

     @defer.inlineCallbacks
-    def _run_background_updates(self):
+    def run_background_updates(self, sleep=True):
         logger.info("Starting background schema updates")
         while True:
-            yield self.hs.get_clock().sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)
+            if sleep:
+                yield self.hs.get_clock().sleep(
+                    self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0
+                )

             try:
                 result = yield self.do_next_background_update(
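The rename from _run_background_updates to run_background_updates, together with the new sleep flag, makes the update loop callable from outside the homeserver: with sleep=False the BACKGROUND_UPDATE_INTERVAL_MS pause between iterations is skipped, so a script can drain every pending update back to back. A hypothetical one-shot driver (the store object is assumed to be a BackgroundUpdateStore):

from twisted.internet import defer


@defer.inlineCallbacks
def drain_background_updates(store):
    # Runs the pending updates consecutively; the method returns once
    # do_next_background_update reports there is nothing left to do.
    yield store.run_background_updates(sleep=False)
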
@@ -12,3 +12,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+
+class DataStores(object):
+    """The various data stores.
+
+    These are low level interfaces to physical databases.
+    """
+
+    def __init__(self, main_store, db_conn, hs):
+        # Note we pass in the main store here as workers use a different main
+        # store.
+        self.main = main_store
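Taken together, the server.py, storage/__init__.py and data_stores/__init__.py hunks above split storage into two layers: DataStores holds the low-level per-database stores (currently just main), while Storage stacks high-level interfaces such as event persistence on top, re-exposing main so existing callers keep working. A toy model of the wiring — only the two class names come from the diff, the rest is illustrative:

class FakeMainStore(object):
    """Stand-in for the main DataStore."""

    def get_rooms_for_user(self, user_id):
        return ["!room:example.org"]


class DataStores(object):
    """Low level interfaces to the physical databases."""

    def __init__(self, main_store):
        self.main = main_store


class Storage(object):
    """High level interfaces layered over the data stores."""

    def __init__(self, stores):
        # Exposing the main store here avoids rewriting every caller that
        # still expects a flat datastore object.
        self.main = stores.main


stores = DataStores(FakeMainStore())
storage = Storage(stores)
print(storage.main.get_rooms_for_user("@alice:example.org"))
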
@@ -321,9 +321,17 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         def _delete_e2e_room_keys_version_txn(txn):
             if version is None:
                 this_version = self._get_current_version(txn, user_id)
+                if this_version is None:
+                    raise StoreError(404, "No current backup version")
             else:
                 this_version = version

+            self._simple_delete_txn(
+                txn,
+                table="e2e_room_keys",
+                keyvalues={"user_id": user_id, "version": this_version},
+            )
+
             return self._simple_update_one_txn(
                 txn,
                 table="e2e_room_keys_versions",
@@ -248,6 +248,73 @@ class EndToEndKeyWorkerStore(SQLBaseStore):

         return self.runInteraction("count_e2e_one_time_keys", _count_e2e_one_time_keys)

+    def _get_e2e_cross_signing_key_txn(self, txn, user_id, key_type, from_user_id=None):
+        """Returns a user's cross-signing key.
+
+        Args:
+            txn (twisted.enterprise.adbapi.Connection): db connection
+            user_id (str): the user whose key is being requested
+            key_type (str): the type of key that is being set: either 'master'
+                for a master key, 'self_signing' for a self-signing key, or
+                'user_signing' for a user-signing key
+            from_user_id (str): if specified, signatures made by this user on
+                the key will be included in the result
+
+        Returns:
+            dict of the key data or None if not found
+        """
+        sql = (
+            "SELECT keydata "
+            " FROM e2e_cross_signing_keys "
+            " WHERE user_id = ? AND keytype = ? ORDER BY stream_id DESC LIMIT 1"
+        )
+        txn.execute(sql, (user_id, key_type))
+        row = txn.fetchone()
+        if not row:
+            return None
+        key = json.loads(row[0])
+
+        device_id = None
+        for k in key["keys"].values():
+            device_id = k
+
+        if from_user_id is not None:
+            sql = (
+                "SELECT key_id, signature "
+                " FROM e2e_cross_signing_signatures "
+                " WHERE user_id = ? "
+                " AND target_user_id = ? "
+                " AND target_device_id = ? "
+            )
+            txn.execute(sql, (from_user_id, user_id, device_id))
+            row = txn.fetchone()
+            if row:
+                key.setdefault("signatures", {}).setdefault(from_user_id, {})[
+                    row[0]
+                ] = row[1]
+
+        return key
+
+    def get_e2e_cross_signing_key(self, user_id, key_type, from_user_id=None):
+        """Returns a user's cross-signing key.
+
+        Args:
+            user_id (str): the user whose self-signing key is being requested
+            key_type (str): the type of cross-signing key to get
+            from_user_id (str): if specified, signatures made by this user on
+                the self-signing key will be included in the result
+
+        Returns:
+            dict of the key data or None if not found
+        """
+        return self.runInteraction(
+            "get_e2e_cross_signing_key",
+            self._get_e2e_cross_signing_key_txn,
+            user_id,
+            key_type,
+            from_user_id,
+        )
+

 class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
     def set_e2e_device_keys(self, user_id, device_id, time_now, device_keys):
@@ -426,73 +493,6 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
             key,
         )

-    def _get_e2e_cross_signing_key_txn(self, txn, user_id, key_type, from_user_id=None):
-        """Returns a user's cross-signing key.
-
-        Args:
-            txn (twisted.enterprise.adbapi.Connection): db connection
-            user_id (str): the user whose key is being requested
-            key_type (str): the type of key that is being set: either 'master'
-                for a master key, 'self_signing' for a self-signing key, or
-                'user_signing' for a user-signing key
-            from_user_id (str): if specified, signatures made by this user on
-                the key will be included in the result
-
-        Returns:
-            dict of the key data or None if not found
-        """
-        sql = (
-            "SELECT keydata "
-            " FROM e2e_cross_signing_keys "
-            " WHERE user_id = ? AND keytype = ? ORDER BY stream_id DESC LIMIT 1"
-        )
-        txn.execute(sql, (user_id, key_type))
-        row = txn.fetchone()
-        if not row:
-            return None
-        key = json.loads(row[0])
-
-        device_id = None
-        for k in key["keys"].values():
-            device_id = k
-
-        if from_user_id is not None:
-            sql = (
-                "SELECT key_id, signature "
-                " FROM e2e_cross_signing_signatures "
-                " WHERE user_id = ? "
-                " AND target_user_id = ? "
-                " AND target_device_id = ? "
-            )
-            txn.execute(sql, (from_user_id, user_id, device_id))
-            row = txn.fetchone()
-            if row:
-                key.setdefault("signatures", {}).setdefault(from_user_id, {})[
-                    row[0]
-                ] = row[1]
-
-        return key
-
-    def get_e2e_cross_signing_key(self, user_id, key_type, from_user_id=None):
-        """Returns a user's cross-signing key.
-
-        Args:
-            user_id (str): the user whose self-signing key is being requested
-            key_type (str): the type of cross-signing key to get
-            from_user_id (str): if specified, signatures made by this user on
-                the self-signing key will be included in the result
-
-        Returns:
-            dict of the key data or None if not found
-        """
-        return self.runInteraction(
-            "get_e2e_cross_signing_key",
-            self._get_e2e_cross_signing_key_txn,
-            user_id,
-            key_type,
-            from_user_id,
-        )
-
     def store_e2e_cross_signing_signatures(self, user_id, signatures):
         """Stores cross-signing signatures.

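The two hunks above are a pure move: the cross-signing key lookups migrate out of EndToEndKeyStore into EndToEndKeyWorkerStore, so worker processes can answer them too. An illustrative caller for the relocated method — the store is assumed to mix in the worker store, and the user IDs are invented:

from twisted.internet import defer


@defer.inlineCallbacks
def print_master_key(store):
    key = yield store.get_e2e_cross_signing_key(
        "@alice:example.org", "master", from_user_id="@bob:example.org"
    )
    if key is None:
        print("no master key uploaded")
    else:
        # "keys" maps key identifiers to base64-encoded public keys.
        print(sorted(key["keys"]))
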
@@ -364,9 +364,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         )

     def _get_backfill_events(self, txn, room_id, event_list, limit):
-        logger.debug(
-            "_get_backfill_events: %s, %s, %s", room_id, repr(event_list), limit
-        )
+        logger.debug("_get_backfill_events: %s, %r, %s", room_id, event_list, limit)

         event_results = set()

@ -17,14 +17,14 @@
|
|||||||
|
|
||||||
import itertools
|
import itertools
|
||||||
import logging
|
import logging
|
||||||
from collections import Counter as c_counter, OrderedDict, deque, namedtuple
|
from collections import Counter as c_counter, OrderedDict, namedtuple
|
||||||
from functools import wraps
|
from functools import wraps
|
||||||
|
|
||||||
from six import iteritems, text_type
|
from six import iteritems, text_type
|
||||||
from six.moves import range
|
from six.moves import range
|
||||||
|
|
||||||
from canonicaljson import json
|
from canonicaljson import json
|
||||||
from prometheus_client import Counter, Histogram
|
from prometheus_client import Counter
|
||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
@ -34,11 +34,9 @@ from synapse.api.errors import SynapseError
|
|||||||
from synapse.events import EventBase # noqa: F401
|
from synapse.events import EventBase # noqa: F401
|
||||||
from synapse.events.snapshot import EventContext # noqa: F401
|
from synapse.events.snapshot import EventContext # noqa: F401
|
||||||
from synapse.events.utils import prune_event_dict
|
from synapse.events.utils import prune_event_dict
|
||||||
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
|
|
||||||
from synapse.logging.utils import log_function
|
from synapse.logging.utils import log_function
|
||||||
from synapse.metrics import BucketCollector
|
from synapse.metrics import BucketCollector
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.state import StateResolutionStore
|
|
||||||
from synapse.storage._base import make_in_list_sql_clause
|
from synapse.storage._base import make_in_list_sql_clause
|
||||||
from synapse.storage.background_updates import BackgroundUpdateStore
|
from synapse.storage.background_updates import BackgroundUpdateStore
|
||||||
from synapse.storage.data_stores.main.event_federation import EventFederationStore
|
from synapse.storage.data_stores.main.event_federation import EventFederationStore
|
||||||
@ -46,10 +44,8 @@ from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
|
|||||||
from synapse.storage.data_stores.main.state import StateGroupWorkerStore
|
from synapse.storage.data_stores.main.state import StateGroupWorkerStore
|
||||||
from synapse.types import RoomStreamToken, get_domain_from_id
|
from synapse.types import RoomStreamToken, get_domain_from_id
|
||||||
from synapse.util import batch_iter
|
from synapse.util import batch_iter
|
||||||
from synapse.util.async_helpers import ObservableDeferred
|
|
||||||
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
|
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
|
||||||
from synapse.util.frozenutils import frozendict_json_encoder
|
from synapse.util.frozenutils import frozendict_json_encoder
|
||||||
from synapse.util.metrics import Measure
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -60,37 +56,6 @@ event_counter = Counter(
|
|||||||
["type", "origin_type", "origin_entity"],
|
["type", "origin_type", "origin_entity"],
|
||||||
)
|
)
|
||||||
|
|
||||||
# The number of times we are recalculating the current state
|
|
||||||
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
|
|
||||||
|
|
||||||
# The number of times we are recalculating state when there is only a
|
|
||||||
# single forward extremity
|
|
||||||
state_delta_single_event_counter = Counter(
|
|
||||||
"synapse_storage_events_state_delta_single_event", ""
|
|
||||||
)
|
|
||||||
|
|
||||||
# The number of times we are reculating state when we could have resonably
|
|
||||||
# calculated the delta when we calculated the state for an event we were
|
|
||||||
# persisting.
|
|
||||||
state_delta_reuse_delta_counter = Counter(
|
|
||||||
"synapse_storage_events_state_delta_reuse_delta", ""
|
|
||||||
)
|
|
||||||
|
|
||||||
# The number of forward extremities for each new event.
|
|
||||||
forward_extremities_counter = Histogram(
|
|
||||||
"synapse_storage_events_forward_extremities_persisted",
|
|
||||||
"Number of forward extremities for each new event",
|
|
||||||
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
|
|
||||||
)
|
|
||||||
|
|
||||||
# The number of stale forward extremities for each new event. Stale extremities
|
|
||||||
# are those that were in the previous set of extremities as well as the new.
|
|
||||||
stale_forward_extremities_counter = Histogram(
|
|
||||||
"synapse_storage_events_stale_forward_extremities_persisted",
|
|
||||||
"Number of unchanged forward extremities for each new event",
|
|
||||||
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def encode_json(json_object):
|
def encode_json(json_object):
|
||||||
"""
|
"""
|
||||||
@ -102,110 +67,6 @@ def encode_json(json_object):
|
|||||||
return out
|
return out
|
||||||
|
|
||||||
|
|
||||||
class _EventPeristenceQueue(object):
|
|
||||||
"""Queues up events so that they can be persisted in bulk with only one
|
|
||||||
concurrent transaction per room.
|
|
||||||
"""
|
|
||||||
|
|
||||||
_EventPersistQueueItem = namedtuple(
|
|
||||||
"_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self._event_persist_queues = {}
|
|
||||||
self._currently_persisting_rooms = set()
|
|
||||||
|
|
||||||
def add_to_queue(self, room_id, events_and_contexts, backfilled):
|
|
||||||
"""Add events to the queue, with the given persist_event options.
|
|
||||||
|
|
||||||
NB: due to the normal usage pattern of this method, it does *not*
|
|
||||||
follow the synapse logcontext rules, and leaves the logcontext in
|
|
||||||
place whether or not the returned deferred is ready.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
room_id (str):
|
|
||||||
events_and_contexts (list[(EventBase, EventContext)]):
|
|
||||||
backfilled (bool):
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
defer.Deferred: a deferred which will resolve once the events are
|
|
||||||
persisted. Runs its callbacks *without* a logcontext.
|
|
||||||
"""
|
|
||||||
queue = self._event_persist_queues.setdefault(room_id, deque())
|
|
||||||
if queue:
|
|
||||||
# if the last item in the queue has the same `backfilled` setting,
|
|
||||||
# we can just add these new events to that item.
|
|
||||||
end_item = queue[-1]
|
|
||||||
if end_item.backfilled == backfilled:
|
|
||||||
end_item.events_and_contexts.extend(events_and_contexts)
|
|
||||||
return end_item.deferred.observe()
|
|
||||||
|
|
||||||
deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
|
|
||||||
|
|
||||||
queue.append(
|
|
||||||
self._EventPersistQueueItem(
|
|
||||||
events_and_contexts=events_and_contexts,
|
|
||||||
backfilled=backfilled,
|
|
||||||
deferred=deferred,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
return deferred.observe()
|
|
||||||
|
|
||||||
def handle_queue(self, room_id, per_item_callback):
|
|
||||||
"""Attempts to handle the queue for a room if not already being handled.
|
|
||||||
|
|
||||||
The given callback will be invoked with for each item in the queue,
|
|
||||||
of type _EventPersistQueueItem. The per_item_callback will continuously
|
|
||||||
be called with new items, unless the queue becomnes empty. The return
|
|
||||||
value of the function will be given to the deferreds waiting on the item,
|
|
||||||
exceptions will be passed to the deferreds as well.
|
|
||||||
|
|
||||||
This function should therefore be called whenever anything is added
|
|
||||||
to the queue.
|
|
||||||
|
|
||||||
If another callback is currently handling the queue then it will not be
|
|
||||||
invoked.
|
|
||||||
"""
|
|
||||||
|
|
||||||
if room_id in self._currently_persisting_rooms:
|
|
||||||
return
|
|
||||||
|
|
||||||
self._currently_persisting_rooms.add(room_id)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def handle_queue_loop():
|
|
||||||
try:
|
|
||||||
queue = self._get_drainining_queue(room_id)
|
|
||||||
for item in queue:
|
|
||||||
try:
|
|
||||||
ret = yield per_item_callback(item)
|
|
||||||
except Exception:
|
|
||||||
with PreserveLoggingContext():
|
|
||||||
item.deferred.errback()
|
|
||||||
else:
|
|
||||||
with PreserveLoggingContext():
|
|
||||||
item.deferred.callback(ret)
|
|
||||||
finally:
|
|
||||||
queue = self._event_persist_queues.pop(room_id, None)
|
|
||||||
if queue:
|
|
||||||
self._event_persist_queues[room_id] = queue
|
|
||||||
self._currently_persisting_rooms.discard(room_id)
|
|
||||||
|
|
||||||
# set handle_queue_loop off in the background
|
|
||||||
run_as_background_process("persist_events", handle_queue_loop)
|
|
||||||
|
|
||||||
def _get_drainining_queue(self, room_id):
|
|
||||||
queue = self._event_persist_queues.setdefault(room_id, deque())
|
|
||||||
|
|
||||||
try:
|
|
||||||
while True:
|
|
||||||
yield queue.popleft()
|
|
||||||
except IndexError:
|
|
||||||
# Queue has been drained.
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
|
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
|
||||||
|
|
||||||
|
|
||||||
@ -241,9 +102,6 @@ class EventsStore(
|
|||||||
def __init__(self, db_conn, hs):
|
def __init__(self, db_conn, hs):
|
||||||
super(EventsStore, self).__init__(db_conn, hs)
|
super(EventsStore, self).__init__(db_conn, hs)
|
||||||
|
|
||||||
self._event_persist_queue = _EventPeristenceQueue()
|
|
||||||
self._state_resolution_handler = hs.get_state_resolution_handler()
|
|
||||||
|
|
||||||
# Collect metrics on the number of forward extremities that exist.
|
# Collect metrics on the number of forward extremities that exist.
|
||||||
# Counter of number of extremities to count
|
# Counter of number of extremities to count
|
||||||
self._current_forward_extremities_amount = c_counter()
|
self._current_forward_extremities_amount = c_counter()
|
||||||
@ -286,340 +144,106 @@ class EventsStore(
|
|||||||
res = yield self.runInteraction("read_forward_extremities", fetch)
|
res = yield self.runInteraction("read_forward_extremities", fetch)
|
||||||
self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
|
self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def persist_events(self, events_and_contexts, backfilled=False):
|
|
||||||
"""
|
|
||||||
Write events to the database
|
|
||||||
Args:
|
|
||||||
events_and_contexts: list of tuples of (event, context)
|
|
||||||
backfilled (bool): Whether the results are retrieved from federation
|
|
||||||
via backfill or not. Used to determine if they're "new" events
|
|
||||||
which might update the current state etc.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred[int]: the stream ordering of the latest persisted event
|
|
||||||
"""
|
|
||||||
partitioned = {}
|
|
||||||
for event, ctx in events_and_contexts:
|
|
||||||
partitioned.setdefault(event.room_id, []).append((event, ctx))
|
|
||||||
|
|
||||||
deferreds = []
|
|
||||||
for room_id, evs_ctxs in iteritems(partitioned):
|
|
||||||
d = self._event_persist_queue.add_to_queue(
|
|
||||||
room_id, evs_ctxs, backfilled=backfilled
|
|
||||||
)
|
|
||||||
deferreds.append(d)
|
|
||||||
|
|
||||||
for room_id in partitioned:
|
|
||||||
self._maybe_start_persisting(room_id)
|
|
||||||
|
|
||||||
yield make_deferred_yieldable(
|
|
||||||
defer.gatherResults(deferreds, consumeErrors=True)
|
|
||||||
)
|
|
||||||
|
|
||||||
max_persisted_id = yield self._stream_id_gen.get_current_token()
|
|
||||||
|
|
||||||
return max_persisted_id
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
@log_function
|
|
||||||
def persist_event(self, event, context, backfilled=False):
|
|
||||||
"""
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event (EventBase):
|
|
||||||
context (EventContext):
|
|
||||||
backfilled (bool):
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred: resolves to (int, int): the stream ordering of ``event``,
|
|
||||||
and the stream ordering of the latest persisted event
|
|
||||||
"""
|
|
||||||
deferred = self._event_persist_queue.add_to_queue(
|
|
||||||
event.room_id, [(event, context)], backfilled=backfilled
|
|
||||||
)
|
|
||||||
|
|
||||||
self._maybe_start_persisting(event.room_id)
|
|
||||||
|
|
||||||
yield make_deferred_yieldable(deferred)
|
|
||||||
|
|
||||||
max_persisted_id = yield self._stream_id_gen.get_current_token()
|
|
||||||
return (event.internal_metadata.stream_ordering, max_persisted_id)
|
|
||||||
|
|
||||||
def _maybe_start_persisting(self, room_id):
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def persisting_queue(item):
|
|
||||||
with Measure(self._clock, "persist_events"):
|
|
||||||
yield self._persist_events(
|
|
||||||
item.events_and_contexts, backfilled=item.backfilled
|
|
||||||
)
|
|
||||||
|
|
||||||
self._event_persist_queue.handle_queue(room_id, persisting_queue)
|
|
||||||
|
|
||||||
@_retry_on_integrity_error
|
@_retry_on_integrity_error
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _persist_events(
|
def _persist_events_and_state_updates(
|
||||||
self, events_and_contexts, backfilled=False, delete_existing=False
|
self,
|
||||||
|
events_and_contexts,
|
||||||
|
current_state_for_room,
|
||||||
|
state_delta_for_room,
|
||||||
|
new_forward_extremeties,
|
||||||
|
backfilled=False,
|
||||||
|
delete_existing=False,
|
||||||
):
|
):
|
||||||
"""Persist events to db
|
"""Persist a set of events alongside updates to the current state and
|
||||||
|
forward extremities tables.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
events_and_contexts (list[(EventBase, EventContext)]):
|
events_and_contexts (list[(EventBase, EventContext)]):
|
||||||
backfilled (bool):
|
current_state_for_room (dict[str, dict]): Map from room_id to the
|
||||||
|
current state of the room based on forward extremities
|
||||||
|
state_delta_for_room (dict[str, tuple]): Map from room_id to tuple
|
||||||
|
of `(to_delete, to_insert)` where to_delete is a list
|
||||||
|
of type/state keys to remove from current state, and to_insert
|
||||||
|
is a map (type,key)->event_id giving the state delta in each
|
||||||
|
room.
|
||||||
|
new_forward_extremities (dict[str, list[str]]): Map from room_id
|
||||||
|
to list of event IDs that are the new forward extremities of
|
||||||
|
the room.
|
||||||
|
backfilled (bool)
|
||||||
delete_existing (bool):
|
delete_existing (bool):
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Deferred: resolves when the events have been persisted
|
Deferred: resolves when the events have been persisted
|
||||||
"""
|
"""
|
||||||
if not events_and_contexts:
|
|
||||||
return
|
|
||||||
|
|
||||||
chunks = [
|
# We want to calculate the stream orderings as late as possible, as
|
||||||
events_and_contexts[x : x + 100]
|
# we only notify after all events with a lesser stream ordering have
|
||||||
for x in range(0, len(events_and_contexts), 100)
|
# been persisted. I.e. if we spend 10s inside the with block then
|
||||||
]
|
# that will delay all subsequent events from being notified about.
|
||||||
|
# Hence why we do it down here rather than wrapping the entire
|
||||||
|
# function.
|
||||||
|
#
|
||||||
|
# Its safe to do this after calculating the state deltas etc as we
|
||||||
|
# only need to protect the *persistence* of the events. This is to
|
||||||
|
# ensure that queries of the form "fetch events since X" don't
|
||||||
|
# return events and stream positions after events that are still in
|
||||||
|
# flight, as otherwise subsequent requests "fetch event since Y"
|
||||||
|
# will not return those events.
|
||||||
|
#
|
||||||
|
# Note: Multiple instances of this function cannot be in flight at
|
||||||
|
# the same time for the same room.
|
||||||
|
if backfilled:
|
||||||
|
stream_ordering_manager = self._backfill_id_gen.get_next_mult(
|
||||||
|
len(events_and_contexts)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
stream_ordering_manager = self._stream_id_gen.get_next_mult(
|
||||||
|
len(events_and_contexts)
|
||||||
|
)
|
||||||
|
|
||||||
for chunk in chunks:
|
with stream_ordering_manager as stream_orderings:
|
||||||
# We can't easily parallelize these since different chunks
|
for (event, context), stream in zip(events_and_contexts, stream_orderings):
|
||||||
# might contain the same event. :(
|
event.internal_metadata.stream_ordering = stream
|
||||||
|
|
||||||
# NB: Assumes that we are only persisting events for one room
|
yield self.runInteraction(
|
||||||
# at a time.
|
"persist_events",
|
||||||
|
self._persist_events_txn,
|
||||||
# map room_id->list[event_ids] giving the new forward
|
events_and_contexts=events_and_contexts,
|
||||||
# extremities in each room
|
backfilled=backfilled,
|
||||||
new_forward_extremeties = {}
|
delete_existing=delete_existing,
|
||||||
|
state_delta_for_room=state_delta_for_room,
|
||||||
# map room_id->(type,state_key)->event_id tracking the full
|
new_forward_extremeties=new_forward_extremeties,
|
||||||
# state in each room after adding these events.
|
)
|
||||||
# This is simply used to prefill the get_current_state_ids
|
persist_event_counter.inc(len(events_and_contexts))
|
||||||
# cache
|
|
||||||
current_state_for_room = {}
|
|
||||||
|
|
||||||
# map room_id->(to_delete, to_insert) where to_delete is a list
|
|
||||||
# of type/state keys to remove from current state, and to_insert
|
|
||||||
# is a map (type,key)->event_id giving the state delta in each
|
|
||||||
# room
|
|
||||||
state_delta_for_room = {}
|
|
||||||
|
|
||||||
if not backfilled:
|
if not backfilled:
|
||||||
with Measure(self._clock, "_calculate_state_and_extrem"):
|
# backfilled events have negative stream orderings, so we don't
|
||||||
# Work out the new "current state" for each room.
|
# want to set the event_persisted_position to that.
|
||||||
# We do this by working out what the new extremities are and then
|
synapse.metrics.event_persisted_position.set(
|
||||||
# calculating the state from that.
|
events_and_contexts[-1][0].internal_metadata.stream_ordering
|
||||||
events_by_room = {}
|
|
||||||
for event, context in chunk:
|
|
||||||
events_by_room.setdefault(event.room_id, []).append(
|
|
||||||
(event, context)
|
|
||||||
)
|
|
||||||
|
|
||||||
for room_id, ev_ctx_rm in iteritems(events_by_room):
|
|
||||||
latest_event_ids = yield self.get_latest_event_ids_in_room(
|
|
||||||
room_id
|
|
||||||
)
|
|
||||||
new_latest_event_ids = yield self._calculate_new_extremities(
|
|
||||||
room_id, ev_ctx_rm, latest_event_ids
|
|
||||||
)
|
|
||||||
|
|
||||||
latest_event_ids = set(latest_event_ids)
|
|
||||||
if new_latest_event_ids == latest_event_ids:
|
|
||||||
# No change in extremities, so no change in state
|
|
||||||
continue
|
|
||||||
|
|
||||||
# there should always be at least one forward extremity.
|
|
||||||
# (except during the initial persistence of the send_join
|
|
||||||
# results, in which case there will be no existing
|
|
||||||
# extremities, so we'll `continue` above and skip this bit.)
|
|
||||||
assert new_latest_event_ids, "No forward extremities left!"
|
|
||||||
|
|
||||||
new_forward_extremeties[room_id] = new_latest_event_ids
|
|
||||||
|
|
||||||
len_1 = (
|
|
||||||
len(latest_event_ids) == 1
|
|
||||||
and len(new_latest_event_ids) == 1
|
|
||||||
)
|
|
||||||
if len_1:
|
|
||||||
all_single_prev_not_state = all(
|
|
||||||
len(event.prev_event_ids()) == 1
|
|
||||||
and not event.is_state()
|
|
||||||
for event, ctx in ev_ctx_rm
|
|
||||||
)
|
|
||||||
# Don't bother calculating state if they're just
|
|
||||||
# a long chain of single ancestor non-state events.
|
|
||||||
if all_single_prev_not_state:
|
|
||||||
continue
|
|
||||||
|
|
||||||
state_delta_counter.inc()
|
|
||||||
if len(new_latest_event_ids) == 1:
|
|
||||||
state_delta_single_event_counter.inc()
|
|
||||||
|
|
||||||
# This is a fairly handwavey check to see if we could
|
|
||||||
# have guessed what the delta would have been when
|
|
||||||
# processing one of these events.
|
|
||||||
# What we're interested in is if the latest extremities
|
|
||||||
# were the same when we created the event as they are
|
|
||||||
# now. When this server creates a new event (as opposed
|
|
||||||
# to receiving it over federation) it will use the
|
|
||||||
# forward extremities as the prev_events, so we can
|
|
||||||
# guess this by looking at the prev_events and checking
|
|
||||||
# if they match the current forward extremities.
|
|
||||||
for ev, _ in ev_ctx_rm:
|
|
||||||
prev_event_ids = set(ev.prev_event_ids())
|
|
||||||
if latest_event_ids == prev_event_ids:
|
|
||||||
state_delta_reuse_delta_counter.inc()
|
|
||||||
break
|
|
||||||
|
|
||||||
logger.info("Calculating state delta for room %s", room_id)
|
|
||||||
with Measure(
|
|
||||||
self._clock, "persist_events.get_new_state_after_events"
|
|
||||||
):
|
|
||||||
res = yield self._get_new_state_after_events(
|
|
||||||
room_id,
|
|
||||||
ev_ctx_rm,
|
|
||||||
latest_event_ids,
|
|
||||||
new_latest_event_ids,
|
|
||||||
)
|
|
||||||
current_state, delta_ids = res
|
|
||||||
|
|
||||||
# If either are not None then there has been a change,
|
|
||||||
# and we need to work out the delta (or use that
|
|
||||||
# given)
|
|
||||||
if delta_ids is not None:
|
|
||||||
# If there is a delta we know that we've
|
|
||||||
# only added or replaced state, never
|
|
||||||
# removed keys entirely.
|
|
||||||
state_delta_for_room[room_id] = ([], delta_ids)
|
|
||||||
elif current_state is not None:
|
|
||||||
with Measure(
|
|
||||||
self._clock, "persist_events.calculate_state_delta"
|
|
||||||
):
|
|
||||||
delta = yield self._calculate_state_delta(
|
|
||||||
room_id, current_state
|
|
||||||
)
|
|
||||||
state_delta_for_room[room_id] = delta
|
|
||||||
|
|
||||||
# If we have the current_state then lets prefill
|
|
||||||
# the cache with it.
|
|
||||||
if current_state is not None:
|
|
||||||
current_state_for_room[room_id] = current_state
|
|
||||||
|
|
||||||
# We want to calculate the stream orderings as late as possible, as
|
|
||||||
# we only notify after all events with a lesser stream ordering have
|
|
||||||
# been persisted. I.e. if we spend 10s inside the with block then
|
|
||||||
# that will delay all subsequent events from being notified about.
|
|
||||||
# Hence why we do it down here rather than wrapping the entire
|
|
||||||
# function.
|
|
||||||
#
|
|
||||||
# Its safe to do this after calculating the state deltas etc as we
|
|
||||||
# only need to protect the *persistence* of the events. This is to
|
|
||||||
# ensure that queries of the form "fetch events since X" don't
|
|
||||||
# return events and stream positions after events that are still in
|
|
||||||
# flight, as otherwise subsequent requests "fetch event since Y"
|
|
||||||
# will not return those events.
|
|
||||||
#
|
|
||||||
# Note: Multiple instances of this function cannot be in flight at
|
|
||||||
# the same time for the same room.
|
|
||||||
if backfilled:
|
|
||||||
stream_ordering_manager = self._backfill_id_gen.get_next_mult(
|
|
||||||
len(chunk)
|
|
||||||
)
|
)
|
||||||
else:
|
|
||||||
stream_ordering_manager = self._stream_id_gen.get_next_mult(len(chunk))
|
|
||||||
|
|
||||||
        with stream_ordering_manager as stream_orderings:
-           for event, context in events_and_contexts:
-               if context.app_service:
-                   origin_type = "local"
-                   origin_entity = context.app_service.id
-               elif self.hs.is_mine_id(event.sender):
-                   origin_type = "local"
-                   origin_entity = "*client*"
-               else:
-                   origin_type = "remote"
-                   origin_entity = get_domain_from_id(event.sender)
-
-               event_counter.labels(event.type, origin_type, origin_entity).inc()
-
-           for room_id, new_state in iteritems(current_state_for_room):
-               self.get_current_state_ids.prefill((room_id,), new_state)
-
-           for room_id, latest_event_ids in iteritems(new_forward_extremeties):
-               self.get_latest_event_ids_in_room.prefill(
-                   (room_id,), list(latest_event_ids)
-               )
+           for (event, context), stream in zip(chunk, stream_orderings):
+               event.internal_metadata.stream_ordering = stream
+
+           yield self.runInteraction(
+               "persist_events",
+               self._persist_events_txn,
+               events_and_contexts=chunk,
+               backfilled=backfilled,
+               delete_existing=delete_existing,
+               state_delta_for_room=state_delta_for_room,
+               new_forward_extremeties=new_forward_extremeties,
+           )
+           persist_event_counter.inc(len(chunk))

            if not backfilled:
                # backfilled events have negative stream orderings, so we don't
                # want to set the event_persisted_position to that.
                synapse.metrics.event_persisted_position.set(
                    chunk[-1][0].internal_metadata.stream_ordering
                )

            for event, context in chunk:
                if context.app_service:
                    origin_type = "local"
                    origin_entity = context.app_service.id
                elif self.hs.is_mine_id(event.sender):
                    origin_type = "local"
                    origin_entity = "*client*"
                else:
                    origin_type = "remote"
                    origin_entity = get_domain_from_id(event.sender)

                event_counter.labels(event.type, origin_type, origin_entity).inc()

            for room_id, new_state in iteritems(current_state_for_room):
                self.get_current_state_ids.prefill((room_id,), new_state)

            for room_id, latest_event_ids in iteritems(new_forward_extremeties):
                self.get_latest_event_ids_in_room.prefill(
                    (room_id,), list(latest_event_ids)
                )
    @defer.inlineCallbacks
    def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
        """Calculates the new forward extremities for a room given events to
        persist.

        Assumes that we are only persisting events for one room at a time.
        """

        # we're only interested in new events which aren't outliers and which aren't
        # being rejected.
        new_events = [
            event
            for event, ctx in event_contexts
            if not event.internal_metadata.is_outlier()
            and not ctx.rejected
            and not event.internal_metadata.is_soft_failed()
        ]

        latest_event_ids = set(latest_event_ids)

        # start with the existing forward extremities
        result = set(latest_event_ids)

        # add all the new events to the list
        result.update(event.event_id for event in new_events)

        # Now remove all events which are prev_events of any of the new events
        result.difference_update(
            e_id for event in new_events for e_id in event.prev_event_ids()
        )

        # Remove any events which are prev_events of any existing events.
        existing_prevs = yield self._get_events_which_are_prevs(result)
        result.difference_update(existing_prevs)

        # Finally handle the case where the new events have soft-failed prev
        # events. If they do we need to remove them and their prev events,
        # otherwise we end up with dangling extremities.
        existing_prevs = yield self._get_prevs_before_rejected(
            e_id for event in new_events for e_id in event.prev_event_ids()
        )
        result.difference_update(existing_prevs)

        # We only update metrics for events that change forward extremities
        # (e.g. we ignore backfill/outliers/etc)
        if result != latest_event_ids:
            forward_extremities_counter.observe(len(result))
            stale = latest_event_ids & result
            stale_forward_extremities_counter.observe(len(stale))

        return result
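The extremity calculation above is pure set arithmetic, which is easiest to see on a toy graph. A self-contained rendering of the same steps, with made-up event IDs and DAG:

# Toy DAG: E1 <- E2 <- E3, where E1 is the current forward extremity
# and we are persisting E2 and E3.
latest_event_ids = {"$E1"}
new_events = {"$E2": ["$E1"], "$E3": ["$E2"]}  # event_id -> prev_event_ids

# Start with the existing extremities plus all new events.
result = set(latest_event_ids)
result.update(new_events)  # {"$E1", "$E2", "$E3"}

# Drop anything that is a prev_event of one of the new events.
result.difference_update(
    e_id for prevs in new_events.values() for e_id in prevs
)
print(result)  # {'$E3'}: only the tip of the chain survives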
    @defer.inlineCallbacks
    def _get_events_which_are_prevs(self, event_ids):
@ -725,188 +349,6 @@ class EventsStore(

        return existing_prevs
    @defer.inlineCallbacks
    def _get_new_state_after_events(
        self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
    ):
        """Calculate the current state dict after adding some new events to
        a room

        Args:
            room_id (str):
                room to which the events are being added. Used for logging etc

            events_context (list[(EventBase, EventContext)]):
                events and contexts which are being added to the room

            old_latest_event_ids (iterable[str]):
                the old forward extremities for the room.

            new_latest_event_ids (iterable[str]):
                the new forward extremities for the room.

        Returns:
            Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
            Returns a tuple of two state maps, the first being the full new current
            state and the second being the delta to the existing current state.
            If both are None then there has been no change.

            If there has been a change then we only return the delta if it's
            already been calculated. Conversely if we do know the delta then
            the new current state is only returned if we've already calculated
            it.
        """
        # map from state_group to ((type, key) -> event_id) state map
        state_groups_map = {}

        # Map from (prev state group, new state group) -> delta state dict
        state_group_deltas = {}

        for ev, ctx in events_context:
            if ctx.state_group is None:
                # This should only happen for outlier events.
                if not ev.internal_metadata.is_outlier():
                    raise Exception(
                        "Context for new event %s has no state "
                        "group" % (ev.event_id,)
                    )
                continue

            if ctx.state_group in state_groups_map:
                continue

            # We're only interested in pulling out state that has already
            # been cached in the context. We'll pull stuff out of the DB later
            # if necessary.
            current_state_ids = ctx.get_cached_current_state_ids()
            if current_state_ids is not None:
                state_groups_map[ctx.state_group] = current_state_ids

            if ctx.prev_group:
                state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids

        # We need to map the event_ids to their state groups. First, let's
        # check if the event is one we're persisting, in which case we can
        # pull the state group from its context.
        # Otherwise we need to pull the state group from the database.

        # Set of events we need to fetch groups for. (We know none of the old
        # extremities are going to be in events_context).
        missing_event_ids = set(old_latest_event_ids)

        event_id_to_state_group = {}
        for event_id in new_latest_event_ids:
            # First search in the list of new events we're adding.
            for ev, ctx in events_context:
                if event_id == ev.event_id and ctx.state_group is not None:
                    event_id_to_state_group[event_id] = ctx.state_group
                    break
            else:
                # If we couldn't find it, then we'll need to pull
                # the state from the database
                missing_event_ids.add(event_id)

        if missing_event_ids:
            # Now pull out the state groups for any missing events from DB
            event_to_groups = yield self._get_state_group_for_events(missing_event_ids)
            event_id_to_state_group.update(event_to_groups)

        # State groups of old_latest_event_ids
        old_state_groups = set(
            event_id_to_state_group[evid] for evid in old_latest_event_ids
        )

        # State groups of new_latest_event_ids
        new_state_groups = set(
            event_id_to_state_group[evid] for evid in new_latest_event_ids
        )

        # If the old and new groups are the same then we don't need to do
        # anything.
        if old_state_groups == new_state_groups:
            return None, None

        if len(new_state_groups) == 1 and len(old_state_groups) == 1:
            # If we're going from one state group to another, let's check if
            # we have a delta for that transition. If we do then we can just
            # return that.

            new_state_group = next(iter(new_state_groups))
            old_state_group = next(iter(old_state_groups))

            delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
            if delta_ids is not None:
                # We have a delta from the existing to new current state,
                # so let's just return that. If we happen to already have
                # the current state in memory then let's also return that,
                # but it doesn't matter if we don't.
                new_state = state_groups_map.get(new_state_group)
                return new_state, delta_ids

        # Now that we have calculated new_state_groups we need to get
        # their state IDs so we can resolve to a single state set.
        missing_state = new_state_groups - set(state_groups_map)
        if missing_state:
            group_to_state = yield self._get_state_for_groups(missing_state)
            state_groups_map.update(group_to_state)

        if len(new_state_groups) == 1:
            # If there is only one state group, then we know what the current
            # state is.
            return state_groups_map[new_state_groups.pop()], None

        # Ok, we need to defer to the state handler to resolve our state sets.

        state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}

        events_map = {ev.event_id: ev for ev, _ in events_context}

        # We need to get the room version, which is in the create event.
        # Normally that'd be in the database, but it's also possible that we're
        # currently trying to persist it.
        room_version = None
        for ev, _ in events_context:
            if ev.type == EventTypes.Create and ev.state_key == "":
                room_version = ev.content.get("room_version", "1")
                break

        if not room_version:
            room_version = yield self.get_room_version(room_id)

        logger.debug("calling resolve_state_groups from preserve_events")
        res = yield self._state_resolution_handler.resolve_state_groups(
            room_id,
            room_version,
            state_groups,
            events_map,
            state_res_store=StateResolutionStore(self),
        )

        return res.state, None

    @defer.inlineCallbacks
    def _calculate_state_delta(self, room_id, current_state):
        """Calculate the new state deltas for a room.

        Assumes that we are only persisting events for one room at a time.

        Returns:
            tuple[list, dict] (to_delete, to_insert): where to_delete are the
            type/state_keys to remove from current_state_events and `to_insert`
            are the updates to current_state_events.
        """
        existing_state = yield self.get_current_state_ids(room_id)

        to_delete = [key for key in existing_state if key not in current_state]

        to_insert = {
            key: ev_id
            for key, ev_id in iteritems(current_state)
            if ev_id != existing_state.get(key)
        }

        return to_delete, to_insert
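The delta returned by `_calculate_state_delta` is just a dict diff keyed on (type, state_key). For illustration, the same comprehension on toy state maps (the event IDs are made up):

existing_state = {
    ("m.room.member", "@alice:test"): "$old_join",
    ("m.room.topic", ""): "$topic1",
}
current_state = {
    ("m.room.member", "@alice:test"): "$new_join",  # replaced
    ("m.room.name", ""): "$name1",                  # added
}                                                   # topic: removed

to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
    key: ev_id
    for key, ev_id in current_state.items()
    if ev_id != existing_state.get(key)
}
print(to_delete)  # [('m.room.topic', '')]
print(to_insert)  # the replaced member event and the new name event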
    @log_function
    def _persist_events_txn(
        self,
@ -2439,12 +1881,11 @@ class EventsStore(

        logger.info("[purge] done")

-    @defer.inlineCallbacks
-    def is_event_after(self, event_id1, event_id2):
+    async def is_event_after(self, event_id1, event_id2):
        """Returns True if event_id1 is after event_id2 in the stream
        """
-        to_1, so_1 = yield self._get_event_ordering(event_id1)
-        to_2, so_2 = yield self._get_event_ordering(event_id2)
+        to_1, so_1 = await self._get_event_ordering(event_id1)
+        to_2, so_2 = await self._get_event_ordering(event_id2)
        return (to_1, so_1) > (to_2, so_2)

    @cachedInlineCallbacks(max_entries=5000)
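The comparison in `is_event_after` relies on Python's lexicographic tuple ordering over (topological_ordering, stream_ordering): topological ordering wins, and stream ordering breaks ties. A quick sketch with made-up values:

# (topological_ordering, stream_ordering) pairs; the numbers are invented.
event_a = (5, 103)
event_b = (5, 101)
event_c = (4, 999)

print(event_a > event_b)  # True: same depth, later stream position
print(event_b > event_c)  # True: higher topological ordering wins outright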
@ -201,13 +201,17 @@ class RoomWorkerStore(SQLBaseStore):
            where_clauses.append(
                """
                (
-                    name LIKE ?
-                    OR topic LIKE ?
-                    OR canonical_alias LIKE ?
+                    LOWER(name) LIKE ?
+                    OR LOWER(topic) LIKE ?
+                    OR LOWER(canonical_alias) LIKE ?
                )
                """
            )
-            query_args += [search_term, search_term, search_term]
+            query_args += [
+                search_term.lower(),
+                search_term.lower(),
+                search_term.lower(),
+            ]

        where_clause = ""
        if where_clauses:
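Lower-casing both sides keeps the comparison case-insensitive on PostgreSQL, where plain LIKE is case-sensitive (unlike SQLite's ASCII-insensitive LIKE). A standalone sketch of the same pattern, using the stdlib sqlite3 driver purely as a stand-in:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rooms (name TEXT, topic TEXT)")
conn.execute("INSERT INTO rooms VALUES ('Matrix HQ', 'Chat about Matrix')")

search_term = "%matrix%"
rows = conn.execute(
    "SELECT name FROM rooms WHERE LOWER(name) LIKE ? OR LOWER(topic) LIKE ?",
    (search_term.lower(), search_term.lower()),
).fetchall()
print(rows)  # [('Matrix HQ',)]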
@ -720,7 +720,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
        # See bulk_get_push_rules_for_room for how we work around this.
        assert state_group is not None

-        cache = self._get_joined_hosts_cache(room_id)
+        cache = yield self._get_joined_hosts_cache(room_id)
        joined_hosts = yield cache.get_destinations(state_entry)

        return joined_hosts
@ -0,0 +1,25 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* delete room keys that belong to deleted room key version, or to room key
 * versions that don't exist (anymore)
 */
DELETE FROM e2e_room_keys
WHERE version NOT IN (
    SELECT version
    FROM e2e_room_keys_versions
    WHERE e2e_room_keys.user_id = e2e_room_keys_versions.user_id
        AND e2e_room_keys_versions.deleted = 0
);
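The correlated subquery keeps a key only if a matching, undeleted backup version exists for the same user. The same predicate expressed in plain Python, on made-up rows:

room_keys = [
    {"user_id": "@u:test", "version": 1, "session": "a"},
    {"user_id": "@u:test", "version": 2, "session": "b"},
]
key_versions = [
    {"user_id": "@u:test", "version": 1, "deleted": 1},  # deleted backup
    {"user_id": "@u:test", "version": 2, "deleted": 0},
]

# Keep a key only if some undeleted version row matches its (user, version).
surviving = [
    k
    for k in room_keys
    if any(
        v["user_id"] == k["user_id"]
        and v["version"] == k["version"]
        and v["deleted"] == 0
        for v in key_versions
    )
]
print(surviving)  # only the version-2 key remains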
@ -15,6 +15,7 @@
import logging
from collections import namedtuple
+from typing import Iterable, Tuple

from six import iteritems, itervalues
from six.moves import range
@ -23,6 +24,8 @@ from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
from synapse.storage._base import SQLBaseStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
@ -1215,7 +1218,9 @@ class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore):
    def __init__(self, db_conn, hs):
        super(StateStore, self).__init__(db_conn, hs)

-    def _store_event_state_mappings_txn(self, txn, events_and_contexts):
+    def _store_event_state_mappings_txn(
+        self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
+    ):
        state_groups = {}
        for event, context in events_and_contexts:
            if event.internal_metadata.is_outlier():
@ -332,7 +332,7 @@ class StatsStore(StateDeltasStore):
        def _bulk_update_stats_delta_txn(txn):
            for stats_type, stats_updates in updates.items():
                for stats_id, fields in stats_updates.items():
-                    logger.info(
+                    logger.debug(
                        "Updating %s stats for %s: %s", stats_type, stats_id, fields
                    )
                    self._update_stats_delta_txn(
652
synapse/storage/persist_events.py
Normal file
@ -0,0 +1,652 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import deque, namedtuple

from six import iteritems
from six.moves import range

from prometheus_client import Counter, Histogram

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.state import StateResolutionStore
from synapse.storage.data_stores import DataStores
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)

# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")

# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
    "synapse_storage_events_state_delta_single_event", ""
)

# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
    "synapse_storage_events_state_delta_reuse_delta", ""
)

# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
    "synapse_storage_events_forward_extremities_persisted",
    "Number of forward extremities for each new event",
    buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)

# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
    "synapse_storage_events_stale_forward_extremities_persisted",
    "Number of unchanged forward extremities for each new event",
    buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
class _EventPeristenceQueue(object):
    """Queues up events so that they can be persisted in bulk with only one
    concurrent transaction per room.
    """

    _EventPersistQueueItem = namedtuple(
        "_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
    )

    def __init__(self):
        self._event_persist_queues = {}
        self._currently_persisting_rooms = set()

    def add_to_queue(self, room_id, events_and_contexts, backfilled):
        """Add events to the queue, with the given persist_event options.

        NB: due to the normal usage pattern of this method, it does *not*
        follow the synapse logcontext rules, and leaves the logcontext in
        place whether or not the returned deferred is ready.

        Args:
            room_id (str):
            events_and_contexts (list[(EventBase, EventContext)]):
            backfilled (bool):

        Returns:
            defer.Deferred: a deferred which will resolve once the events are
                persisted. Runs its callbacks *without* a logcontext.
        """
        queue = self._event_persist_queues.setdefault(room_id, deque())
        if queue:
            # if the last item in the queue has the same `backfilled` setting,
            # we can just add these new events to that item.
            end_item = queue[-1]
            if end_item.backfilled == backfilled:
                end_item.events_and_contexts.extend(events_and_contexts)
                return end_item.deferred.observe()

        deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)

        queue.append(
            self._EventPersistQueueItem(
                events_and_contexts=events_and_contexts,
                backfilled=backfilled,
                deferred=deferred,
            )
        )

        return deferred.observe()

    def handle_queue(self, room_id, per_item_callback):
        """Attempts to handle the queue for a room if not already being handled.

        The given callback will be invoked for each item in the queue,
        of type _EventPersistQueueItem. The per_item_callback will continuously
        be called with new items, until the queue becomes empty. The return
        value of the function will be given to the deferreds waiting on the item,
        exceptions will be passed to the deferreds as well.

        This function should therefore be called whenever anything is added
        to the queue.

        If another callback is currently handling the queue then it will not be
        invoked.
        """

        if room_id in self._currently_persisting_rooms:
            return

        self._currently_persisting_rooms.add(room_id)

        @defer.inlineCallbacks
        def handle_queue_loop():
            try:
                queue = self._get_drainining_queue(room_id)
                for item in queue:
                    try:
                        ret = yield per_item_callback(item)
                    except Exception:
                        with PreserveLoggingContext():
                            item.deferred.errback()
                    else:
                        with PreserveLoggingContext():
                            item.deferred.callback(ret)
            finally:
                queue = self._event_persist_queues.pop(room_id, None)
                if queue:
                    self._event_persist_queues[room_id] = queue
                self._currently_persisting_rooms.discard(room_id)

        # set handle_queue_loop off in the background
        run_as_background_process("persist_events", handle_queue_loop)

    def _get_drainining_queue(self, room_id):
        queue = self._event_persist_queues.setdefault(room_id, deque())

        try:
            while True:
                yield queue.popleft()
        except IndexError:
            # Queue has been drained.
            pass
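Taken together, add_to_queue and handle_queue give one in-flight batch per room while later arrivals coalesce onto the pending batch. A rough synchronous analogue of that coalescing behaviour, deliberately ignoring the Deferred plumbing:

from collections import deque

queues = {}  # room_id -> deque of pending batches


def add_to_queue(room_id, events, backfilled):
    queue = queues.setdefault(room_id, deque())
    if queue and queue[-1]["backfilled"] == backfilled:
        # Coalesce with the pending batch rather than queueing a new one.
        queue[-1]["events"].extend(events)
    else:
        queue.append({"events": list(events), "backfilled": backfilled})


add_to_queue("!room:test", ["$e1"], backfilled=False)
add_to_queue("!room:test", ["$e2"], backfilled=False)
add_to_queue("!room:test", ["$e3"], backfilled=True)
print([len(b["events"]) for b in queues["!room:test"]])  # [2, 1]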
class EventsPersistenceStorage(object):
    """High level interface for handling persisting newly received events.

    Takes care of batching up events by room, and calculating the necessary
    current state and forward extremity changes.
    """

    def __init__(self, hs, stores: DataStores):
        # We ultimately want to split out the state store from the main store,
        # so we use separate variables here even though they point to the same
        # store for now.
        self.main_store = stores.main
        self.state_store = stores.main

        self._clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id
        self._event_persist_queue = _EventPeristenceQueue()
        self._state_resolution_handler = hs.get_state_resolution_handler()

    @defer.inlineCallbacks
    def persist_events(self, events_and_contexts, backfilled=False):
        """
        Write events to the database
        Args:
            events_and_contexts: list of tuples of (event, context)
            backfilled (bool): Whether the results are retrieved from federation
                via backfill or not. Used to determine if they're "new" events
                which might update the current state etc.

        Returns:
            Deferred[int]: the stream ordering of the latest persisted event
        """
        partitioned = {}
        for event, ctx in events_and_contexts:
            partitioned.setdefault(event.room_id, []).append((event, ctx))

        deferreds = []
        for room_id, evs_ctxs in iteritems(partitioned):
            d = self._event_persist_queue.add_to_queue(
                room_id, evs_ctxs, backfilled=backfilled
            )
            deferreds.append(d)

        for room_id in partitioned:
            self._maybe_start_persisting(room_id)

        yield make_deferred_yieldable(
            defer.gatherResults(deferreds, consumeErrors=True)
        )

        max_persisted_id = yield self.main_store.get_current_events_token()

        return max_persisted_id
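persist_events fans a mixed batch out into per-room queues before anything touches the database. The partitioning step on its own, with invented event and room IDs:

events = [
    ("$e1", "!a:test"),
    ("$e2", "!b:test"),
    ("$e3", "!a:test"),
]

# Group events by room, preserving arrival order within each room.
partitioned = {}
for event_id, room_id in events:
    partitioned.setdefault(room_id, []).append(event_id)

print(partitioned)  # {'!a:test': ['$e1', '$e3'], '!b:test': ['$e2']}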
    @defer.inlineCallbacks
    def persist_event(self, event, context, backfilled=False):
        """

        Args:
            event (EventBase):
            context (EventContext):
            backfilled (bool):

        Returns:
            Deferred: resolves to (int, int): the stream ordering of ``event``,
            and the stream ordering of the latest persisted event
        """
        deferred = self._event_persist_queue.add_to_queue(
            event.room_id, [(event, context)], backfilled=backfilled
        )

        self._maybe_start_persisting(event.room_id)

        yield make_deferred_yieldable(deferred)

        max_persisted_id = yield self.main_store.get_current_events_token()
        return (event.internal_metadata.stream_ordering, max_persisted_id)

    def _maybe_start_persisting(self, room_id):
        @defer.inlineCallbacks
        def persisting_queue(item):
            with Measure(self._clock, "persist_events"):
                yield self._persist_events(
                    item.events_and_contexts, backfilled=item.backfilled
                )

        self._event_persist_queue.handle_queue(room_id, persisting_queue)
    @defer.inlineCallbacks
    def _persist_events(
        self, events_and_contexts, backfilled=False, delete_existing=False
    ):
        """Calculates the change to current state and forward extremities, and
        persists the given events along with those updates.

        Args:
            events_and_contexts (list[(EventBase, EventContext)]):
            backfilled (bool):
            delete_existing (bool):

        Returns:
            Deferred: resolves when the events have been persisted
        """
        if not events_and_contexts:
            return

        chunks = [
            events_and_contexts[x : x + 100]
            for x in range(0, len(events_and_contexts), 100)
        ]

        for chunk in chunks:
            # We can't easily parallelize these since different chunks
            # might contain the same event. :(

            # NB: Assumes that we are only persisting events for one room
            # at a time.

            # map room_id->list[event_ids] giving the new forward
            # extremities in each room
            new_forward_extremeties = {}

            # map room_id->(type,state_key)->event_id tracking the full
            # state in each room after adding these events.
            # This is simply used to prefill the get_current_state_ids
            # cache
            current_state_for_room = {}

            # map room_id->(to_delete, to_insert) where to_delete is a list
            # of type/state keys to remove from current state, and to_insert
            # is a map (type,key)->event_id giving the state delta in each
            # room
            state_delta_for_room = {}

            if not backfilled:
                with Measure(self._clock, "_calculate_state_and_extrem"):
                    # Work out the new "current state" for each room.
                    # We do this by working out what the new extremities are and then
                    # calculating the state from that.
                    events_by_room = {}
                    for event, context in chunk:
                        events_by_room.setdefault(event.room_id, []).append(
                            (event, context)
                        )

                    for room_id, ev_ctx_rm in iteritems(events_by_room):
                        latest_event_ids = yield self.main_store.get_latest_event_ids_in_room(
                            room_id
                        )
                        new_latest_event_ids = yield self._calculate_new_extremities(
                            room_id, ev_ctx_rm, latest_event_ids
                        )

                        latest_event_ids = set(latest_event_ids)
                        if new_latest_event_ids == latest_event_ids:
                            # No change in extremities, so no change in state
                            continue

                        # there should always be at least one forward extremity.
                        # (except during the initial persistence of the send_join
                        # results, in which case there will be no existing
                        # extremities, so we'll `continue` above and skip this bit.)
                        assert new_latest_event_ids, "No forward extremities left!"

                        new_forward_extremeties[room_id] = new_latest_event_ids

                        len_1 = (
                            len(latest_event_ids) == 1
                            and len(new_latest_event_ids) == 1
                        )
                        if len_1:
                            all_single_prev_not_state = all(
                                len(event.prev_event_ids()) == 1
                                and not event.is_state()
                                for event, ctx in ev_ctx_rm
                            )
                            # Don't bother calculating state if they're just
                            # a long chain of single ancestor non-state events.
                            if all_single_prev_not_state:
                                continue

                        state_delta_counter.inc()
                        if len(new_latest_event_ids) == 1:
                            state_delta_single_event_counter.inc()

                            # This is a fairly handwavey check to see if we could
                            # have guessed what the delta would have been when
                            # processing one of these events.
                            # What we're interested in is if the latest extremities
                            # were the same when we created the event as they are
                            # now. When this server creates a new event (as opposed
                            # to receiving it over federation) it will use the
                            # forward extremities as the prev_events, so we can
                            # guess this by looking at the prev_events and checking
                            # if they match the current forward extremities.
                            for ev, _ in ev_ctx_rm:
                                prev_event_ids = set(ev.prev_event_ids())
                                if latest_event_ids == prev_event_ids:
                                    state_delta_reuse_delta_counter.inc()
                                    break

                        logger.info("Calculating state delta for room %s", room_id)
                        with Measure(
                            self._clock, "persist_events.get_new_state_after_events"
                        ):
                            res = yield self._get_new_state_after_events(
                                room_id,
                                ev_ctx_rm,
                                latest_event_ids,
                                new_latest_event_ids,
                            )
                            current_state, delta_ids = res

                        # If either are not None then there has been a change,
                        # and we need to work out the delta (or use the one
                        # given)
                        if delta_ids is not None:
                            # If there is a delta we know that we've
                            # only added or replaced state, never
                            # removed keys entirely.
                            state_delta_for_room[room_id] = ([], delta_ids)
                        elif current_state is not None:
                            with Measure(
                                self._clock, "persist_events.calculate_state_delta"
                            ):
                                delta = yield self._calculate_state_delta(
                                    room_id, current_state
                                )
                            state_delta_for_room[room_id] = delta

                        # If we have the current_state then let's prefill
                        # the cache with it.
                        if current_state is not None:
                            current_state_for_room[room_id] = current_state

            yield self.main_store._persist_events_and_state_updates(
                chunk,
                current_state_for_room=current_state_for_room,
                state_delta_for_room=state_delta_for_room,
                new_forward_extremeties=new_forward_extremeties,
                backfilled=backfilled,
                delete_existing=delete_existing,
            )
    @defer.inlineCallbacks
    def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
        """Calculates the new forward extremities for a room given events to
        persist.

        Assumes that we are only persisting events for one room at a time.
        """

        # we're only interested in new events which aren't outliers and which aren't
        # being rejected.
        new_events = [
            event
            for event, ctx in event_contexts
            if not event.internal_metadata.is_outlier()
            and not ctx.rejected
            and not event.internal_metadata.is_soft_failed()
        ]

        latest_event_ids = set(latest_event_ids)

        # start with the existing forward extremities
        result = set(latest_event_ids)

        # add all the new events to the list
        result.update(event.event_id for event in new_events)

        # Now remove all events which are prev_events of any of the new events
        result.difference_update(
            e_id for event in new_events for e_id in event.prev_event_ids()
        )

        # Remove any events which are prev_events of any existing events.
        existing_prevs = yield self.main_store._get_events_which_are_prevs(result)
        result.difference_update(existing_prevs)

        # Finally handle the case where the new events have soft-failed prev
        # events. If they do we need to remove them and their prev events,
        # otherwise we end up with dangling extremities.
        existing_prevs = yield self.main_store._get_prevs_before_rejected(
            e_id for event in new_events for e_id in event.prev_event_ids()
        )
        result.difference_update(existing_prevs)

        # We only update metrics for events that change forward extremities
        # (e.g. we ignore backfill/outliers/etc)
        if result != latest_event_ids:
            forward_extremities_counter.observe(len(result))
            stale = latest_event_ids & result
            stale_forward_extremities_counter.observe(len(stale))

        return result
    @defer.inlineCallbacks
    def _get_new_state_after_events(
        self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
    ):
        """Calculate the current state dict after adding some new events to
        a room

        Args:
            room_id (str):
                room to which the events are being added. Used for logging etc

            events_context (list[(EventBase, EventContext)]):
                events and contexts which are being added to the room

            old_latest_event_ids (iterable[str]):
                the old forward extremities for the room.

            new_latest_event_ids (iterable[str]):
                the new forward extremities for the room.

        Returns:
            Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
            Returns a tuple of two state maps, the first being the full new current
            state and the second being the delta to the existing current state.
            If both are None then there has been no change.

            If there has been a change then we only return the delta if it's
            already been calculated. Conversely if we do know the delta then
            the new current state is only returned if we've already calculated
            it.
        """
        # map from state_group to ((type, key) -> event_id) state map
        state_groups_map = {}

        # Map from (prev state group, new state group) -> delta state dict
        state_group_deltas = {}

        for ev, ctx in events_context:
            if ctx.state_group is None:
                # This should only happen for outlier events.
                if not ev.internal_metadata.is_outlier():
                    raise Exception(
                        "Context for new event %s has no state "
                        "group" % (ev.event_id,)
                    )
                continue

            if ctx.state_group in state_groups_map:
                continue

            # We're only interested in pulling out state that has already
            # been cached in the context. We'll pull stuff out of the DB later
            # if necessary.
            current_state_ids = ctx.get_cached_current_state_ids()
            if current_state_ids is not None:
                state_groups_map[ctx.state_group] = current_state_ids

            if ctx.prev_group:
                state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids

        # We need to map the event_ids to their state groups. First, let's
        # check if the event is one we're persisting, in which case we can
        # pull the state group from its context.
        # Otherwise we need to pull the state group from the database.

        # Set of events we need to fetch groups for. (We know none of the old
        # extremities are going to be in events_context).
        missing_event_ids = set(old_latest_event_ids)

        event_id_to_state_group = {}
        for event_id in new_latest_event_ids:
            # First search in the list of new events we're adding.
            for ev, ctx in events_context:
                if event_id == ev.event_id and ctx.state_group is not None:
                    event_id_to_state_group[event_id] = ctx.state_group
                    break
            else:
                # If we couldn't find it, then we'll need to pull
                # the state from the database
                missing_event_ids.add(event_id)

        if missing_event_ids:
            # Now pull out the state groups for any missing events from DB
            event_to_groups = yield self.state_store._get_state_group_for_events(
                missing_event_ids
            )
            event_id_to_state_group.update(event_to_groups)

        # State groups of old_latest_event_ids
        old_state_groups = set(
            event_id_to_state_group[evid] for evid in old_latest_event_ids
        )

        # State groups of new_latest_event_ids
        new_state_groups = set(
            event_id_to_state_group[evid] for evid in new_latest_event_ids
        )

        # If the old and new groups are the same then we don't need to do
        # anything.
        if old_state_groups == new_state_groups:
            return None, None

        if len(new_state_groups) == 1 and len(old_state_groups) == 1:
            # If we're going from one state group to another, let's check if
            # we have a delta for that transition. If we do then we can just
            # return that.

            new_state_group = next(iter(new_state_groups))
            old_state_group = next(iter(old_state_groups))

            delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
            if delta_ids is not None:
                # We have a delta from the existing to new current state,
                # so let's just return that. If we happen to already have
                # the current state in memory then let's also return that,
                # but it doesn't matter if we don't.
                new_state = state_groups_map.get(new_state_group)
                return new_state, delta_ids

        # Now that we have calculated new_state_groups we need to get
        # their state IDs so we can resolve to a single state set.
        missing_state = new_state_groups - set(state_groups_map)
        if missing_state:
            group_to_state = yield self.state_store._get_state_for_groups(missing_state)
            state_groups_map.update(group_to_state)

        if len(new_state_groups) == 1:
            # If there is only one state group, then we know what the current
            # state is.
            return state_groups_map[new_state_groups.pop()], None

        # Ok, we need to defer to the state handler to resolve our state sets.

        state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}

        events_map = {ev.event_id: ev for ev, _ in events_context}

        # We need to get the room version, which is in the create event.
        # Normally that'd be in the database, but it's also possible that we're
        # currently trying to persist it.
        room_version = None
        for ev, _ in events_context:
            if ev.type == EventTypes.Create and ev.state_key == "":
                room_version = ev.content.get("room_version", "1")
                break

        if not room_version:
            room_version = yield self.main_store.get_room_version(room_id)

        logger.debug("calling resolve_state_groups from preserve_events")
        res = yield self._state_resolution_handler.resolve_state_groups(
            room_id,
            room_version,
            state_groups,
            events_map,
            state_res_store=StateResolutionStore(self.main_store),
        )

        return res.state, None
    @defer.inlineCallbacks
    def _calculate_state_delta(self, room_id, current_state):
        """Calculate the new state deltas for a room.

        Assumes that we are only persisting events for one room at a time.

        Returns:
            tuple[list, dict] (to_delete, to_insert): where to_delete are the
            type/state_keys to remove from current_state_events and `to_insert`
            are the updates to current_state_events.
        """
        existing_state = yield self.main_store.get_current_state_ids(room_id)

        to_delete = [key for key in existing_state if key not in current_state]

        to_insert = {
            key: ev_id
            for key, ev_id in iteritems(current_state)
            if ev_id != existing_state.get(key)
        }

        return to_delete, to_insert
@ -86,11 +86,12 @@ class ObservableDeferred(object):
        deferred.addCallbacks(callback, errback)

-    def observe(self):
+    def observe(self) -> defer.Deferred:
        """Observe the underlying deferred.

-        Can return either a deferred if the underlying deferred is still pending
-        (or has failed), or the actual value. Callers may need to use maybeDeferred.
+        This returns a brand new deferred that is resolved when the underlying
+        deferred is resolved. Interacting with the returned deferred does not
+        affect the underlying deferred.
        """
        if not self._result:
            d = defer.Deferred()
@ -105,7 +106,7 @@ class ObservableDeferred(object):
            return d
        else:
            success, res = self._result
-            return res if success else defer.fail(res)
+            return defer.succeed(res) if success else defer.fail(res)

    def observers(self):
        return self._observers
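The observe() change means every caller now gets its own Deferred even when the result is already available, so one consumer can no longer interfere with another. A toy sketch of that contract (a simplified stand-in, not the real ObservableDeferred):

from twisted.internet import defer


class MiniObservable(object):
    """Toy ObservableDeferred: observe() always hands back a fresh
    Deferred, never the shared underlying one."""

    def __init__(self, deferred):
        self._result = None
        self._observers = []
        deferred.addCallback(self._fire)

    def _fire(self, res):
        # Record the result and fan it out to every waiting observer.
        self._result = (True, res)
        for d in self._observers:
            d.callback(res)
        return res

    def observe(self):
        if self._result is None:
            d = defer.Deferred()
            self._observers.append(d)
            return d
        return defer.succeed(self._result[1])


source = defer.Deferred()
obs = MiniObservable(source)
first, second = obs.observe(), obs.observe()
source.callback(42)
assert first is not second  # independent deferreds, same result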
@ -138,7 +139,7 @@ def concurrently_execute(func, args, limit):
    the number of concurrent executions.

    Args:
-        func (func): Function to execute, should return a deferred.
+        func (func): Function to execute, should return a deferred or coroutine.
        args (list): List of arguments to pass to func, each invocation of func
            gets a single argument.
        limit (int): Maximum number of concurrent executions.
@ -148,11 +149,10 @@ def concurrently_execute(func, args, limit):
    """
    it = iter(args)

-    @defer.inlineCallbacks
-    def _concurrently_execute_inner():
+    async def _concurrently_execute_inner():
        try:
            while True:
-                yield func(next(it))
+                await maybe_awaitable(func(next(it)))
        except StopIteration:
            pass
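The rewrite to an async inner function keeps the same worker-pool trick: `limit` coroutines all pull from one shared iterator until it is exhausted. The shape of it, shown with asyncio rather than Twisted purely to keep the demo self-contained:

import asyncio


async def concurrently_execute(func, args, limit):
    it = iter(args)

    async def worker():
        # Each worker drains the shared iterator until it raises
        # StopIteration, which we catch before it escapes the coroutine.
        try:
            while True:
                await func(next(it))
        except StopIteration:
            pass

    await asyncio.gather(*(worker() for _ in range(limit)))


async def main():
    async def show(n):
        await asyncio.sleep(0)
        print(n)

    await concurrently_execute(show, range(5), limit=2)


asyncio.run(main())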
@ -438,7 +438,7 @@ class CacheDescriptor(_CacheDescriptorBase):
            if isinstance(cached_result_d, ObservableDeferred):
                observer = cached_result_d.observe()
            else:
-                observer = cached_result_d
+                observer = defer.succeed(cached_result_d)

        except KeyError:
            ret = defer.maybeDeferred(
@ -482,9 +482,8 @@ class CacheListDescriptor(_CacheDescriptorBase):
    Given a list of keys it looks in the cache to find any hits, then passes
    the list of missing keys to the wrapped function.

-    Once wrapped, the function returns either a Deferred which resolves to
-    the list of results, or (if all results were cached), just the list of
-    results.
+    Once wrapped, the function returns a Deferred which resolves to the list
+    of results.
    """

    def __init__(
@ -618,7 +617,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
            )
            return make_deferred_yieldable(d)
        else:
-            return results
+            return defer.succeed(results)

    obj.__dict__[self.orig.__name__] = wrapped
@ -42,6 +42,7 @@ def get_version_string(module):
    try:
        null = open(os.devnull, "w")
        cwd = os.path.dirname(os.path.abspath(module.__file__))

        try:
            git_branch = (
                subprocess.check_output(
@ -51,7 +52,8 @@ def get_version_string(module):
                .decode("ascii")
            )
            git_branch = "b=" + git_branch
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            # FileNotFoundError can arise when git is not installed
            git_branch = ""

        try:
@ -63,7 +65,7 @@ def get_version_string(module):
                .decode("ascii")
            )
            git_tag = "t=" + git_tag
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
            git_tag = ""

        try:
@ -74,7 +76,7 @@ def get_version_string(module):
                .strip()
                .decode("ascii")
            )
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
            git_commit = ""

        try:
@ -89,7 +91,7 @@ def get_version_string(module):
            )

            git_dirty = "dirty" if is_dirty else ""
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
            git_dirty = ""

        if git_branch or git_tag or git_commit or git_dirty:
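Catching FileNotFoundError alongside CalledProcessError matters because subprocess raises it before the command even runs when the binary is absent. A minimal demonstration of the same guard:

import subprocess

try:
    branch = (
        subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
        .strip()
        .decode("ascii")
    )
except (subprocess.CalledProcessError, FileNotFoundError):
    # CalledProcessError: git exited non-zero (e.g. not a repository).
    # FileNotFoundError: git is not installed at all.
    branch = ""
print(branch or "<unknown>")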
@ -178,7 +178,7 @@ class KeyringTestCase(unittest.HomeserverTestCase):
        kr = keyring.Keyring(self.hs)

        key1 = signedjson.key.generate_signing_key(1)
-        r = self.hs.datastore.store_server_verify_keys(
+        r = self.hs.get_datastore().store_server_verify_keys(
            "server9",
            time.time() * 1000,
            [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))],
@ -209,7 +209,7 @@ class KeyringTestCase(unittest.HomeserverTestCase):
        )

        key1 = signedjson.key.generate_signing_key(1)
-        r = self.hs.datastore.store_server_verify_keys(
+        r = self.hs.get_datastore().store_server_verify_keys(
            "server9",
            time.time() * 1000,
            [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))],
@ -607,6 +607,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
        """

        self.hs.config.stats_enabled = False
+        self.handler.stats_enabled = False

        u1 = self.register_user("u1", "pass")
        u1token = self.login("u1", "pass")
@ -618,6 +619,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
        self.assertIsNone(self._get_current_stats("user", u1))

        self.hs.config.stats_enabled = True
+        self.handler.stats_enabled = True

        self._perform_background_initial_update()
@ -144,6 +144,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
        self.datastore.get_to_device_stream_token = lambda: 0
        self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
        self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
+        self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed(
+            None
+        )

    def test_started_typing_local(self):
        self.room_members = [U_APPLE, U_BANANA]
@ -41,6 +41,7 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase):
    def prepare(self, reactor, clock, hs):

        self.master_store = self.hs.get_datastore()
+        self.storage = hs.get_storage()
        self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs)
        self.event_id = 0
@ -234,7 +234,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
            type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
        )
        msg, msgctx = self.build_event()
-        self.get_success(self.master_store.persist_events([(j2, j2ctx), (msg, msgctx)]))
+        self.get_success(
+            self.storage.persistence.persist_events([(j2, j2ctx), (msg, msgctx)])
+        )
        self.replicate()

        event_source = RoomEventSource(self.hs)
@ -290,10 +292,12 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):

        if backfill:
            self.get_success(
-                self.master_store.persist_events([(event, context)], backfilled=True)
+                self.storage.persistence.persist_events(
+                    [(event, context)], backfilled=True
+                )
            )
        else:
-            self.get_success(self.master_store.persist_event(event, context))
+            self.get_success(self.storage.persistence.persist_event(event, context))

        return event
@@ -197,7 +197,7 @@ class CacheDecoratorTestCase(unittest.TestCase):
 
         a.func.prefill(("foo",), ObservableDeferred(d))
 
-        self.assertEquals(a.func("foo"), d.result)
+        self.assertEquals(a.func("foo").result, d.result)
         self.assertEquals(callcount[0], 0)
 
     @defer.inlineCallbacks
75
tests/storage/test_e2e_room_keys.py
Normal file
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+# sample room_key data for use in the tests
+room_key = {
+    "first_message_index": 1,
+    "forwarded_count": 1,
+    "is_verified": False,
+    "session_data": "SSBBTSBBIEZJU0gK",
+}
+
+
+class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
+    def make_homeserver(self, reactor, clock):
+        hs = self.setup_test_homeserver("server", http_client=None)
+        self.store = hs.get_datastore()
+        return hs
+
+    def test_room_keys_version_delete(self):
+        # test that deleting a room key backup deletes the keys
+        version1 = self.get_success(
+            self.store.create_e2e_room_keys_version(
+                "user_id", {"algorithm": "rot13", "auth_data": {}}
+            )
+        )
+
+        self.get_success(
+            self.store.set_e2e_room_key(
+                "user_id", version1, "room", "session", room_key
+            )
+        )
+
+        version2 = self.get_success(
+            self.store.create_e2e_room_keys_version(
+                "user_id", {"algorithm": "rot13", "auth_data": {}}
+            )
+        )
+
+        self.get_success(
+            self.store.set_e2e_room_key(
+                "user_id", version2, "room", "session", room_key
+            )
+        )
+
+        # make sure the keys were stored properly
+        keys = self.get_success(self.store.get_e2e_room_keys("user_id", version1))
+        self.assertEqual(len(keys["rooms"]), 1)
+
+        keys = self.get_success(self.store.get_e2e_room_keys("user_id", version2))
+        self.assertEqual(len(keys["rooms"]), 1)
+
+        # delete version1
+        self.get_success(self.store.delete_e2e_room_keys_version("user_id", version1))
+
+        # make sure the key from version1 is gone, and the key from version2 is
+        # still there
+        keys = self.get_success(self.store.get_e2e_room_keys("user_id", version1))
+        self.assertEqual(len(keys["rooms"]), 0)
+
+        keys = self.get_success(self.store.get_e2e_room_keys("user_id", version2))
+        self.assertEqual(len(keys["rooms"]), 1)
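One note on the fixture in the new file above: the sample session_data is opaque to the store, but it is ordinary base64, as this quick illustrative check shows (not part of the commit):

    import base64

    # Decode the sample session_data used by the new storage test.
    print(base64.b64decode("SSBBTSBBIEZJU0gK"))  # b'I AM A FISH\n'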
@@ -39,6 +39,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
 
     def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.event_builder_factory = hs.get_event_builder_factory()
         self.event_creation_handler = hs.get_event_creation_handler()
 
@@ -73,7 +74,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             self.event_creation_handler.create_new_client_event(builder)
         )
 
-        self.get_success(self.store.persist_event(event, context))
+        self.get_success(self.storage.persistence.persist_event(event, context))
 
         return event
 
@@ -95,7 +96,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             self.event_creation_handler.create_new_client_event(builder)
         )
 
-        self.get_success(self.store.persist_event(event, context))
+        self.get_success(self.storage.persistence.persist_event(event, context))
 
         return event
 
@@ -116,7 +117,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             self.event_creation_handler.create_new_client_event(builder)
         )
 
-        self.get_success(self.store.persist_event(event, context))
+        self.get_success(self.storage.persistence.persist_event(event, context))
 
         return event
 
@@ -263,7 +264,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             )
         )
 
-        self.get_success(self.store.persist_event(event_1, context_1))
+        self.get_success(self.storage.persistence.persist_event(event_1, context_1))
 
         event_2, context_2 = self.get_success(
             self.event_creation_handler.create_new_client_event(
@@ -282,7 +283,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
                 )
             )
         )
-        self.get_success(self.store.persist_event(event_2, context_2))
+        self.get_success(self.storage.persistence.persist_event(event_2, context_2))
 
         # fetch one of the redactions
         fetched = self.get_success(self.store.get_event(redaction_event_id1))
@@ -62,6 +62,7 @@ class RoomEventsStoreTestCase(unittest.TestCase):
         # Room events need the full datastore, for persist_event() and
         # get_room_state()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.event_factory = hs.get_event_factory()
 
         self.room = RoomID.from_string("!abcde:test")
@@ -72,7 +73,7 @@ class RoomEventsStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def inject_room_event(self, **kwargs):
-        yield self.store.persist_event(
+        yield self.storage.persistence.persist_event(
             self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
         )
 
@@ -44,6 +44,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         # We can't test the RoomMemberStore on its own without the other event
         # storage logic
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.event_builder_factory = hs.get_event_builder_factory()
         self.event_creation_handler = hs.get_event_creation_handler()
 
@@ -70,7 +71,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
             self.event_creation_handler.create_new_client_event(builder)
         )
 
-        self.get_success(self.store.persist_event(event, context))
+        self.get_success(self.storage.persistence.persist_event(event, context))
 
         return event
 
@@ -34,6 +34,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
         hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
 
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.event_builder_factory = hs.get_event_builder_factory()
         self.event_creation_handler = hs.get_event_creation_handler()
 
@@ -63,7 +64,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
             builder
         )
 
-        yield self.store.persist_event(event, context)
+        yield self.storage.persistence.persist_event(event, context)
 
         return event
 
@@ -36,7 +36,8 @@ class MessageAcceptTests(unittest.TestCase):
         # Figure out what the most recent event is
         most_recent = self.successResultOf(
             maybeDeferred(
-                self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+                self.homeserver.get_datastore().get_latest_event_ids_in_room,
+                self.room_id,
             )
         )[0]
 
@@ -75,7 +76,8 @@ class MessageAcceptTests(unittest.TestCase):
         self.assertEqual(
             self.successResultOf(
                 maybeDeferred(
-                    self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+                    self.homeserver.get_datastore().get_latest_event_ids_in_room,
+                    self.room_id,
                 )
             )[0],
             "$join:test.serv",
@@ -97,7 +99,8 @@ class MessageAcceptTests(unittest.TestCase):
         # Figure out what the most recent event is
         most_recent = self.successResultOf(
             maybeDeferred(
-                self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+                self.homeserver.get_datastore().get_latest_event_ids_in_room,
+                self.room_id,
             )
         )[0]
 
@@ -137,6 +140,6 @@ class MessageAcceptTests(unittest.TestCase):
 
         # Make sure the invalid event isn't there
         extrem = maybeDeferred(
-            self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+            self.homeserver.get_datastore().get_latest_event_ids_in_room, self.room_id
         )
         self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
@@ -36,6 +36,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         self.event_creation_handler = self.hs.get_event_creation_handler()
         self.event_builder_factory = self.hs.get_event_builder_factory()
         self.store = self.hs.get_datastore()
+        self.storage = self.hs.get_storage()
 
         yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
 
@@ -137,7 +138,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder
         )
-        yield self.hs.get_datastore().persist_event(event, context)
+        yield self.storage.persistence.persist_event(event, context)
         return event
 
     @defer.inlineCallbacks
@@ -159,7 +160,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             builder
         )
 
-        yield self.hs.get_datastore().persist_event(event, context)
+        yield self.storage.persistence.persist_event(event, context)
         return event
 
     @defer.inlineCallbacks
@@ -180,7 +181,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             builder
         )
 
-        yield self.hs.get_datastore().persist_event(event, context)
+        yield self.storage.persistence.persist_event(event, context)
         return event
 
     @defer.inlineCallbacks
@@ -310,14 +310,14 @@ class DescriptorTestCase(unittest.TestCase):
 
         obj.mock.return_value = ["spam", "eggs"]
         r = obj.fn(1, 2)
-        self.assertEqual(r, ["spam", "eggs"])
+        self.assertEqual(r.result, ["spam", "eggs"])
         obj.mock.assert_called_once_with(1, 2)
         obj.mock.reset_mock()
 
         # a call with different params should call the mock again
         obj.mock.return_value = ["chips"]
         r = obj.fn(1, 3)
-        self.assertEqual(r, ["chips"])
+        self.assertEqual(r.result, ["chips"])
         obj.mock.assert_called_once_with(1, 3)
         obj.mock.reset_mock()
 
@@ -325,9 +325,9 @@ class DescriptorTestCase(unittest.TestCase):
         self.assertEqual(len(obj.fn.cache.cache), 3)
 
         r = obj.fn(1, 2)
-        self.assertEqual(r, ["spam", "eggs"])
+        self.assertEqual(r.result, ["spam", "eggs"])
         r = obj.fn(1, 3)
-        self.assertEqual(r, ["chips"])
+        self.assertEqual(r.result, ["chips"])
         obj.mock.assert_not_called()
 
     def test_cache_iterable_with_sync_exception(self):
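The r.result changes in the two hunks above appear to reflect that the cached function now hands back a Deferred even on a cache hit; a Deferred that has already fired exposes its value on the result attribute, which is what the assertions read. A minimal sketch, assuming only Twisted (not taken from the commit):

    from twisted.internet import defer

    # succeed() returns a Deferred that has already fired with the given value;
    # until a callback consumes it, the value is visible on `.result`.
    d = defer.succeed(["spam", "eggs"])
    assert d.called
    assert d.result == ["spam", "eggs"]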
@@ -325,10 +325,16 @@ def setup_test_homeserver(
         if homeserverToUse.__name__ == "TestHomeServer":
             hs.setup_master()
     else:
+        # If we have been given an explicit datastore we probably want to mock
+        # out the DataStores somehow too. This all feels a bit wrong, but then
+        # mocking the stores feels wrong too.
+        datastores = Mock(datastore=datastore)
+
         hs = homeserverToUse(
             name,
             db_pool=None,
             datastore=datastore,
+            datastores=datastores,
             config=config,
             version_string="Synapse/tests",
             database_engine=db_engine,
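A detail of the hunk above worth spelling out: keyword arguments passed to Mock() become attributes on the resulting mock, so Mock(datastore=datastore) yields an object whose .datastore is exactly the explicitly supplied store. A self-contained sketch (names are illustrative, not from the commit):

    from unittest.mock import Mock

    explicit_datastore = object()  # stand-in for a real DataStore instance
    datastores = Mock(datastore=explicit_datastore)

    # Attribute access resolves to the value given at construction time...
    assert datastores.datastore is explicit_datastore
    # ...while any other attribute is auto-created as a child Mock.
    assert isinstance(datastores.anything_else, Mock)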
@@ -646,7 +652,7 @@ def create_room(hs, room_id, creator_id):
         creator_id (str)
     """
 
-    store = hs.get_datastore()
+    persistence_store = hs.get_storage().persistence
     event_builder_factory = hs.get_event_builder_factory()
     event_creation_handler = hs.get_event_creation_handler()
 
@@ -663,4 +669,4 @@ def create_room(hs, room_id, creator_id):
 
     event, context = yield event_creation_handler.create_new_client_event(builder)
 
-    yield store.persist_event(event, context)
+    yield persistence_store.persist_event(event, context)
6
tox.ini
@@ -114,16 +114,16 @@ skip_install = True
 basepython = python3.6
 deps =
     flake8
-    black
+    black==19.3b0  # We pin so that our tests don't start failing on new releases of black.
 commands =
     python -m black --check --diff .
-    /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
+    /bin/sh -c "flake8 synapse tests scripts scripts-dev synctl {env:PEP8SUFFIX:}"
     {toxinidir}/scripts-dev/config-lint.sh
 
 [testenv:check_isort]
 skip_install = True
 deps = isort
-commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests"
+commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests scripts-dev scripts"
 
 [testenv:check-newsfragment]
 skip_install = True