Merge remote-tracking branch 'upstream/release-v1.63'

This commit is contained in:
Tulir Asokan 2022-07-12 14:31:20 +03:00
commit a026581985
106 changed files with 3262 additions and 1370 deletions

View file

@ -0,0 +1,36 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu

# `block`/`endblock` wrap a section in GitHub Actions log-group markers and
# enable `set -x` tracing inside the group. The alias defines a throwaway
# function and immediately invokes it so that the alias can take arguments
# (plain aliases cannot). Note: alias expansion in non-interactive shells is
# a dash/sh behaviour; this script must keep its #!/bin/sh shebang.
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'

block Set Go Version
  # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
  # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
  # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
  echo "$GOROOT_1_17_X64/bin" >> "$GITHUB_PATH"
  # Add the Go path to the PATH: We need this so we can call gotestfmt.
  # Use $HOME rather than ~: a quoted "~" is written literally and PATH
  # consumers do not reliably tilde-expand entries.
  echo "$HOME/go/bin" >> "$GITHUB_PATH"
endblock

block Install Complement Dependencies
  sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
  # `go install pkg@latest` is the supported way to install a binary
  # (works on Go 1.16+; `go get` of a binary is removed in Go 1.18+).
  go install -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock

block Install custom gotestfmt template
  mkdir -p .gotestfmt/github
  cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock

block Check out Complement
  # Attempt to check out the same branch of Complement as the PR. If it
  # doesn't exist, fallback to HEAD.
  synapse/.ci/scripts/checkout_complement.sh
endblock

View file

@ -7,3 +7,4 @@ root = true
[*.py] [*.py]
indent_style = space indent_style = space
indent_size = 4 indent_size = 4
max_line_length = 88

View file

@ -328,85 +328,21 @@ jobs:
- arrangement: monolith - arrangement: monolith
database: Postgres database: Postgres
- arrangement: workers
database: Postgres
steps: steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse - name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
path: synapse path: synapse
- name: "Install custom gotestfmt template" - name: Prepare Complement's Prerequisites
run: | run: synapse/.ci/scripts/setup_complement_prerequisites.sh
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- run: | - run: |
set -o pipefail set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
# Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
# GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
complement-developonly:
if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
needs: linting-done
runs-on: ubuntu-latest
name: "Complement Workers (develop only)"
steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
run: |
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: "Install custom gotestfmt template"
run: |
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- run: |
set -o pipefail
WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash shell: bash
name: Run Complement Tests name: Run Complement Tests

View file

@ -96,6 +96,51 @@ jobs:
/logs/results.tap /logs/results.tap
/logs/**/*.log* /logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.1.12
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.12 poetry lock --check
working-directory: synapse
- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# open an issue if the build fails, so we know about it. # open an issue if the build fails, so we know about it.
open-issue: open-issue:
if: failure() if: failure()
@ -103,6 +148,7 @@ jobs:
- mypy - mypy
- trial - trial
- sytest - sytest
- complement
runs-on: ubuntu-latest runs-on: ubuntu-latest

View file

@ -1,3 +1,83 @@
Synapse 1.63.0rc1 (2022-07-12)
==============================
Features
--------
- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031))
- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125))
- Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))
Bugfixes
--------
- Fix application service not being able to join remote federated room without a profile set. ([\#13131](https://github.com/matrix-org/synapse/issues/13131))
- Make use of the more robust `get_current_state` in `_get_state_map_for_room` to avoid breakages. ([\#13174](https://github.com/matrix-org/synapse/issues/13174))
- Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0. ([\#13194](https://github.com/matrix-org/synapse/issues/13194))
- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197))
- Fix bug where notification counts would get stuck after a highlighted message. Broke in v1.62.0. ([\#13223](https://github.com/matrix-org/synapse/issues/13223))
- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226))
- Fix MSC3202-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235))
- Fix appservices not receiving room-less EDUs, like presence, if enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236))
Updates to the Docker image
---------------------------
- Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1. ([\#13207](https://github.com/matrix-org/synapse/issues/13207))
Improved Documentation
----------------------
- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077))
- Add documentation for anonymised homeserver statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116))
- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
- Add a link to the configuration manual from the homeserver sample config documentation. ([\#13139](https://github.com/matrix-org/synapse/issues/13139))
- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
Deprecations and Removals
-------------------------
- Remove the obsolete `RoomEventsStoreTestCase`, which had been unused for 8 years. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200))
Internal Changes
----------------
- Add type annotations to `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028))
- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044))
- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222))
- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157))
- Faster room joins: Handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100))
- Add missing type hints to `synapse.logging`. ([\#13103](https://github.com/matrix-org/synapse/issues/13103))
- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113))
- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153))
- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127))
- Only one-line SQL statements for logging and tracing. ([\#13129](https://github.com/matrix-org/synapse/issues/13129))
- Apply ratelimiting earlier in processing of /send request. ([\#13134](https://github.com/matrix-org/synapse/issues/13134))
- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135))
- Add type annotations to `tests.server`. ([\#13136](https://github.com/matrix-org/synapse/issues/13136))
- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158))
- Faster joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144))
- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145))
- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151))
- Add the ability to set the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment when using `complement.sh`. ([\#13152](https://github.com/matrix-org/synapse/issues/13152))
- Improve and fix type hints. ([\#13159](https://github.com/matrix-org/synapse/issues/13159))
- Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167))
- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195))
- Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209))
- Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210))
- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211))
- Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228))
Synapse 1.62.0 (2022-07-05) Synapse 1.62.0 (2022-07-05)
=========================== ===========================

View file

@ -0,0 +1,31 @@
# Creating multiple workers with a bash script
Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:
```sh
#!/bin/bash
# Generate five generic-worker config files (generic_worker1..5.yaml),
# each with a unique worker_name and listener port (8081-8085).
for i in {1..5}
do
  # Use `>` (truncate), not `>>` (append): appending would duplicate the
  # YAML document on every re-run, producing an invalid config file.
  cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 808$i
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
Customise the script to your needs.

6
debian/changelog vendored
View file

@ -1,3 +1,9 @@
matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium
* New Synapse release 1.63.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Jul 2022 11:26:02 +0100
matrix-synapse-py3 (1.62.0) stable; urgency=medium matrix-synapse-py3 (1.62.0) stable; urgency=medium
* New Synapse release 1.62.0. * New Synapse release 1.62.0.

View file

@ -62,7 +62,13 @@ WORKDIR /synapse
# Copy just what we need to run `poetry export`... # Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/ COPY pyproject.toml poetry.lock /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
# If specified, we won't verify the hashes of dependencies.
# This is only needed if the hashes of dependencies cannot be checked for some
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}
### ###
### Stage 1: builder ### Stage 1: builder
@ -85,6 +91,7 @@ RUN \
openssl \ openssl \
rustc \ rustc \
zlib1g-dev \ zlib1g-dev \
git \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# To speed up rebuilds, install all of the dependencies before we copy over # To speed up rebuilds, install all of the dependencies before we copy over

View file

@ -67,6 +67,13 @@ The following environment variables are supported in `generate` mode:
* `UID`, `GID`: the user id and group id to use for creating the data * `UID`, `GID`: the user id and group id to use for creating the data
directories. If unset, and no user is set via `docker run --user`, defaults directories. If unset, and no user is set via `docker run --user`, defaults
to `991`, `991`. to `991`, `991`.
* `SYNAPSE_LOG_LEVEL`: the log level to use (one of `DEBUG`, `INFO`, `WARNING` or `ERROR`).
Defaults to `INFO`.
* `SYNAPSE_LOG_SENSITIVE`: if set and the log level is set to `DEBUG`, Synapse
will log sensitive information such as access tokens.
This should not be needed unless you are a developer attempting to debug something
particularly tricky.
## Postgres ## Postgres

View file

@ -59,6 +59,9 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
synchrotron, \ synchrotron, \
appservice, \ appservice, \
pusher" pusher"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else else
# Empty string here means 'main process only' # Empty string here means 'main process only'
export SYNAPSE_WORKER_TYPES="" export SYNAPSE_WORKER_TYPES=""

View file

@ -81,6 +81,8 @@ rc_invites:
federation_rr_transactions_per_room_per_second: 9999 federation_rr_transactions_per_room_per_second: 9999
allow_device_name_lookup_over_federation: true
## Experimental Features ## ## Experimental Features ##
experimental_features: experimental_features:

View file

@ -1,3 +1,24 @@
{% if use_forking_launcher %}
[program:synapse_fork]
command=/usr/local/bin/python -m synapse.app.complement_fork_starter
{{ main_config_path }}
synapse.app.homeserver
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
{%- for worker in workers %}
-- {{ worker.app }}
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
--config-path=/conf/workers/{{ worker.name }}.yaml
{%- endfor %}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
{% else %}
[program:synapse_main] [program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
--config-path="{{ main_config_path }}" --config-path="{{ main_config_path }}"
@ -28,3 +49,4 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
{% endfor %} {% endfor %}
{% endif %}

View file

@ -2,7 +2,11 @@ version: 1
formatters: formatters:
precise: precise:
{% if include_worker_name_in_log_line %}
format: '{{ worker_name }} | %(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}
handlers: handlers:
{% if LOG_FILE_PATH %} {% if LOG_FILE_PATH %}
@ -45,11 +49,17 @@ handlers:
class: logging.StreamHandler class: logging.StreamHandler
formatter: precise formatter: precise
{% if not SYNAPSE_LOG_SENSITIVE %}
{#
If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
so that DEBUG entries (containing sensitive information) are not emitted.
#}
loggers: loggers:
synapse.storage.SQL: synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive # beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens. # information such as access tokens.
level: INFO level: INFO
{% endif %}
root: root:
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }} level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}

View file

@ -26,6 +26,13 @@
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format. # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified, # * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
# Nginx will be configured to serve TLS on port 8448. # Nginx will be configured to serve TLS on port 8448.
# * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
# only intended for usage in Complement at the moment.
# No stability guarantees are provided.
# * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
# log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
# regardless of the SYNAPSE_LOG_LEVEL setting.
# #
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should # in the project's README), this script may be run multiple times, and functionality should
@ -35,7 +42,7 @@ import os
import subprocess import subprocess
import sys import sys
from pathlib import Path from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
import yaml import yaml
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader
@ -525,6 +532,7 @@ def generate_worker_files(
"/etc/supervisor/conf.d/synapse.conf", "/etc/supervisor/conf.d/synapse.conf",
workers=worker_descriptors, workers=worker_descriptors,
main_config_path=config_path, main_config_path=config_path,
use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
) )
# healthcheck config # healthcheck config
@ -548,18 +556,25 @@ def generate_worker_log_config(
Returns: the path to the generated file Returns: the path to the generated file
""" """
# Check whether we should write worker logs to disk, in addition to the console # Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {} extra_log_template_args: Dict[str, Optional[str]] = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"): if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format( extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"
dir=data_dir, name=worker_name
extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
"SYNAPSE_LOG_SENSITIVE"
) )
# Render and write the file # Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name) log_config_filepath = f"/conf/workers/{worker_name}.log.config"
convert( convert(
"/conf/log.config", "/conf/log.config",
log_config_filepath, log_config_filepath,
worker_name=worker_name, worker_name=worker_name,
**extra_log_template_args, **extra_log_template_args,
include_worker_name_in_log_line=environ.get(
"SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
),
) )
return log_config_filepath return log_config_filepath

View file

@ -110,7 +110,11 @@ def generate_config_from_template(
log_config_file = environ["SYNAPSE_LOG_CONFIG"] log_config_file = environ["SYNAPSE_LOG_CONFIG"]
log("Generating log config file " + log_config_file) log("Generating log config file " + log_config_file)
convert("/conf/log.config", log_config_file, environ) convert(
"/conf/log.config",
log_config_file,
{**environ, "include_worker_name_in_log_line": False},
)
# Hopefully we already have a signing key, but generate one if not. # Hopefully we already have a signing key, but generate one if not.
args = [ args = [

View file

@ -69,6 +69,7 @@
- [Federation](usage/administration/admin_api/federation.md) - [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md) - [Manhole](manhole.md)
- [Monitoring](metrics-howto.md) - [Monitoring](metrics-howto.md)
- [Reporting Anonymised Statistics](usage/administration/monitoring/reporting_anonymised_statistics.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md) - [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md) - [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md) - [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
@ -80,6 +81,7 @@
# Development # Development
- [Contributing Guide](development/contributing_guide.md) - [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md) - [Code Style](code_style.md)
- [Reviewing Code](development/reviews.md)
- [Release Cycle](development/releases.md) - [Release Cycle](development/releases.md)
- [Git Usage](development/git.md) - [Git Usage](development/git.md)
- [Testing]() - [Testing]()

View file

@ -124,9 +124,8 @@ Body parameters:
- `address` - string. Value of third-party ID. - `address` - string. Value of third-party ID.
belonging to a user. belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity - `external_ids` - array, optional. Allow setting the identifier of the external identity
provider for SSO (Single sign-on). Details in provider for SSO (Single sign-on). Details in the configuration manual under the
[Sample Configuration File](../usage/configuration/homeserver_sample_config.html) sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
section `sso` and `oidc_providers`.
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id` - `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
in the homeserver configuration. Note that no error is raised if the provided in the homeserver configuration. Note that no error is raised if the provided
value is not in the homeserver configuration. value is not in the homeserver configuration.

View file

@ -70,82 +70,61 @@ on save as they take a while and can be very resource intensive.
- Avoid wildcard imports (`from synapse.types import *`) and - Avoid wildcard imports (`from synapse.types import *`) and
relative imports (`from .types import UserID`). relative imports (`from .types import UserID`).
## Configuration file format ## Configuration code and documentation format
The [sample configuration file](./sample_config.yaml) acts as a When adding a configuration option to the code, if several settings are grouped into a single dict, ensure that your code
correctly handles the top-level option being set to `None` (as it will be if no sub-options are enabled).
The [configuration manual](usage/configuration/config_documentation.md) acts as a
reference to Synapse's configuration options for server administrators. reference to Synapse's configuration options for server administrators.
Remember that many readers will be unfamiliar with YAML and server Remember that many readers will be unfamiliar with YAML and server
administration in general, so that it is important that the file be as administration in general, so it is important that when you add
easy to understand as possible, which includes following a consistent a configuration option the documentation be as easy to understand as possible, which
format. includes following a consistent format.
Some guidelines follow: Some guidelines follow:
- Sections should be separated with a heading consisting of a single - Each option should be listed in the config manual with the following format:
line prefixed and suffixed with `##`. There should be **two** blank
lines before the section header, and **one** after.
- Each option should be listed in the file with the following format:
- A comment describing the setting. Each line of this comment
should be prefixed with a hash (`#`) and a space.
The comment should describe the default behaviour (ie, what - The name of the option, prefixed by `###`.
- A comment which describes the default behaviour (i.e. what
happens if the setting is omitted), as well as what the effect happens if the setting is omitted), as well as what the effect
will be if the setting is changed. will be if the setting is changed.
- An example setting, using backticks to define the code block
Often, the comment end with something like "uncomment the
following to <do action>".
- A line consisting of only `#`.
- A commented-out example setting, prefixed with only `#`.
For boolean (on/off) options, convention is that this example For boolean (on/off) options, convention is that this example
should be the *opposite* to the default (so the comment will end should be the *opposite* to the default. For other options, the example should give
with "Uncomment the following to enable [or disable] some non-default value which is likely to be useful to the reader.
<feature>." For other options, the example should give some
non-default value which is likely to be useful to the reader.
- There should be a blank line between each option. - There should be a horizontal rule between each option, which can be achieved by adding `---` before and
- Where several settings are grouped into a single dict, *avoid* the after the option.
convention where the whole block is commented out, resulting in
comment lines starting `# #`, as this is hard to read and confusing
to edit. Instead, leave the top-level config option uncommented, and
follow the conventions above for sub-options. Ensure that your code
correctly handles the top-level option being set to `None` (as it
will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
- `true` and `false` are spelt thus (as opposed to `True`, etc.) - `true` and `false` are spelt thus (as opposed to `True`, etc.)
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
(`` ` ``) to refer to configuration options.
Example: Example:
---
### `modules`
Use the `module` sub-option to add a module under `modules` to extend functionality.
The `module` setting then has a sub-option, `config`, which can be used to define some configuration
for the `module`.
Defaults to none.
Example configuration:
```yaml ```yaml
## Frobnication ## modules:
- module: my_super_module.MySuperClass
# The frobnicator will ensure that all requests are fully frobnicated. config:
# To enable it, uncomment the following. do_thing: true
# - module: my_other_super_module.SomeClass
#frobnicator_enabled: true config: {}
# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobincator_frobber: special_frobber
# Settings for the frobber
#
frobber:
# frobbing speed. Defaults to 1.
#
#speed: 10
# frobbing distance. Defaults to 1000.
#
#distance: 100
``` ```
---
Note that the sample configuration is generated from the synapse code Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config.sh`. and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
Making sure that the output from this script matches the desired format Making sure that the output from this script matches the desired format
is left as an exercise for the reader! is left as an exercise for the reader!

View file

@ -309,6 +309,10 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead. - Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres. - Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```
### Prettier formatting with `gotestfmt` ### Prettier formatting with `gotestfmt`
@ -347,7 +351,7 @@ To prepare a Pull Request, please:
3. `git push` your commit to your fork of Synapse; 3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request); 4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request; 5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`. 6. that's it for now, a non-draft pull request will automatically request review from the team;
7. if you need to update your PR, please avoid rebasing and just add new commits to your branch. 7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.
@ -523,10 +527,13 @@ From this point, you should:
1. Look at the results of the CI pipeline. 1. Look at the results of the CI pipeline.
- If there is any error, fix the error. - If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again. 2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
- A pull request is a conversation, if you disagree with the suggestions, please respond and discuss it.
3. Create a new commit with the changes. 3. Create a new commit with the changes.
- Please do NOT overwrite the history. New commits make the reviewer's life easier. - Please do NOT overwrite the history. New commits make the reviewer's life easier.
- Push these commits to your Pull Request. - Push these commits to your Pull Request.
4. Back to 1. 4. Back to 1.
5. Once the pull request is ready for review again please re-request review from whichever developer did your initial
review (or leave a comment in the pull request that you believe all required changes have been done).
Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly! Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!

View file

@ -0,0 +1,41 @@
Some notes on how we do reviews
===============================
The Synapse team works off a shared review queue -- any new pull requests for
Synapse (or related projects) has a review requested from the entire team. Team
members should process this queue using the following rules:
* Any high urgency pull requests (e.g. fixes for broken continuous integration
or fixes for release blockers);
* Follow-up reviews for pull requests which have previously received reviews;
* Any remaining pull requests.
For the latter two categories above, older pull requests should be prioritised.
It is explicit that there is no priority given to pull requests from the team
(vs from the community). If a pull request requires a quick turn around, please
explicitly communicate this via [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)
or as a comment on the pull request.
Once an initial review has been completed and the author has made additional changes,
follow-up reviews should go back to the same reviewer. This helps build a shared
context and conversation between author and reviewer.
As a team we aim to keep the number of inflight pull requests to a minimum to ensure
that ongoing work is finished before starting new work.
Performing a review
-------------------
To communicate to the rest of the team the status of each pull request, team
members should do the following:
* Assign themselves to the pull request (they should be left assigned to the
pull request until it is merged, closed, or they are no longer the reviewer);
* Review the pull request by leaving comments, questions, and suggestions;
* Mark the pull request appropriately (as needing changes or accepted).
If you are unsure about a particular part of the pull request (or are not confident
in your understanding of part of the code) then ask questions or request review
from the team again. When requesting review from the team be sure to leave a comment
with the rationale on why you're putting it back in the queue.

View file

@ -49,9 +49,8 @@ as follows:
* For other installation mechanisms, see the documentation provided by the * For other installation mechanisms, see the documentation provided by the
maintainer. maintainer.
To enable the JSON web token integration, you should then add a `jwt_config` section To enable the JSON web token integration, you should then add a `jwt_config` option
to your configuration file (or uncomment the `enabled: true` line in the to your configuration file. See the [configuration manual](usage/configuration/config_documentation.md#jwt_config) for some
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings. sample settings.
## How to test JWT as a developer ## How to test JWT as a developer

View file

@ -13,8 +13,10 @@ environments where untrusted users have shell access.
## Configuring the manhole ## Configuring the manhole
To enable it, first uncomment the `manhole` listener configuration in To enable it, first add the `manhole` listener configuration in your
`homeserver.yaml`. The configuration is slightly different if you're using docker. `homeserver.yaml`. You can find information on how to do that
in the [configuration manual](usage/configuration/config_documentation.md#manhole_settings).
The configuration is slightly different if you're using docker.
#### Docker config #### Docker config

View file

@ -49,9 +49,9 @@ clients.
## Server configuration ## Server configuration
Support for this feature can be enabled and configured in the Support for this feature can be enabled and configured by adding the
`retention` section of the Synapse configuration file (see the `retention` option in the Synapse configuration file (see
[sample file](https://github.com/matrix-org/synapse/blob/v1.36.0/docs/sample_config.yaml#L451-L518)). [configuration manual](usage/configuration/config_documentation.md#retention)).
To enable support for message retention policies, set the setting To enable support for message retention policies, set the setting
`enabled` in this section to `true`. `enabled` in this section to `true`.
@ -65,8 +65,8 @@ message retention policy configured in its state. This allows server
admins to ensure that messages are never kept indefinitely in a server's admins to ensure that messages are never kept indefinitely in a server's
database. database.
A default policy can be defined as such, in the `retention` section of A default policy can be defined as such, by adding the `retention` option in
the configuration file: the configuration file and adding these sub-options:
```yaml ```yaml
default_policy: default_policy:
@ -86,8 +86,8 @@ Purge jobs are the jobs that Synapse runs in the background to purge
expired events from the database. They are only run if support for expired events from the database. They are only run if support for
message retention policies is enabled in the server's configuration. If message retention policies is enabled in the server's configuration. If
no configuration for purge jobs is configured by the server admin, no configuration for purge jobs is configured by the server admin,
Synapse will use a default configuration, which is described in the Synapse will use a default configuration, which is described here in the
[sample configuration file](https://github.com/matrix-org/synapse/blob/v1.36.0/docs/sample_config.yaml#L451-L518). [configuration manual](usage/configuration/config_documentation.md#retention).
Some server admins might want a finer control on when events are removed Some server admins might want a finer control on when events are removed
depending on an event's room's policy. This can be done by setting the depending on an event's room's policy. This can be done by setting the
@ -137,8 +137,8 @@ the server's database.
### Lifetime limits ### Lifetime limits
Server admins can set limits on the values of `max_lifetime` to use when Server admins can set limits on the values of `max_lifetime` to use when
purging old events in a room. These limits can be defined as such in the purging old events in a room. These limits can be defined under the
`retention` section of the configuration file: `retention` option in the configuration file:
```yaml ```yaml
allowed_lifetime_min: 1d allowed_lifetime_min: 1d

View file

@ -45,8 +45,8 @@ as follows:
maintainer. maintainer.
To enable the OpenID integration, you should then add a section to the `oidc_providers` To enable the OpenID integration, you should then add a section to the `oidc_providers`
setting in your configuration file (or uncomment one of the existing examples). setting in your configuration file.
See [sample_config.yaml](./sample_config.yaml) for some sample settings, as well as See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers. the text below for example configurations for specific providers.
## Sample configs ## Sample configs

View file

@ -143,6 +143,14 @@ to do step 2.
It is safe to at any time kill the port script and restart it. It is safe to at any time kill the port script and restart it.
However, under no circumstances should the SQLite database be `VACUUM`ed between
multiple runs of the script. Doing so can lead to an inconsistent copy of your database
into Postgres.
To avoid accidental error, the script will check that SQLite's `auto_vacuum` mechanism
is disabled, but the script is not able to protect against a manual `VACUUM` operation
performed either by the administrator or by any automated task that the administrator
may have configured.
Note that the database may take up significantly more (25% - 100% more) Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres. space on disk after porting to Postgres.

View file

@ -66,8 +66,8 @@ in Synapse can be deactivated.
**NOTE**: This has an impact on security and is for testing purposes only! **NOTE**: This has an impact on security and is for testing purposes only!
To deactivate the certificate validation, the following setting must be made in To deactivate the certificate validation, the following setting must be added to
[homeserver.yaml](../usage/configuration/homeserver_sample_config.md). your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).
```yaml ```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true use_insecure_ssl_client_just_for_testing_do_not_use: true

View file

@ -232,7 +232,9 @@ python -m synapse.app.homeserver \
--report-stats=[yes|no] --report-stats=[yes|no]
``` ```
... substituting an appropriate value for `--server-name`. ... substituting an appropriate value for `--server-name` and choosing whether
or not to report usage statistics (hostname, Synapse version, uptime, total
users, etc.) to the developers via the `--report-stats` argument.
This command will generate you a config file that you can then customise, but it will This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to also generate a set of keys for you. These keys will allow your homeserver to
@ -405,11 +407,11 @@ The recommended way to do so is to set up a reverse proxy on port
Alternatively, you can configure Synapse to expose an HTTPS port. To do Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows: so, you will need to edit `homeserver.yaml`, as follows:
- First, under the `listeners` section, uncomment the configuration for the - First, under the `listeners` option, add the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) at the start of TLS-enabled listener like so:
each line). The relevant lines are like this:
```yaml ```yaml
listeners:
- port: 8448 - port: 8448
type: http type: http
tls: true tls: true
@ -417,9 +419,11 @@ so, you will need to edit `homeserver.yaml`, as follows:
- names: [client, federation] - names: [client, federation]
``` ```
- You will also need to uncomment the `tls_certificate_path` and - You will also need to add the options `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You will need to manage `tls_private_key_path` to your configuration file. You will need to manage provisioning of
provisioning of these certificates yourself. these certificates yourself.
- You can find more information about these options as well as how to configure synapse in the
[configuration manual](../usage/configuration/config_documentation.md).
If you are using your own certificate, be sure to use a `.pem` file that If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates includes the full certificate chain including any intermediate certificates

View file

@ -0,0 +1,81 @@
# Reporting Anonymised Statistics
When generating your Synapse configuration file, you are asked whether you
would like to report anonymised statistics to Matrix.org. These statistics
provide a glimpse into the number of Synapse homeservers
participating in the network, as well as statistics such as the number of
rooms being created and messages being sent. This feature is sometimes
affectionately called "phone-home" stats. Reporting
[is optional](../../configuration/config_documentation.md#report_stats)
and the reporting endpoint
[can be configured](../../configuration/config_documentation.md#report_stats_endpoint),
in case you would like to instead report statistics from a set of homeservers
to your own infrastructure.
This documentation aims to define the statistics available and the
homeserver configuration options that exist to tweak it.
## Available Statistics
The following statistics are sent to the configured reporting endpoint:
| Statistic Name | Type | Description |
|----------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `memory_rss` | int | The memory usage of the process (in kilobytes on Unix-based systems, bytes on MacOS). |
| `cpu_average` | int | CPU time in % of a single core (not % of all cores). |
| `homeserver` | string | The homeserver's server name. |
| `server_context` | string | An arbitrary string used to group statistics from a set of homeservers. |
| `timestamp` | int | The current time, represented as the number of seconds since the epoch. |
| `uptime_seconds` | int | The number of seconds since the homeserver was last started. |
| `python_version` | string | The Python version number in use (e.g "3.7.1"). Taken from `sys.version_info`. |
| `total_users` | int | The number of registered users on the homeserver. |
| `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. |
| `daily_user_type_native` | int | The number of native users created in the last 24 hours. |
| `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. |
| `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. |
| `total_room_count` | int | The total number of rooms present on the homeserver. |
| `daily_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 24 hours. |
| `monthly_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 30 days. |
| `daily_active_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.message` sent in them in the last 24 hours. |
| `daily_active_e2ee_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.encrypted` sent in them in the last 24 hours. |
| `daily_messages` | int | The number of (state) events with the type `m.room.message` seen in the last 24 hours. |
| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. |
| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. |
| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. |
| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. |
| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. |
| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. |
| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. |
| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. |
| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. |
| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. |
| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. |
| `r30v2_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "electron" (case-insensitive) in the user agent string. |
| `r30v2_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "mozilla" or "gecko" (case-insensitive) in the user agent string. |
| `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. |
| `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. |
| `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. |
| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.10, and "3.38.5" for SQLite 3.38.5 installed on the system. |
| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
[^1]: Native matrix users and guests are always counted. If the
[`track_puppeted_user_ips`](../../configuration/config_documentation.md#track_puppeted_user_ips)
option is set to `true`, "puppeted" users (users that an Application Service have performed
[an action on behalf of](https://spec.matrix.org/v1.3/application-service-api/#identity-assertion))
will also be counted. Note that an Application Service can "puppet" any user in their
[user namespace](https://spec.matrix.org/v1.3/application-service-api/#registration),
not only users that the Application Service has created. If this happens, the Application Service
will additionally be counted as a user (irrespective of `track_puppeted_user_ips`).
## Using a Custom Statistics Collection Server
If statistics reporting is enabled, the endpoint that Synapse sends metrics to is configured by the
[`report_stats_endpoint`](../../configuration/config_documentation.md#report_stats_endpoint) config
option. By default, statistics are sent to Matrix.org.
If you would like to set up your own statistics collection server and send metrics there, you may
consider using one of the following known implementations:
* [Matrix.org's Panopticon](https://github.com/matrix-org/panopticon)
* [Famedly's Barad-dûr](https://gitlab.com/famedly/company/devops/services/barad-dur)

View file

@ -317,7 +317,7 @@ Example configuration:
allow_public_rooms_without_auth: true allow_public_rooms_without_auth: true
``` ```
--- ---
### `allow_public_rooms_without_auth` ### `allow_public_rooms_over_federation`
If set to true, allows any other homeserver to fetch the server's public If set to true, allows any other homeserver to fetch the server's public
rooms directory via federation. Defaults to false. rooms directory via federation. Defaults to false.
@ -591,7 +591,7 @@ Example configuration:
dummy_events_threshold: 5 dummy_events_threshold: 5
``` ```
--- ---
Config option `delete_stale_devices_after` ### `delete_stale_devices_after`
An optional duration. If set, Synapse will run a daily background task to log out and An optional duration. If set, Synapse will run a daily background task to log out and
delete any device that hasn't been accessed for more than the specified amount of time. delete any device that hasn't been accessed for more than the specified amount of time.
@ -1257,6 +1257,98 @@ database:
cp_max: 10 cp_max: 10
``` ```
--- ---
### `databases`
The `databases` option allows specifying a mapping between certain database tables and
database host details, spreading the load of a single Synapse instance across multiple
database backends. This is often referred to as "database sharding". This option is only
supported for PostgreSQL database backends.
**Important note:** This is a supported option, but is not currently used in production by the
Matrix.org Foundation. Proceed with caution and always make backups.
`databases` is a dictionary of arbitrarily-named database entries. Each entry is equivalent
to the value of the `database` homeserver config option (see above), with the addition of
a `data_stores` key. `data_stores` is an array of strings that specifies the data store(s)
(a defined label for a set of tables) that should be stored on the associated database
backend entry.
The currently defined values for `data_stores` are:
* `"state"`: Database that relates to state groups will be stored in this database.
Specifically, that means the following tables:
* `state_groups`
* `state_group_edges`
* `state_groups_state`
And the following sequences:
* `state_groups_seq_id`
* `"main"`: All other database tables and sequences.
All databases will end up with additional tables used for tracking database schema migrations
and any pending background updates. Synapse will create these automatically on startup when checking for
and/or performing database schema migrations.
To migrate an existing database configuration (e.g. all tables on a single database) to a different
configuration (e.g. the "main" data store on one database, and "state" on another), do the following:
1. Take a backup of your existing database. Things can and do go wrong and database corruption is no joke!
2. Ensure all pending database migrations have been applied and background updates have run. The simplest
way to do this is to use the `update_synapse_database` script supplied with your Synapse installation.
```sh
update_synapse_database --database-config homeserver.yaml --run-background-updates
```
3. Copy over the necessary tables and sequences from one database to the other. Tables relating to database
migrations, schemas, schema versions and background updates should **not** be copied.
As an example, say that you'd like to split out the "state" data store from an existing database which
currently contains all data stores.
Simply copy the tables and sequences defined above for the "state" datastore from the existing database
to the secondary database. As noted above, additional tables will be created in the secondary database
when Synapse is started.
4. Modify/create the `databases` option in your `homeserver.yaml` to match the desired database configuration.
5. Start Synapse. Check that it starts up successfully and that things generally seem to be working.
6. Drop the old tables that were copied in step 3.
Only one of the options `database` or `databases` may be specified in your config, but not both.
Example configuration:
```yaml
databases:
basement_box:
name: psycopg2
txn_limit: 10000
data_stores: ["main"]
args:
user: synapse_user
password: secretpassword
database: synapse_main
host: localhost
port: 5432
cp_min: 5
cp_max: 10
my_other_database:
name: psycopg2
txn_limit: 10000
data_stores: ["state"]
args:
user: synapse_user
password: secretpassword
database: synapse_state
host: localhost
port: 5432
cp_min: 5
cp_max: 10
```
---
## Logging ## ## Logging ##
Config options related to logging. Config options related to logging.
@ -1843,7 +1935,7 @@ Example configuration:
turn_shared_secret: "YOUR_SHARED_SECRET" turn_shared_secret: "YOUR_SHARED_SECRET"
``` ```
---- ----
Config options: `turn_username` and `turn_password` ### `turn_username` and `turn_password`
The Username and password if the TURN server needs them and does not use a token. The Username and password if the TURN server needs them and does not use a token.
@ -2999,7 +3091,7 @@ This setting has the following sub-options:
* `localdb_enabled`: Set to false to disable authentication against the local password * `localdb_enabled`: Set to false to disable authentication against the local password
database. This is ignored if `enabled` is false, and is only useful database. This is ignored if `enabled` is false, and is only useful
if you have other `password_providers`. Defaults to true. if you have other `password_providers`. Defaults to true.
* `pepper`: Set the value here to a secret random string for extra security. # Uncomment and change to a secret random string for extra security. * `pepper`: Set the value here to a secret random string for extra security.
DO NOT CHANGE THIS AFTER INITIAL SETUP! DO NOT CHANGE THIS AFTER INITIAL SETUP!
* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. * `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc.
Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows: Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows:
@ -3373,7 +3465,7 @@ alias_creation_rules:
action: deny action: deny
``` ```
--- ---
Config options: `room_list_publication_rules` ### `room_list_publication_rules`
The `room_list_publication_rules` option controls who can publish and The `room_list_publication_rules` option controls who can publish and
which rooms can be published in the public room list. which rooms can be published in the public room list.

View file

@ -9,6 +9,9 @@ a real homeserver.yaml. Instead, if you are starting from scratch, please genera
a fresh config using Synapse by following the instructions in a fresh config using Synapse by following the instructions in
[Installation](../../setup/installation.md). [Installation](../../setup/installation.md).
Documentation for all configuration options can be found in the
[Configuration Manual](./config_documentation.md).
```yaml ```yaml
{{#include ../../sample_config.yaml}} {{#include ../../sample_config.yaml}}
``` ```

View file

@ -4,5 +4,5 @@ Synapse supports authenticating users via the [Central Authentication
Service protocol](https://en.wikipedia.org/wiki/Central_Authentication_Service) Service protocol](https://en.wikipedia.org/wiki/Central_Authentication_Service)
(CAS) natively. (CAS) natively.
Please see the `cas_config` and `sso` sections of the [Synapse configuration Please see the [cas_config](../../../configuration/config_documentation.md#cas_config) and [sso](../../../configuration/config_documentation.md#sso)
file](../../../configuration/homeserver_sample_config.md) for more details. sections of the configuration manual for more details.

View file

@ -56,7 +56,6 @@ exclude = (?x)
|tests/server.py |tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py |tests/server_notices/test_resource_limits_server_notices.py
|tests/test_metrics.py |tests/test_metrics.py
|tests/test_server.py
|tests/test_state.py |tests/test_state.py
|tests/test_terms_auth.py |tests/test_terms_auth.py
|tests/util/caches/test_cached_call.py |tests/util/caches/test_cached_call.py
@ -74,7 +73,6 @@ exclude = (?x)
|tests/util/test_lrucache.py |tests/util/test_lrucache.py
|tests/util/test_rwlock.py |tests/util/test_rwlock.py
|tests/util/test_wheel_timer.py |tests/util/test_wheel_timer.py
|tests/utils.py
)$ )$
[mypy-synapse.federation.transport.client] [mypy-synapse.federation.transport.client]
@ -89,9 +87,6 @@ disallow_untyped_defs = False
[mypy-synapse.logging.opentracing] [mypy-synapse.logging.opentracing]
disallow_untyped_defs = False disallow_untyped_defs = False
[mypy-synapse.logging.scopecontextmanager]
disallow_untyped_defs = False
[mypy-synapse.metrics._reactor_metrics] [mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS. # This module imports select.epoll. That exists on Linux, but doesn't on macOS.
@ -131,6 +126,9 @@ disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client] [mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True disallow_untyped_defs = True
[mypy-tests.utils]
disallow_untyped_defs = True
;; Dependencies without annotations ;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available. ;; Before ignoring a module, check to see if type stubs are available.

133
poetry.lock generated
View file

@ -502,7 +502,7 @@ pyasn1 = ">=0.4.6"
[[package]] [[package]]
name = "lxml" name = "lxml"
version = "4.8.0" version = "4.9.1"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
category = "main" category = "main"
optional = true optional = true
@ -1937,67 +1937,76 @@ ldap3 = [
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
] ]
lxml = [ lxml = [
{file = "lxml-4.8.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:e1ab2fac607842ac36864e358c42feb0960ae62c34aa4caaf12ada0a1fb5d99b"}, {file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"},
{file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28d1af847786f68bec57961f31221125c29d6f52d9187c01cd34dc14e2b29430"}, {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"},
{file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b92d40121dcbd74831b690a75533da703750f7041b4bf951befc657c37e5695a"}, {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"},
{file = "lxml-4.8.0-cp27-cp27m-win32.whl", hash = "sha256:e01f9531ba5420838c801c21c1b0f45dbc9607cb22ea2cf132844453bec863a5"}, {file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"},
{file = "lxml-4.8.0-cp27-cp27m-win_amd64.whl", hash = "sha256:6259b511b0f2527e6d55ad87acc1c07b3cbffc3d5e050d7e7bcfa151b8202df9"}, {file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"},
{file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1010042bfcac2b2dc6098260a2ed022968dbdfaf285fc65a3acf8e4eb1ffd1bc"}, {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"},
{file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fa56bb08b3dd8eac3a8c5b7d075c94e74f755fd9d8a04543ae8d37b1612dd170"}, {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"},
{file = "lxml-4.8.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:31ba2cbc64516dcdd6c24418daa7abff989ddf3ba6d3ea6f6ce6f2ed6e754ec9"}, {file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"},
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:31499847fc5f73ee17dbe1b8e24c6dafc4e8d5b48803d17d22988976b0171f03"}, {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"},
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5f7d7d9afc7b293147e2d506a4596641d60181a35279ef3aa5778d0d9d9123fe"}, {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"},
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a3c5f1a719aa11866ffc530d54ad965063a8cbbecae6515acbd5f0fae8f48eaa"}, {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"},
{file = "lxml-4.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6268e27873a3d191849204d00d03f65c0e343b3bcb518a6eaae05677c95621d1"}, {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"},
{file = "lxml-4.8.0-cp310-cp310-win32.whl", hash = "sha256:330bff92c26d4aee79c5bc4d9967858bdbe73fdbdbacb5daf623a03a914fe05b"}, {file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"},
{file = "lxml-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2582b238e1658c4061ebe1b4df53c435190d22457642377fd0cb30685cdfb76"}, {file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"},
{file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2bfc7e2a0601b475477c954bf167dee6d0f55cb167e3f3e7cefad906e7759f6"}, {file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"},
{file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a1547ff4b8a833511eeaceacbcd17b043214fcdb385148f9c1bc5556ca9623e2"}, {file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"},
{file = "lxml-4.8.0-cp35-cp35m-win32.whl", hash = "sha256:a9f1c3489736ff8e1c7652e9dc39f80cff820f23624f23d9eab6e122ac99b150"}, {file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"},
{file = "lxml-4.8.0-cp35-cp35m-win_amd64.whl", hash = "sha256:530f278849031b0eb12f46cca0e5db01cfe5177ab13bd6878c6e739319bae654"}, {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"},
{file = "lxml-4.8.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:078306d19a33920004addeb5f4630781aaeabb6a8d01398045fcde085091a169"}, {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"},
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:86545e351e879d0b72b620db6a3b96346921fa87b3d366d6c074e5a9a0b8dadb"}, {file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"},
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24f5c5ae618395ed871b3d8ebfcbb36e3f1091fd847bf54c4de623f9107942f3"}, {file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"},
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:bbab6faf6568484707acc052f4dfc3802bdb0cafe079383fbaa23f1cdae9ecd4"}, {file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"},
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7993232bd4044392c47779a3c7e8889fea6883be46281d45a81451acfd704d7e"}, {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"},
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d6483b1229470e1d8835e52e0ff3c6973b9b97b24cd1c116dca90b57a2cc613"}, {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"},
{file = "lxml-4.8.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ad4332a532e2d5acb231a2e5d33f943750091ee435daffca3fec0a53224e7e33"}, {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"},
{file = "lxml-4.8.0-cp36-cp36m-win32.whl", hash = "sha256:db3535733f59e5605a88a706824dfcb9bd06725e709ecb017e165fc1d6e7d429"}, {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"},
{file = "lxml-4.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5f148b0c6133fb928503cfcdfdba395010f997aa44bcf6474fcdd0c5398d9b63"}, {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"},
{file = "lxml-4.8.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:8a31f24e2a0b6317f33aafbb2f0895c0bce772980ae60c2c640d82caac49628a"}, {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"},
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:719544565c2937c21a6f76d520e6e52b726d132815adb3447ccffbe9f44203c4"}, {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"},
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:c0b88ed1ae66777a798dc54f627e32d3b81c8009967c63993c450ee4cbcbec15"}, {file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"},
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fa9b7c450be85bfc6cd39f6df8c5b8cbd76b5d6fc1f69efec80203f9894b885f"}, {file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"},
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9f84ed9f4d50b74fbc77298ee5c870f67cb7e91dcdc1a6915cb1ff6a317476c"}, {file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"},
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1d650812b52d98679ed6c6b3b55cbb8fe5a5460a0aef29aeb08dc0b44577df85"}, {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"},
{file = "lxml-4.8.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:80bbaddf2baab7e6de4bc47405e34948e694a9efe0861c61cdc23aa774fcb141"}, {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"},
{file = "lxml-4.8.0-cp37-cp37m-win32.whl", hash = "sha256:6f7b82934c08e28a2d537d870293236b1000d94d0b4583825ab9649aef7ddf63"}, {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"},
{file = "lxml-4.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e1fd7d2fe11f1cb63d3336d147c852f6d07de0d0020d704c6031b46a30b02ca8"}, {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"},
{file = "lxml-4.8.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:5045ee1ccd45a89c4daec1160217d363fcd23811e26734688007c26f28c9e9e7"}, {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"},
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0c1978ff1fd81ed9dcbba4f91cf09faf1f8082c9d72eb122e92294716c605428"}, {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"},
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cbf2ff155b19dc4d4100f7442f6a697938bf4493f8d3b0c51d45568d5666b5"}, {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"},
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ce13d6291a5f47c1c8dbd375baa78551053bc6b5e5c0e9bb8e39c0a8359fd52f"}, {file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"},
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11527dc23d5ef44d76fef11213215c34f36af1608074561fcc561d983aeb870"}, {file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"},
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60d2f60bd5a2a979df28ab309352cdcf8181bda0cca4529769a945f09aba06f9"}, {file = "lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"},
{file = "lxml-4.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:62f93eac69ec0f4be98d1b96f4d6b964855b8255c345c17ff12c20b93f247b68"}, {file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"},
{file = "lxml-4.8.0-cp38-cp38-win32.whl", hash = "sha256:20b8a746a026017acf07da39fdb10aa80ad9877046c9182442bf80c84a1c4696"}, {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"},
{file = "lxml-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:891dc8f522d7059ff0024cd3ae79fd224752676447f9c678f2a5c14b84d9a939"}, {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"},
{file = "lxml-4.8.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b6fc2e2fb6f532cf48b5fed57567ef286addcef38c28874458a41b7837a57807"}, {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"},
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:74eb65ec61e3c7c019d7169387d1b6ffcfea1b9ec5894d116a9a903636e4a0b1"}, {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"},
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:627e79894770783c129cc5e89b947e52aa26e8e0557c7e205368a809da4b7939"}, {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"},
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:545bd39c9481f2e3f2727c78c169425efbfb3fbba6e7db4f46a80ebb249819ca"}, {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"},
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a58d0b12f5053e270510bf12f753a76aaf3d74c453c00942ed7d2c804ca845c"}, {file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"},
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ec4b4e75fc68da9dc0ed73dcdb431c25c57775383fec325d23a770a64e7ebc87"}, {file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"},
{file = "lxml-4.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5804e04feb4e61babf3911c2a974a5b86f66ee227cc5006230b00ac6d285b3a9"}, {file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"},
{file = "lxml-4.8.0-cp39-cp39-win32.whl", hash = "sha256:aa0cf4922da7a3c905d000b35065df6184c0dc1d866dd3b86fd961905bbad2ea"}, {file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"},
{file = "lxml-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:dd10383f1d6b7edf247d0960a3db274c07e96cf3a3fc7c41c8448f93eac3fb1c"}, {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"},
{file = "lxml-4.8.0-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:2403a6d6fb61c285969b71f4a3527873fe93fd0abe0832d858a17fe68c8fa507"}, {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"},
{file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:986b7a96228c9b4942ec420eff37556c5777bfba6758edcb95421e4a614b57f9"}, {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"},
{file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6fe4ef4402df0250b75ba876c3795510d782def5c1e63890bde02d622570d39e"}, {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"},
{file = "lxml-4.8.0-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:f10ce66fcdeb3543df51d423ede7e238be98412232fca5daec3e54bcd16b8da0"}, {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"},
{file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:730766072fd5dcb219dd2b95c4c49752a54f00157f322bc6d71f7d2a31fecd79"}, {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"},
{file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8b99ec73073b37f9ebe8caf399001848fced9c08064effdbfc4da2b5a8d07b93"}, {file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"},
{file = "lxml-4.8.0.tar.gz", hash = "sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23"}, {file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"},
{file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"},
{file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"},
{file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"},
{file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"},
{file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"},
{file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"},
{file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"},
{file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"},
{file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"},
] ]
markupsafe = [ markupsafe = [
{file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"},

View file

@ -54,7 +54,7 @@ skip_gitignore = true
[tool.poetry] [tool.poetry]
name = "matrix-synapse" name = "matrix-synapse"
version = "1.62.0" version = "1.63.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol" description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0" license = "Apache-2.0"

View file

@ -249,68 +249,77 @@ jinja2==3.0.3 \
jsonschema==4.4.0 \ jsonschema==4.4.0 \
--hash=sha256:77281a1f71684953ee8b3d488371b162419767973789272434bbc3f29d9c8823 \ --hash=sha256:77281a1f71684953ee8b3d488371b162419767973789272434bbc3f29d9c8823 \
--hash=sha256:636694eb41b3535ed608fe04129f26542b59ed99808b4f688aa32dcf55317a83 --hash=sha256:636694eb41b3535ed608fe04129f26542b59ed99808b4f688aa32dcf55317a83
lxml==4.8.0 \ lxml==4.9.1 \
--hash=sha256:e1ab2fac607842ac36864e358c42feb0960ae62c34aa4caaf12ada0a1fb5d99b \ --hash=sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed \
--hash=sha256:28d1af847786f68bec57961f31221125c29d6f52d9187c01cd34dc14e2b29430 \ --hash=sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc \
--hash=sha256:b92d40121dcbd74831b690a75533da703750f7041b4bf951befc657c37e5695a \ --hash=sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc \
--hash=sha256:e01f9531ba5420838c801c21c1b0f45dbc9607cb22ea2cf132844453bec863a5 \ --hash=sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3 \
--hash=sha256:6259b511b0f2527e6d55ad87acc1c07b3cbffc3d5e050d7e7bcfa151b8202df9 \ --hash=sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627 \
--hash=sha256:1010042bfcac2b2dc6098260a2ed022968dbdfaf285fc65a3acf8e4eb1ffd1bc \ --hash=sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84 \
--hash=sha256:fa56bb08b3dd8eac3a8c5b7d075c94e74f755fd9d8a04543ae8d37b1612dd170 \ --hash=sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837 \
--hash=sha256:31ba2cbc64516dcdd6c24418daa7abff989ddf3ba6d3ea6f6ce6f2ed6e754ec9 \ --hash=sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad \
--hash=sha256:31499847fc5f73ee17dbe1b8e24c6dafc4e8d5b48803d17d22988976b0171f03 \ --hash=sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5 \
--hash=sha256:5f7d7d9afc7b293147e2d506a4596641d60181a35279ef3aa5778d0d9d9123fe \ --hash=sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8 \
--hash=sha256:a3c5f1a719aa11866ffc530d54ad965063a8cbbecae6515acbd5f0fae8f48eaa \ --hash=sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8 \
--hash=sha256:6268e27873a3d191849204d00d03f65c0e343b3bcb518a6eaae05677c95621d1 \ --hash=sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d \
--hash=sha256:330bff92c26d4aee79c5bc4d9967858bdbe73fdbdbacb5daf623a03a914fe05b \ --hash=sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7 \
--hash=sha256:b2582b238e1658c4061ebe1b4df53c435190d22457642377fd0cb30685cdfb76 \ --hash=sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b \
--hash=sha256:a2bfc7e2a0601b475477c954bf167dee6d0f55cb167e3f3e7cefad906e7759f6 \ --hash=sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d \
--hash=sha256:a1547ff4b8a833511eeaceacbcd17b043214fcdb385148f9c1bc5556ca9623e2 \ --hash=sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3 \
--hash=sha256:a9f1c3489736ff8e1c7652e9dc39f80cff820f23624f23d9eab6e122ac99b150 \ --hash=sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29 \
--hash=sha256:530f278849031b0eb12f46cca0e5db01cfe5177ab13bd6878c6e739319bae654 \ --hash=sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d \
--hash=sha256:078306d19a33920004addeb5f4630781aaeabb6a8d01398045fcde085091a169 \ --hash=sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318 \
--hash=sha256:86545e351e879d0b72b620db6a3b96346921fa87b3d366d6c074e5a9a0b8dadb \ --hash=sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7 \
--hash=sha256:24f5c5ae618395ed871b3d8ebfcbb36e3f1091fd847bf54c4de623f9107942f3 \ --hash=sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4 \
--hash=sha256:bbab6faf6568484707acc052f4dfc3802bdb0cafe079383fbaa23f1cdae9ecd4 \ --hash=sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb \
--hash=sha256:7993232bd4044392c47779a3c7e8889fea6883be46281d45a81451acfd704d7e \ --hash=sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067 \
--hash=sha256:6d6483b1229470e1d8835e52e0ff3c6973b9b97b24cd1c116dca90b57a2cc613 \ --hash=sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536 \
--hash=sha256:ad4332a532e2d5acb231a2e5d33f943750091ee435daffca3fec0a53224e7e33 \ --hash=sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8 \
--hash=sha256:db3535733f59e5605a88a706824dfcb9bd06725e709ecb017e165fc1d6e7d429 \ --hash=sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b \
--hash=sha256:5f148b0c6133fb928503cfcdfdba395010f997aa44bcf6474fcdd0c5398d9b63 \ --hash=sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf \
--hash=sha256:8a31f24e2a0b6317f33aafbb2f0895c0bce772980ae60c2c640d82caac49628a \ --hash=sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3 \
--hash=sha256:719544565c2937c21a6f76d520e6e52b726d132815adb3447ccffbe9f44203c4 \ --hash=sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391 \
--hash=sha256:c0b88ed1ae66777a798dc54f627e32d3b81c8009967c63993c450ee4cbcbec15 \ --hash=sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e \
--hash=sha256:fa9b7c450be85bfc6cd39f6df8c5b8cbd76b5d6fc1f69efec80203f9894b885f \ --hash=sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7 \
--hash=sha256:e9f84ed9f4d50b74fbc77298ee5c870f67cb7e91dcdc1a6915cb1ff6a317476c \ --hash=sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2 \
--hash=sha256:1d650812b52d98679ed6c6b3b55cbb8fe5a5460a0aef29aeb08dc0b44577df85 \ --hash=sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc \
--hash=sha256:80bbaddf2baab7e6de4bc47405e34948e694a9efe0861c61cdc23aa774fcb141 \ --hash=sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c \
--hash=sha256:6f7b82934c08e28a2d537d870293236b1000d94d0b4583825ab9649aef7ddf63 \ --hash=sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4 \
--hash=sha256:e1fd7d2fe11f1cb63d3336d147c852f6d07de0d0020d704c6031b46a30b02ca8 \ --hash=sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3 \
--hash=sha256:5045ee1ccd45a89c4daec1160217d363fcd23811e26734688007c26f28c9e9e7 \ --hash=sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca \
--hash=sha256:0c1978ff1fd81ed9dcbba4f91cf09faf1f8082c9d72eb122e92294716c605428 \ --hash=sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785 \
--hash=sha256:52cbf2ff155b19dc4d4100f7442f6a697938bf4493f8d3b0c51d45568d5666b5 \ --hash=sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785 \
--hash=sha256:ce13d6291a5f47c1c8dbd375baa78551053bc6b5e5c0e9bb8e39c0a8359fd52f \ --hash=sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a \
--hash=sha256:e11527dc23d5ef44d76fef11213215c34f36af1608074561fcc561d983aeb870 \ --hash=sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e \
--hash=sha256:60d2f60bd5a2a979df28ab309352cdcf8181bda0cca4529769a945f09aba06f9 \ --hash=sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b \
--hash=sha256:62f93eac69ec0f4be98d1b96f4d6b964855b8255c345c17ff12c20b93f247b68 \ --hash=sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97 \
--hash=sha256:20b8a746a026017acf07da39fdb10aa80ad9877046c9182442bf80c84a1c4696 \ --hash=sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21 \
--hash=sha256:891dc8f522d7059ff0024cd3ae79fd224752676447f9c678f2a5c14b84d9a939 \ --hash=sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2 \
--hash=sha256:b6fc2e2fb6f532cf48b5fed57567ef286addcef38c28874458a41b7837a57807 \ --hash=sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130 \
--hash=sha256:74eb65ec61e3c7c019d7169387d1b6ffcfea1b9ec5894d116a9a903636e4a0b1 \ --hash=sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715 \
--hash=sha256:627e79894770783c129cc5e89b947e52aa26e8e0557c7e205368a809da4b7939 \ --hash=sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036 \
--hash=sha256:545bd39c9481f2e3f2727c78c169425efbfb3fbba6e7db4f46a80ebb249819ca \ --hash=sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387 \
--hash=sha256:5a58d0b12f5053e270510bf12f753a76aaf3d74c453c00942ed7d2c804ca845c \ --hash=sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94 \
--hash=sha256:ec4b4e75fc68da9dc0ed73dcdb431c25c57775383fec325d23a770a64e7ebc87 \ --hash=sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345 \
--hash=sha256:5804e04feb4e61babf3911c2a974a5b86f66ee227cc5006230b00ac6d285b3a9 \ --hash=sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67 \
--hash=sha256:aa0cf4922da7a3c905d000b35065df6184c0dc1d866dd3b86fd961905bbad2ea \ --hash=sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb \
--hash=sha256:dd10383f1d6b7edf247d0960a3db274c07e96cf3a3fc7c41c8448f93eac3fb1c \ --hash=sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448 \
--hash=sha256:2403a6d6fb61c285969b71f4a3527873fe93fd0abe0832d858a17fe68c8fa507 \ --hash=sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7 \
--hash=sha256:986b7a96228c9b4942ec420eff37556c5777bfba6758edcb95421e4a614b57f9 \ --hash=sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91 \
--hash=sha256:6fe4ef4402df0250b75ba876c3795510d782def5c1e63890bde02d622570d39e \ --hash=sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000 \
--hash=sha256:f10ce66fcdeb3543df51d423ede7e238be98412232fca5daec3e54bcd16b8da0 \ --hash=sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25 \
--hash=sha256:730766072fd5dcb219dd2b95c4c49752a54f00157f322bc6d71f7d2a31fecd79 \ --hash=sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd \
--hash=sha256:8b99ec73073b37f9ebe8caf399001848fced9c08064effdbfc4da2b5a8d07b93 \ --hash=sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb \
--hash=sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23 --hash=sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d \
--hash=sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c \
--hash=sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b \
--hash=sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc \
--hash=sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b \
--hash=sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2 \
--hash=sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73 \
--hash=sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c \
--hash=sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9 \
--hash=sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f
markupsafe==2.1.0 ; python_version >= "3.7" \ markupsafe==2.1.0 ; python_version >= "3.7" \
--hash=sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c \ --hash=sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c \
--hash=sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a \ --hash=sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a \

View file

@ -14,12 +14,18 @@
# By default Synapse is run in monolith mode. This can be overridden by # By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable. # setting the WORKERS environment variable.
# #
# A regular expression of test method names can be supplied as the first # You can optionally give a "-f" argument (for "fast") before any to skip
# argument to the script. Complement will then only run those tests. If # rebuilding the docker images, if you just want to rerun the tests.
# no regex is supplied, all tests are run. For example; #
# Remaining commandline arguments are passed through to `go test`. For example,
# you can supply a regular expression of test method names via the "-run"
# argument:
# #
# ./complement.sh -run "TestOutboundFederation(Profile|Send)" # ./complement.sh -run "TestOutboundFederation(Profile|Send)"
# #
# Specifying TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 will cause `poetry export`
# to not emit any hashes when building the Docker image. This then means that
# you can use 'unverifiable' sources such as git repositories as dependencies.
# Exit if a line returns a non-zero exit code # Exit if a line returns a non-zero exit code
set -e set -e
@ -32,6 +38,47 @@ echo_if_github() {
fi fi
} }
# Helper to print out the usage instructions
usage() {
cat >&2 <<EOF
Usage: $0 [-f] <go test arguments>...
Run the complement test suite on Synapse.
-f, --fast
Skip rebuilding the docker images, and just use the most recent
'complement-synapse:latest' image.
Conflicts with --build-only.
--build-only
Only build the Docker images. Don't actually run Complement.
Conflicts with -f/--fast.
For help on arguments to 'go test', run 'go help testflag'.
EOF
}
# parse our arguments
skip_docker_build=""
skip_complement_run=""
while [ $# -ge 1 ]; do
arg=$1
case "$arg" in
"-h")
usage
exit 1
;;
"-f"|"--fast")
skip_docker_build=1
;;
"--build-only")
skip_complement_run=1
;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
esac
shift
done
# enable buildkit for the docker builds # enable buildkit for the docker builds
export DOCKER_BUILDKIT=1 export DOCKER_BUILDKIT=1
@ -49,9 +96,12 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
echo "Checkout available at 'complement-${COMPLEMENT_REF}'" echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi fi
if [ -z "$skip_docker_build" ]; then
# Build the base Synapse image from the local checkout # Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse" echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . docker build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::" echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built). # Build the workers docker image (from the base Synapse image we just built).
@ -64,6 +114,12 @@ echo_if_github "::group::Build Docker image: complement/Dockerfile"
docker build -t complement-synapse \ docker build -t complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement" -f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::" echo_if_github "::endgroup::"
fi
if [ -n "$skip_complement_run" ]; then
echo "Skipping Complement run as requested."
exit
fi
export COMPLEMENT_BASE_IMAGE=complement-synapse export COMPLEMENT_BASE_IMAGE=complement-synapse
@ -104,6 +160,18 @@ else
test_tags="$test_tags,faster_joins" test_tags="$test_tags,faster_joins"
fi fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
# Allow logging sensitive things (currently SQL queries & parameters).
# (This won't have any effect if we're not logging at DEBUG level overall.)
# Since this is just a test suite, this is fine and won't reveal anyone's
# personal information
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
# Run the tests! # Run the tests!
echo "Images built; running complement" echo "Images built; running complement"
cd "$COMPLEMENT_DIR" cd "$COMPLEMENT_DIR"

View file

@ -268,6 +268,9 @@ class MockHomeserver:
def get_instance_name(self) -> str: def get_instance_name(self) -> str:
return "master" return "master"
def should_send_federation(self) -> bool:
return False
class Porter: class Porter:
def __init__( def __init__(
@ -415,12 +418,15 @@ class Porter:
self.progress.update(table, table_size) # Mark table as done self.progress.update(table, table_size) # Mark table as done
return return
# We sweep over rowids in two directions: one forwards (rowids 1, 2, 3, ...)
# and another backwards (rowids 0, -1, -2, ...).
forward_select = ( forward_select = (
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,)
) )
backward_select = ( backward_select = (
"SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,) "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid DESC LIMIT ?"
% (table,)
) )
do_forward = [True] do_forward = [True]
@ -618,6 +624,25 @@ class Porter:
self.postgres_store.db_pool.updates.has_completed_background_updates() self.postgres_store.db_pool.updates.has_completed_background_updates()
) )
@staticmethod
def _is_sqlite_autovacuum_enabled(txn: LoggingTransaction) -> bool:
"""
Returns true if auto_vacuum is enabled in SQLite.
https://www.sqlite.org/pragma.html#pragma_auto_vacuum
Vacuuming changes the rowids on rows in the database.
Auto-vacuuming is therefore dangerous when used in conjunction with this script.
Note that the auto_vacuum setting can't be changed without performing
a VACUUM after trying to change the pragma.
"""
txn.execute("PRAGMA auto_vacuum")
row = txn.fetchone()
assert row is not None, "`PRAGMA auto_vacuum` did not give a row."
(autovacuum_setting,) = row
# 0 means off. 1 means full. 2 means incremental.
return autovacuum_setting != 0
async def run(self) -> None: async def run(self) -> None:
"""Ports the SQLite database to a PostgreSQL database. """Ports the SQLite database to a PostgreSQL database.
@ -634,6 +659,21 @@ class Porter:
allow_outdated_version=True, allow_outdated_version=True,
) )
# For safety, ensure auto_vacuums are disabled.
if await self.sqlite_store.db_pool.runInteraction(
"is_sqlite_autovacuum_enabled", self._is_sqlite_autovacuum_enabled
):
end_error = (
"auto_vacuum is enabled in the SQLite database."
" (This is not the default configuration.)\n"
" This script relies on rowids being consistent and must not"
" be used if the database could be vacuumed between re-runs.\n"
" To disable auto_vacuum, you need to stop Synapse and run the following SQL:\n"
" PRAGMA auto_vacuum=off;\n"
" VACUUM;"
)
return
# Check if all background updates are done, abort if not. # Check if all background updates are done, abort if not.
updates_complete = ( updates_complete = (
await self.sqlite_store.db_pool.updates.has_completed_background_updates() await self.sqlite_store.db_pool.updates.has_completed_background_updates()

View file

@ -259,3 +259,13 @@ class ReceiptTypes:
READ: Final = "m.read" READ: Final = "m.read"
READ_PRIVATE: Final = "org.matrix.msc2285.read.private" READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
FULLY_READ: Final = "m.fully_read" FULLY_READ: Final = "m.fully_read"
class PublicRoomsFilterFields:
"""Fields in the search filter for `/publicRooms` that we understand.
As defined in https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3publicrooms
"""
GENERIC_SEARCH_TERM: Final = "generic_search_term"
ROOM_TYPES: Final = "org.matrix.msc3827.room_types"

View file

@ -297,8 +297,14 @@ class AuthError(SynapseError):
other poorly-defined times. other poorly-defined times.
""" """
def __init__(self, code: int, msg: str, errcode: str = Codes.FORBIDDEN): def __init__(
super().__init__(code, msg, errcode) self,
code: int,
msg: str,
errcode: str = Codes.FORBIDDEN,
additional_fields: Optional[dict] = None,
):
super().__init__(code, msg, errcode, additional_fields)
class InvalidClientCredentialsError(SynapseError): class InvalidClientCredentialsError(SynapseError):

View file

@ -106,7 +106,9 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
def start_worker_reactor( def start_worker_reactor(
appname: str, appname: str,
config: HomeServerConfig, config: HomeServerConfig,
run_command: Callable[[], None] = reactor.run, # Use a lambda to avoid binding to a given reactor at import time.
# (needed when synapse.app.complement_fork_starter is being used)
run_command: Callable[[], None] = lambda: reactor.run(),
) -> None: ) -> None:
"""Run the reactor in the main process """Run the reactor in the main process
@ -141,7 +143,9 @@ def start_reactor(
daemonize: bool, daemonize: bool,
print_pidfile: bool, print_pidfile: bool,
logger: logging.Logger, logger: logging.Logger,
run_command: Callable[[], None] = reactor.run, # Use a lambda to avoid binding to a given reactor at import time.
# (needed when synapse.app.complement_fork_starter is being used)
run_command: Callable[[], None] = lambda: reactor.run(),
) -> None: ) -> None:
"""Run the reactor in the main process """Run the reactor in the main process

View file

@ -39,6 +39,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.server import HomeServer from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.room import RoomWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.types import StateMap from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION from synapse.util import SYNAPSE_VERSION
@ -60,7 +61,17 @@ class AdminCmdSlavedStore(
BaseSlavedStore, BaseSlavedStore,
RoomWorkerStore, RoomWorkerStore,
): ):
pass def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
# Annoyingly `filter_events_for_client` assumes that this exists. We
# should refactor it to take a `Clock` directly.
self.clock = hs.get_clock()
class AdminCmdServer(HomeServer): class AdminCmdServer(HomeServer):

View file

@ -0,0 +1,190 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ## What this script does
#
# This script spawns multiple workers, whilst only going through the code loading
# process once. The net effect is that start-up time for a swarm of workers is
# reduced, particularly in CPU-constrained environments.
#
# Before the workers are spawned, the database is prepared in order to avoid the
# workers racing.
#
# ## Stability
#
# This script is only intended for use within the Synapse images for the
# Complement test suite.
# There are currently no stability guarantees whatsoever; especially not about:
# - whether it will continue to exist in future versions;
# - the format of its command-line arguments; or
# - any details about its behaviour or principles of operation.
#
# ## Usage
#
# The first argument should be the path to the database configuration, used to
# set up the database. The rest of the arguments are used as follows:
# Each worker is specified as an argument group (each argument group is
# separated by '--').
# The first argument in each argument group is the Python module name of the application
# to start. Further arguments are then passed to that module as-is.
#
# ## Example
#
# python -m synapse.app.complement_fork_starter path_to_db_config.yaml \
# synapse.app.homeserver [args..] -- \
# synapse.app.generic_worker [args..] -- \
# ...
# synapse.app.generic_worker [args..]
#
import argparse
import importlib
import itertools
import multiprocessing
import sys
from typing import Any, Callable, List
from twisted.internet.main import installReactor
class ProxiedReactor:
"""
Twisted tracks the 'installed' reactor as a global variable.
(Actually, it does some module trickery, but the effect is similar.)
The default EpollReactor is buggy if it's created before a process is
forked, then used in the child.
See https://twistedmatrix.com/trac/ticket/4759#comment:17.
However, importing certain Twisted modules will automatically create and
install a reactor if one hasn't already been installed.
It's not normally possible to re-install a reactor.
Given the goal of launching workers with fork() to only import the code once,
this presents a conflict.
Our work around is to 'install' this ProxiedReactor which prevents Twisted
from creating and installing one, but which lets us replace the actual reactor
in use later on.
"""
def __init__(self) -> None:
self.___reactor_target: Any = None
def _install_real_reactor(self, new_reactor: Any) -> None:
"""
Install a real reactor for this ProxiedReactor to forward lookups onto.
This method is specific to our ProxiedReactor and should not clash with
any names used on an actual Twisted reactor.
"""
self.___reactor_target = new_reactor
def __getattr__(self, attr_name: str) -> Any:
return getattr(self.___reactor_target, attr_name)
def _worker_entrypoint(
func: Callable[[], None], proxy_reactor: ProxiedReactor, args: List[str]
) -> None:
"""
Entrypoint for a forked worker process.
We just need to set up the command-line arguments, create our real reactor
and then kick off the worker's main() function.
"""
sys.argv = args
from twisted.internet.epollreactor import EPollReactor
proxy_reactor._install_real_reactor(EPollReactor())
func()
def main() -> None:
"""
Entrypoint for the forking launcher.
"""
parser = argparse.ArgumentParser()
parser.add_argument("db_config", help="Path to database config file")
parser.add_argument(
"args",
nargs="...",
help="Argument groups separated by `--`. "
"The first argument of each group is a Synapse app name. "
"Subsequent arguments are passed through.",
)
ns = parser.parse_args()
# Split up the subsequent arguments into each workers' arguments;
# `--` is our delimiter of choice.
args_by_worker: List[List[str]] = [
list(args)
for cond, args in itertools.groupby(ns.args, lambda ele: ele != "--")
if cond and args
]
# Prevent Twisted from installing a shared reactor that all the workers will
# inherit when we fork(), by installing our own beforehand.
proxy_reactor = ProxiedReactor()
installReactor(proxy_reactor)
# Import the entrypoints for all the workers.
worker_functions = []
for worker_args in args_by_worker:
worker_module = importlib.import_module(worker_args[0])
worker_functions.append(worker_module.main)
# We need to prepare the database first as otherwise all the workers will
# try to create a schema version table and some will crash out.
from synapse._scripts import update_synapse_database
update_proc = multiprocessing.Process(
target=_worker_entrypoint,
args=(
update_synapse_database.main,
proxy_reactor,
[
"update_synapse_database",
"--database-config",
ns.db_config,
"--run-background-updates",
],
),
)
print("===== PREPARING DATABASE =====", file=sys.stderr)
update_proc.start()
update_proc.join()
print("===== PREPARED DATABASE =====", file=sys.stderr)
# At this point, we've imported all the main entrypoints for all the workers.
# Now we basically just fork() out to create the workers we need.
# Because we're using fork(), all the workers get a clone of this launcher's
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
processes = []
for (func, worker_args) in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
)
process.start()
processes.append(process)
# Be a good parent and wait for our children to die before exiting.
for process in processes:
process.join()
if __name__ == "__main__":
main()

View file

@ -319,7 +319,9 @@ class _ServiceQueuer:
rooms_of_interesting_users.update(event.room_id for event in events) rooms_of_interesting_users.update(event.room_id for event in events)
# EDUs # EDUs
rooms_of_interesting_users.update( rooms_of_interesting_users.update(
ephemeral["room_id"] for ephemeral in ephemerals ephemeral["room_id"]
for ephemeral in ephemerals
if ephemeral.get("room_id") is not None
) )
# Look up the AS users in those rooms # Look up the AS users in those rooms
@ -329,8 +331,9 @@ class _ServiceQueuer:
) )
# Add recipients of to-device messages. # Add recipients of to-device messages.
# device_message["user_id"] is the ID of the recipient. users.update(
users.update(device_message["user_id"] for device_message in to_device_messages) device_message["to_user_id"] for device_message in to_device_messages
)
# Compute and return the counts / fallback key usage states # Compute and return the counts / fallback key usage states
otk_counts = await self._store.count_bulk_e2e_one_time_keys_for_as(users) otk_counts = await self._store.count_bulk_e2e_one_time_keys_for_as(users)

View file

@ -21,7 +21,7 @@ from typing import Any, Callable, Dict, Optional
import attr import attr
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.check_dependencies import check_requirements
from ._base import Config, ConfigError from ._base import Config, ConfigError
@ -159,12 +159,7 @@ class CacheConfig(Config):
self.track_memory_usage = cache_config.get("track_memory_usage", False) self.track_memory_usage = cache_config.get("track_memory_usage", False)
if self.track_memory_usage: if self.track_memory_usage:
try:
check_requirements("cache_memory") check_requirements("cache_memory")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
)
expire_caches = cache_config.get("expire_caches", True) expire_caches = cache_config.get("expire_caches", True)
cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m") cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")

View file

@ -145,7 +145,7 @@ class EmailConfig(Config):
raise ConfigError( raise ConfigError(
'The config option "trust_identity_server_for_password_resets" ' 'The config option "trust_identity_server_for_password_resets" '
'has been replaced by "account_threepid_delegate". ' 'has been replaced by "account_threepid_delegate". '
"Please consult the sample config at docs/sample_config.yaml for " "Please consult the configuration manual at docs/usage/configuration/config_documentation.md for "
"details and update your config file." "details and update your config file."
) )

View file

@ -87,3 +87,6 @@ class ExperimentalConfig(Config):
# MSC3715: dir param on /relations. # MSC3715: dir param on /relations.
self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False) self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
# MSC3827: Filtering of /publicRooms by room type
self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False)

View file

@ -15,14 +15,9 @@
from typing import Any from typing import Any
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.check_dependencies import check_requirements
from ._base import Config, ConfigError from ._base import Config
MISSING_AUTHLIB = """Missing authlib library. This is required for jwt login.
Install by running:
pip install synapse[jwt]
"""
class JWTConfig(Config): class JWTConfig(Config):
@ -41,13 +36,7 @@ class JWTConfig(Config):
# that the claims exist on the JWT. # that the claims exist on the JWT.
self.jwt_issuer = jwt_config.get("issuer") self.jwt_issuer = jwt_config.get("issuer")
self.jwt_audiences = jwt_config.get("audiences") self.jwt_audiences = jwt_config.get("audiences")
check_requirements("jwt")
try:
from authlib.jose import JsonWebToken
JsonWebToken # To stop unused lint.
except ImportError:
raise ConfigError(MISSING_AUTHLIB)
else: else:
self.jwt_enabled = False self.jwt_enabled = False
self.jwt_secret = None self.jwt_secret = None

View file

@ -18,7 +18,7 @@ from typing import Any, Optional
import attr import attr
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.check_dependencies import check_requirements
from ._base import Config, ConfigError from ._base import Config, ConfigError
@ -57,12 +57,7 @@ class MetricsConfig(Config):
self.sentry_enabled = "sentry" in config self.sentry_enabled = "sentry" in config
if self.sentry_enabled: if self.sentry_enabled:
try:
check_requirements("sentry") check_requirements("sentry")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
)
self.sentry_dsn = config["sentry"].get("dsn") self.sentry_dsn = config["sentry"].get("dsn")
if not self.sentry_dsn: if not self.sentry_dsn:

View file

@ -24,7 +24,7 @@ from synapse.types import JsonDict
from synapse.util.module_loader import load_module from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri from synapse.util.stringutils import parse_and_validate_mxc_uri
from ..util.check_dependencies import DependencyException, check_requirements from ..util.check_dependencies import check_requirements
from ._base import Config, ConfigError, read_file from ._base import Config, ConfigError, read_file
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider" DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
@ -41,12 +41,7 @@ class OIDCConfig(Config):
if not self.oidc_providers: if not self.oidc_providers:
return return
try:
check_requirements("oidc") check_requirements("oidc")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
) from e
# check we don't have any duplicate idp_ids now. (The SSO handler will also # check we don't have any duplicate idp_ids now. (The SSO handler will also
# check for duplicates when the REST listeners get registered, but that happens # check for duplicates when the REST listeners get registered, but that happens
@ -146,7 +141,6 @@ OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = {
"allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}] "allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}]
} }
# the `oidc_providers` list can either be None (as it is in the default config), or # the `oidc_providers` list can either be None (as it is in the default config), or
# a list of provider configs, each of which requires an explicit ID and name. # a list of provider configs, each of which requires an explicit ID and name.
OIDC_PROVIDER_LIST_SCHEMA = { OIDC_PROVIDER_LIST_SCHEMA = {

View file

@ -136,6 +136,11 @@ class RatelimitConfig(Config):
defaults={"per_second": 0.003, "burst_count": 5}, defaults={"per_second": 0.003, "burst_count": 5},
) )
self.rc_invites_per_issuer = RateLimitConfig(
config.get("rc_invites", {}).get("per_issuer", {}),
defaults={"per_second": 0.3, "burst_count": 10},
)
self.rc_third_party_invite = RateLimitConfig( self.rc_third_party_invite = RateLimitConfig(
config.get("rc_third_party_invite", {}), config.get("rc_third_party_invite", {}),
defaults={ defaults={

View file

@ -21,7 +21,7 @@ import attr
from synapse.config.server import generate_ip_set from synapse.config.server import generate_ip_set
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.check_dependencies import check_requirements
from synapse.util.module_loader import load_module from synapse.util.module_loader import load_module
from ._base import Config, ConfigError from ._base import Config, ConfigError
@ -185,14 +185,8 @@ class ContentRepositoryConfig(Config):
) )
self.url_preview_enabled = config.get("url_preview_enabled", False) self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled: if self.url_preview_enabled:
try:
check_requirements("url_preview") check_requirements("url_preview")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
)
proxy_env = getproxies_environment() proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config: if "url_preview_ip_range_blacklist" not in config:
if "http" not in proxy_env or "https" not in proxy_env: if "http" not in proxy_env or "https" not in proxy_env:

View file

@ -18,7 +18,7 @@ from typing import Any, List, Set
from synapse.config.sso import SsoAttributeRequirement from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.check_dependencies import check_requirements
from synapse.util.module_loader import load_module, load_python_module from synapse.util.module_loader import load_module, load_python_module
from ._base import Config, ConfigError from ._base import Config, ConfigError
@ -76,12 +76,7 @@ class SAML2Config(Config):
if not saml2_config.get("sp_config") and not saml2_config.get("config_path"): if not saml2_config.get("sp_config") and not saml2_config.get("config_path"):
return return
try:
check_requirements("saml2") check_requirements("saml2")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
)
self.saml2_enabled = True self.saml2_enabled = True

View file

@ -15,7 +15,7 @@
from typing import Any, List, Set from typing import Any, List, Set
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.check_dependencies import check_requirements
from ._base import Config, ConfigError from ._base import Config, ConfigError
@ -40,12 +40,7 @@ class TracerConfig(Config):
if not self.opentracer_enabled: if not self.opentracer_enabled:
return return
try:
check_requirements("opentracing") check_requirements("opentracing")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
)
# The tracer is enabled so sanitize the config # The tracer is enabled so sanitize the config

View file

@ -21,7 +21,6 @@ from typing import (
Awaitable, Awaitable,
Callable, Callable,
Collection, Collection,
Dict,
List, List,
Optional, Optional,
Tuple, Tuple,
@ -32,10 +31,11 @@ from typing import (
from typing_extensions import Literal from typing_extensions import Literal
import synapse import synapse
from synapse.api.errors import Codes
from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, UserProfile from synapse.types import JsonDict, RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.metrics import Measure from synapse.util.metrics import Measure
@ -50,12 +50,12 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
str, str,
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may # Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing # disappear without warning depending on the results of ongoing
# experiments. # experiments.
# Use this to return additional information as part of an error. # Use this to return additional information as part of an error.
Tuple["synapse.api.errors.Codes", Dict], Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -70,7 +70,12 @@ USER_MAY_JOIN_ROOM_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -81,7 +86,12 @@ USER_MAY_INVITE_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -92,7 +102,12 @@ USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -103,7 +118,12 @@ USER_MAY_CREATE_ROOM_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -114,7 +134,12 @@ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -125,7 +150,12 @@ USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -154,7 +184,12 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
Awaitable[ Awaitable[
Union[ Union[
Literal["NOT_SPAM"], Literal["NOT_SPAM"],
"synapse.api.errors.Codes", Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated # Deprecated
bool, bool,
] ]
@ -345,7 +380,7 @@ class SpamChecker:
async def check_event_for_spam( async def check_event_for_spam(
self, event: "synapse.events.EventBase" self, event: "synapse.events.EventBase"
) -> Union[Tuple["synapse.api.errors.Codes", Dict], str]: ) -> Union[Tuple[Codes, JsonDict], str]:
"""Checks if a given event is considered "spammy" by this server. """Checks if a given event is considered "spammy" by this server.
If the server considers an event spammy, then it will be rejected if If the server considers an event spammy, then it will be rejected if
@ -376,7 +411,16 @@ class SpamChecker:
elif res is True: elif res is True:
# This spam-checker rejects the event with deprecated # This spam-checker rejects the event with deprecated
# return value `True` # return value `True`
return (synapse.api.errors.Codes.FORBIDDEN, {}) return synapse.api.errors.Codes.FORBIDDEN, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res
elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif not isinstance(res, str): elif not isinstance(res, str):
# mypy complains that we can't reach this code because of the # mypy complains that we can't reach this code because of the
# return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know
@ -422,7 +466,7 @@ class SpamChecker:
async def user_may_join_room( async def user_may_join_room(
self, user_id: str, room_id: str, is_invited: bool self, user_id: str, room_id: str, is_invited: bool
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, JsonDict], Literal["NOT_SPAM"]]:
"""Checks if a given users is allowed to join a room. """Checks if a given users is allowed to join a room.
Not called when a user creates a room. Not called when a user creates a room.
@ -432,7 +476,7 @@ class SpamChecker:
is_invited: Whether the user is invited into the room is_invited: Whether the user is invited into the room
Returns: Returns:
NOT_SPAM if the operation is permitted, Codes otherwise. NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise.
""" """
for callback in self._user_may_join_room_callbacks: for callback in self._user_may_join_room_callbacks:
with Measure( with Measure(
@ -443,21 +487,28 @@ class SpamChecker:
if res is True or res is self.NOT_SPAM: if res is True or res is self.NOT_SPAM:
continue continue
elif res is False: elif res is False:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting join as spam" "Module returned invalid value, rejecting join as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
# No spam-checker has rejected the request, let it pass. # No spam-checker has rejected the request, let it pass.
return self.NOT_SPAM return self.NOT_SPAM
async def user_may_invite( async def user_may_invite(
self, inviter_userid: str, invitee_userid: str, room_id: str self, inviter_userid: str, invitee_userid: str, room_id: str
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
"""Checks if a given user may send an invite """Checks if a given user may send an invite
Args: Args:
@ -479,21 +530,28 @@ class SpamChecker:
if res is True or res is self.NOT_SPAM: if res is True or res is self.NOT_SPAM:
continue continue
elif res is False: elif res is False:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting invite as spam" "Module returned invalid value, rejecting invite as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
# No spam-checker has rejected the request, let it pass. # No spam-checker has rejected the request, let it pass.
return self.NOT_SPAM return self.NOT_SPAM
async def user_may_send_3pid_invite( async def user_may_send_3pid_invite(
self, inviter_userid: str, medium: str, address: str, room_id: str self, inviter_userid: str, medium: str, address: str, room_id: str
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
"""Checks if a given user may invite a given threepid into the room """Checks if a given user may invite a given threepid into the room
Note that if the threepid is already associated with a Matrix user ID, Synapse Note that if the threepid is already associated with a Matrix user ID, Synapse
@ -519,20 +577,27 @@ class SpamChecker:
if res is True or res is self.NOT_SPAM: if res is True or res is self.NOT_SPAM:
continue continue
elif res is False: elif res is False:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting 3pid invite as spam" "Module returned invalid value, rejecting 3pid invite as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
return self.NOT_SPAM return self.NOT_SPAM
async def user_may_create_room( async def user_may_create_room(
self, userid: str self, userid: str
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
"""Checks if a given user may create a room """Checks if a given user may create a room
Args: Args:
@ -546,20 +611,27 @@ class SpamChecker:
if res is True or res is self.NOT_SPAM: if res is True or res is self.NOT_SPAM:
continue continue
elif res is False: elif res is False:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting room creation as spam" "Module returned invalid value, rejecting room creation as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
return self.NOT_SPAM return self.NOT_SPAM
async def user_may_create_room_alias( async def user_may_create_room_alias(
self, userid: str, room_alias: RoomAlias self, userid: str, room_alias: RoomAlias
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
"""Checks if a given user may create a room alias """Checks if a given user may create a room alias
Args: Args:
@ -575,20 +647,27 @@ class SpamChecker:
if res is True or res is self.NOT_SPAM: if res is True or res is self.NOT_SPAM:
continue continue
elif res is False: elif res is False:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting room create as spam" "Module returned invalid value, rejecting room create as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
return self.NOT_SPAM return self.NOT_SPAM
async def user_may_publish_room( async def user_may_publish_room(
self, userid: str, room_id: str self, userid: str, room_id: str
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
"""Checks if a given user may publish a room to the directory """Checks if a given user may publish a room to the directory
Args: Args:
@ -603,14 +682,21 @@ class SpamChecker:
if res is True or res is self.NOT_SPAM: if res is True or res is self.NOT_SPAM:
continue continue
elif res is False: elif res is False:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting room publication as spam" "Module returned invalid value, rejecting room publication as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
return self.NOT_SPAM return self.NOT_SPAM
@ -678,7 +764,7 @@ class SpamChecker:
async def check_media_file_for_spam( async def check_media_file_for_spam(
self, file_wrapper: ReadableFileWrapper, file_info: FileInfo self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
"""Checks if a piece of newly uploaded media should be blocked. """Checks if a piece of newly uploaded media should be blocked.
This will be called for local uploads, downloads of remote media, each This will be called for local uploads, downloads of remote media, each
@ -715,13 +801,20 @@ class SpamChecker:
if res is False or res is self.NOT_SPAM: if res is False or res is self.NOT_SPAM:
continue continue
elif res is True: elif res is True:
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
elif isinstance(res, synapse.api.errors.Codes): elif isinstance(res, synapse.api.errors.Codes):
return res, {}
elif (
isinstance(res, tuple)
and len(res) == 2
and isinstance(res[0], synapse.api.errors.Codes)
and isinstance(res[1], dict)
):
return res return res
else: else:
logger.warning( logger.warning(
"Module returned invalid value, rejecting media file as spam" "Module returned invalid value, rejecting media file as spam"
) )
return synapse.api.errors.Codes.FORBIDDEN return synapse.api.errors.Codes.FORBIDDEN, {}
return self.NOT_SPAM return self.NOT_SPAM

View file

@ -464,14 +464,7 @@ class ThirdPartyEventRules:
Returns: Returns:
A dict mapping (event type, state key) to state event. A dict mapping (event type, state key) to state event.
""" """
state_ids = await self._storage_controllers.state.get_current_state_ids(room_id) return await self._storage_controllers.state.get_current_state(room_id)
room_state_events = await self.store.get_events(state_ids.values())
state_events = {}
for key, event_id in state_ids.items():
state_events[key] = room_state_events[event_id]
return state_events
async def on_profile_update( async def on_profile_update(
self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool

View file

@ -67,6 +67,7 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet, ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet, ReplicationGetQueryRestServlet,
) )
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.lock import Lock
from synapse.types import JsonDict, StateMap, get_domain_from_id from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError from synapse.util import json_decoder, unwrapFirstError
@ -882,6 +883,17 @@ class FederationServer(FederationBase):
logger.warning("%s", errmsg) logger.warning("%s", errmsg)
raise SynapseError(403, errmsg, Codes.FORBIDDEN) raise SynapseError(403, errmsg, Codes.FORBIDDEN)
try:
return await self._federation_event_handler.on_send_membership_event(
origin, event
)
except PartialStateConflictError:
# The room was un-partial stated while we were persisting the event.
# Try once more, with full state this time.
logger.info(
"Room %s was un-partial stated during `on_send_membership_event`, trying again.",
room_id,
)
return await self._federation_event_handler.on_send_membership_event( return await self._federation_event_handler.on_send_membership_event(
origin, event origin, event
) )

View file

@ -151,7 +151,8 @@ class DirectoryHandler:
raise AuthError( raise AuthError(
403, 403,
"This user is not permitted to create this alias", "This user is not permitted to create this alias",
spam_check, errcode=spam_check[0],
additional_fields=spam_check[1],
) )
if not self.config.roomdirectory.is_alias_creation_allowed( if not self.config.roomdirectory.is_alias_creation_allowed(
@ -443,7 +444,8 @@ class DirectoryHandler:
raise AuthError( raise AuthError(
403, 403,
"This user is not permitted to publish rooms to the room list", "This user is not permitted to publish rooms to the room list",
spam_check, errcode=spam_check[0],
additional_fields=spam_check[1],
) )
if requester.is_guest: if requester.is_guest:

View file

@ -45,6 +45,7 @@ from synapse.api.errors import (
FederationDeniedError, FederationDeniedError,
FederationError, FederationError,
HttpResponseException, HttpResponseException,
LimitExceededError,
NotFoundError, NotFoundError,
RequestSendFailed, RequestSendFailed,
SynapseError, SynapseError,
@ -64,6 +65,7 @@ from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet, ReplicationCleanRoomRestServlet,
ReplicationStoreRoomOnOutlierMembershipRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet,
) )
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StateMap, get_domain_from_id from synapse.types import JsonDict, StateMap, get_domain_from_id
@ -549,7 +551,9 @@ class FederationHandler:
# https://github.com/matrix-org/synapse/issues/12998 # https://github.com/matrix-org/synapse/issues/12998
await self.store.store_partial_state_room(room_id, ret.servers_in_room) await self.store.store_partial_state_room(room_id, ret.servers_in_room)
max_stream_id = await self._federation_event_handler.process_remote_join( try:
max_stream_id = (
await self._federation_event_handler.process_remote_join(
origin, origin,
room_id, room_id,
auth_chain, auth_chain,
@ -558,6 +562,18 @@ class FederationHandler:
room_version_obj, room_version_obj,
partial_state=ret.partial_state, partial_state=ret.partial_state,
) )
)
except PartialStateConflictError as e:
# The homeserver was already in the room and it is no longer partial
# stated. We ought to be doing a local join instead. Turn the error into
# a 429, as a hint to the client to try again.
# TODO(faster_joins): `_should_perform_remote_join` suggests that we may
# do a remote join for restricted rooms even if we have full state.
logger.error(
"Room %s was un-partial stated while processing remote join.",
room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
if ret.partial_state: if ret.partial_state:
# Kick off the process of asynchronously fetching the state for this # Kick off the process of asynchronously fetching the state for this
@ -828,7 +844,8 @@ class FederationHandler:
raise SynapseError( raise SynapseError(
403, 403,
"This user is not permitted to send invites to this server/user", "This user is not permitted to send invites to this server/user",
spam_check, errcode=spam_check[0],
additional_fields=spam_check[1],
) )
membership = event.content.get("membership") membership = event.content.get("membership")
@ -1543,14 +1560,9 @@ class FederationHandler:
# all the events are updated, so we can update current state and # all the events are updated, so we can update current state and
# clear the lazy-loading flag. # clear the lazy-loading flag.
logger.info("Updating current state for %s", room_id) logger.info("Updating current state for %s", room_id)
# TODO(faster_joins): support workers # TODO(faster_joins): notify workers in notify_room_un_partial_stated
# https://github.com/matrix-org/synapse/issues/12994 # https://github.com/matrix-org/synapse/issues/12994
assert ( await self.state_handler.update_current_state(room_id)
self._storage_controllers.persistence is not None
), "worker-mode deployments not currently supported here"
await self._storage_controllers.persistence.update_current_state(
room_id
)
logger.info("Clearing partial-state flag for %s", room_id) logger.info("Clearing partial-state flag for %s", room_id)
success = await self.store.clear_partial_state_room(room_id) success = await self.store.clear_partial_state_room(room_id)
@ -1567,11 +1579,6 @@ class FederationHandler:
# we raced against more events arriving with partial state. Go round # we raced against more events arriving with partial state. Go round
# the loop again. We've already logged a warning, so no need for more. # the loop again. We've already logged a warning, so no need for more.
# TODO(faster_joins): there is still a race here, whereby incoming events which raced
# with us will fail to be persisted after the call to `clear_partial_state_room` due to
# having partial state.
# https://github.com/matrix-org/synapse/issues/12988
#
continue continue
events = await self.store.get_events_as_list( events = await self.store.get_events_as_list(

View file

@ -64,6 +64,7 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEventsRestServlet, ReplicationFederationSendEventsRestServlet,
) )
from synapse.state import StateResolutionStore from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
@ -275,6 +276,15 @@ class FederationEventHandler:
affected=pdu.event_id, affected=pdu.event_id,
) )
try:
await self._process_received_pdu(origin, pdu, state_ids=None)
except PartialStateConflictError:
# The room was un-partial stated while we were processing the PDU.
# Try once more, with full state this time.
logger.info(
"Room %s was un-partial stated while processing the PDU, trying again.",
room_id,
)
await self._process_received_pdu(origin, pdu, state_ids=None) await self._process_received_pdu(origin, pdu, state_ids=None)
async def on_send_membership_event( async def on_send_membership_event(
@ -306,6 +316,9 @@ class FederationEventHandler:
Raises: Raises:
SynapseError if the event is not accepted into the room SynapseError if the event is not accepted into the room
PartialStateConflictError if the room was un-partial stated in between
computing the state at the event and persisting it. The caller should
retry exactly once in this case.
""" """
logger.debug( logger.debug(
"on_send_membership_event: Got event: %s, signatures: %s", "on_send_membership_event: Got event: %s, signatures: %s",
@ -423,6 +436,8 @@ class FederationEventHandler:
Raises: Raises:
SynapseError if the response is in some way invalid. SynapseError if the response is in some way invalid.
PartialStateConflictError if the homeserver is already in the room and it
has been un-partial stated.
""" """
create_event = None create_event = None
for e in state: for e in state:
@ -1084,28 +1099,31 @@ class FederationEventHandler:
state_ids: Normally None, but if we are handling a gap in the graph state_ids: Normally None, but if we are handling a gap in the graph
(ie, we are missing one or more prev_events), the resolved state at the (ie, we are missing one or more prev_events), the resolved state at the
event event. Must not be partial state.
backfilled: True if this is part of a historical batch of events (inhibits backfilled: True if this is part of a historical batch of events (inhibits
notification to clients, and validation of device keys.) notification to clients, and validation of device keys.)
PartialStateConflictError: if the room was un-partial stated in between
computing the state at the event and persisting it. The caller should retry
exactly once in this case. Will never be raised if `state_ids` is provided.
""" """
logger.debug("Processing event: %s", event) logger.debug("Processing event: %s", event)
assert not event.internal_metadata.outlier assert not event.internal_metadata.outlier
try:
context = await self._state_handler.compute_event_context( context = await self._state_handler.compute_event_context(
event, event,
state_ids_before_event=state_ids, state_ids_before_event=state_ids,
) )
try:
context = await self._check_event_auth( context = await self._check_event_auth(
origin, origin,
event, event,
context, context,
) )
except AuthError as e: except AuthError as e:
# FIXME richvdh 2021/10/07 I don't think this is reachable. Let's log it # This happens only if we couldn't find the auth events. We'll already have
# for now # logged a warning, so now we just convert to a FederationError.
logger.exception("Unexpected AuthError from _check_event_auth")
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
if not backfilled and not context.rejected: if not backfilled and not context.rejected:
@ -1934,6 +1952,9 @@ class FederationEventHandler:
event: The event itself. event: The event itself.
context: The event context. context: The event context.
backfilled: True if the event was backfilled. backfilled: True if the event was backfilled.
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
# this method should not be called on outliers (those code paths call # this method should not be called on outliers (those code paths call
# persist_events_and_notify directly.) # persist_events_and_notify directly.)
@ -1986,6 +2007,10 @@ class FederationEventHandler:
Returns: Returns:
The stream ID after which all events have been persisted. The stream ID after which all events have been persisted.
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
if not event_and_contexts: if not event_and_contexts:
return self._store.get_room_max_stream_ordering() return self._store.get_room_max_stream_ordering()
@ -1994,6 +2019,7 @@ class FederationEventHandler:
if instance != self._instance_name: if instance != self._instance_name:
# Limit the number of events sent over replication. We choose 200 # Limit the number of events sent over replication. We choose 200
# here as that is what we default to in `max_request_body_size(..)` # here as that is what we default to in `max_request_body_size(..)`
try:
for batch in batch_iter(event_and_contexts, 200): for batch in batch_iter(event_and_contexts, 200):
result = await self._send_events( result = await self._send_events(
instance_name=instance, instance_name=instance,
@ -2002,6 +2028,10 @@ class FederationEventHandler:
event_and_contexts=batch, event_and_contexts=batch,
backfilled=backfilled, backfilled=backfilled,
) )
except SynapseError as e:
if e.code == HTTPStatus.CONFLICT:
raise PartialStateConflictError()
raise
return result["max_stream_id"] return result["max_stream_id"]
else: else:
assert self._storage_controllers.persistence assert self._storage_controllers.persistence

View file

@ -37,6 +37,7 @@ from synapse.api.errors import (
AuthError, AuthError,
Codes, Codes,
ConsentNotGivenError, ConsentNotGivenError,
LimitExceededError,
NotFoundError, NotFoundError,
ShadowBanError, ShadowBanError,
SynapseError, SynapseError,
@ -53,6 +54,7 @@ from synapse.handlers.directory import DirectoryHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter from synapse.storage.state import StateFilter
from synapse.types import ( from synapse.types import (
@ -905,6 +907,9 @@ class EventCreationHandler:
await self.clock.sleep(random.randint(1, 10)) await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError() raise ShadowBanError()
if ratelimit:
await self.request_ratelimiter.ratelimit(requester, update=False)
# We limit the number of concurrent event sends in a room so that we # We limit the number of concurrent event sends in a room so that we
# don't fork the DAG too much. If we don't limit then we can end up in # don't fork the DAG too much. If we don't limit then we can end up in
# a situation where event persistence can't keep up, causing # a situation where event persistence can't keep up, causing
@ -1252,6 +1257,8 @@ class EventCreationHandler:
Raises: Raises:
ShadowBanError if the requester has been shadow-banned. ShadowBanError if the requester has been shadow-banned.
SynapseError(503) if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
extra_users = extra_users or [] extra_users = extra_users or []
@ -1302,6 +1309,7 @@ class EventCreationHandler:
# We now persist the event (and update the cache in parallel, since we # We now persist the event (and update the cache in parallel, since we
# don't want to block on it). # don't want to block on it).
try:
result, _ = await make_deferred_yieldable( result, _ = await make_deferred_yieldable(
gather_results( gather_results(
( (
@ -1316,11 +1324,21 @@ class EventCreationHandler:
), ),
run_in_background( run_in_background(
self.cache_joined_hosts_for_event, event, context self.cache_joined_hosts_for_event, event, context
).addErrback(log_failure, "cache_joined_hosts_for_event failed"), ).addErrback(
log_failure, "cache_joined_hosts_for_event failed"
),
), ),
consumeErrors=True, consumeErrors=True,
) )
).addErrback(unwrapFirstError) ).addErrback(unwrapFirstError)
except PartialStateConflictError as e:
# The event context needs to be recomputed.
# Turn the error into a 429, as a hint to the client to try again.
logger.info(
"Room %s was un-partial stated while persisting client event.",
event.room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
return result return result
@ -1336,6 +1354,9 @@ class EventCreationHandler:
"""Actually persists the event. Should only be called by """Actually persists the event. Should only be called by
`handle_new_client_event`, and see its docstring for documentation of `handle_new_client_event`, and see its docstring for documentation of
the arguments. the arguments.
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
# Skip push notification actions for historical messages # Skip push notification actions for historical messages
@ -1352,6 +1373,7 @@ class EventCreationHandler:
# If we're a worker we need to hit out to the master. # If we're a worker we need to hit out to the master.
writer_instance = self._events_shard_config.get_instance(event.room_id) writer_instance = self._events_shard_config.get_instance(event.room_id)
if writer_instance != self._instance_name: if writer_instance != self._instance_name:
try:
result = await self.send_event( result = await self.send_event(
instance_name=writer_instance, instance_name=writer_instance,
event_id=event.event_id, event_id=event.event_id,
@ -1363,6 +1385,10 @@ class EventCreationHandler:
extra_users=extra_users, extra_users=extra_users,
dont_notify=dont_notify, dont_notify=dont_notify,
) )
except SynapseError as e:
if e.code == HTTPStatus.CONFLICT:
raise PartialStateConflictError()
raise
stream_id = result["stream_id"] stream_id = result["stream_id"]
event_id = result["event_id"] event_id = result["event_id"]
if event_id != event.event_id: if event_id != event.event_id:
@ -1491,6 +1517,10 @@ class EventCreationHandler:
The persisted event. This may be different than the given event if The persisted event. This may be different than the given event if
it was de-duplicated (e.g. because we had already persisted an it was de-duplicated (e.g. because we had already persisted an
event with the same transaction ID.) event with the same transaction ID.)
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
extra_users = extra_users or [] extra_users = extra_users or []

View file

@ -67,19 +67,14 @@ class ProfileHandler:
target_user = UserID.from_string(user_id) target_user = UserID.from_string(user_id)
if self.hs.is_mine(target_user): if self.hs.is_mine(target_user):
try: profileinfo = await self.store.get_profileinfo(target_user.localpart)
displayname = await self.store.get_profile_displayname( if profileinfo.display_name is None:
target_user.localpart
)
avatar_url = await self.store.get_profile_avatar_url(
target_user.localpart
)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
raise
return {"displayname": displayname, "avatar_url": avatar_url} return {
"displayname": profileinfo.display_name,
"avatar_url": profileinfo.avatar_url,
}
else: else:
try: try:
result = await self.federation.make_query( result = await self.federation.make_query(

View file

@ -440,7 +440,12 @@ class RoomCreationHandler:
spam_check = await self.spam_checker.user_may_create_room(user_id) spam_check = await self.spam_checker.user_may_create_room(user_id)
if spam_check != NOT_SPAM: if spam_check != NOT_SPAM:
raise SynapseError(403, "You are not permitted to create rooms", spam_check) raise SynapseError(
403,
"You are not permitted to create rooms",
errcode=spam_check[0],
additional_fields=spam_check[1],
)
creation_content: JsonDict = { creation_content: JsonDict = {
"room_version": new_room_version.identifier, "room_version": new_room_version.identifier,
@ -731,7 +736,10 @@ class RoomCreationHandler:
spam_check = await self.spam_checker.user_may_create_room(user_id) spam_check = await self.spam_checker.user_may_create_room(user_id)
if spam_check != NOT_SPAM: if spam_check != NOT_SPAM:
raise SynapseError( raise SynapseError(
403, "You are not permitted to create rooms", spam_check 403,
"You are not permitted to create rooms",
errcode=spam_check[0],
additional_fields=spam_check[1],
) )
if ratelimit: if ratelimit:
@ -1023,6 +1031,8 @@ class RoomCreationHandler:
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""} event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
last_sent_event_id: Optional[str] = None
def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
e = {"type": etype, "content": content} e = {"type": etype, "content": content}
@ -1032,19 +1042,27 @@ class RoomCreationHandler:
return e return e
async def send(etype: str, content: JsonDict, **kwargs: Any) -> int: async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
nonlocal last_sent_event_id
event = create(etype, content, **kwargs) event = create(etype, content, **kwargs)
logger.debug("Sending %s in new room", etype) logger.debug("Sending %s in new room", etype)
# Allow these events to be sent even if the user is shadow-banned to # Allow these events to be sent even if the user is shadow-banned to
# allow the room creation to complete. # allow the room creation to complete.
( (
_, sent_event,
last_stream_id, last_stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event( ) = await self.event_creation_handler.create_and_send_nonmember_event(
creator, creator,
event, event,
ratelimit=False, ratelimit=False,
ignore_shadow_ban=True, ignore_shadow_ban=True,
# Note: we don't pass state_event_ids here because this triggers
# an additional query per event to look them up from the events table.
prev_event_ids=[last_sent_event_id] if last_sent_event_id else [],
) )
last_sent_event_id = sent_event.event_id
return last_stream_id return last_stream_id
try: try:
@ -1058,7 +1076,9 @@ class RoomCreationHandler:
await send(etype=EventTypes.Create, content=creation_content) await send(etype=EventTypes.Create, content=creation_content)
logger.debug("Sending %s in new room", EventTypes.Member) logger.debug("Sending %s in new room", EventTypes.Member)
await self.room_member_handler.update_membership( # Room create event must exist at this point
assert last_sent_event_id is not None
member_event_id, _ = await self.room_member_handler.update_membership(
creator, creator,
creator.user, creator.user,
room_id, room_id,
@ -1066,7 +1086,9 @@ class RoomCreationHandler:
ratelimit=ratelimit, ratelimit=ratelimit,
content=creator_join_profile, content=creator_join_profile,
new_room=True, new_room=True,
prev_event_ids=[last_sent_event_id],
) )
last_sent_event_id = member_event_id
# We treat the power levels override specially as this needs to be one # We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room. # of the first events that get sent into a room.
@ -1387,6 +1409,7 @@ class TimestampLookupHandler:
# the timestamp given and the event we were able to find locally # the timestamp given and the event we were able to find locally
is_event_next_to_backward_gap = False is_event_next_to_backward_gap = False
is_event_next_to_forward_gap = False is_event_next_to_forward_gap = False
local_event = None
if local_event_id: if local_event_id:
local_event = await self.store.get_event( local_event = await self.store.get_event(
local_event_id, allow_none=False, allow_rejected=False local_event_id, allow_none=False, allow_rejected=False
@ -1473,7 +1496,10 @@ class TimestampLookupHandler:
ex.args, ex.args,
) )
if not local_event_id: # To appease mypy, we have to add both of these conditions to check for
# `None`. We only expect `local_event` to be `None` when
# `local_event_id` is `None` but mypy isn't as smart and assuming as us.
if not local_event_id or not local_event:
raise SynapseError( raise SynapseError(
404, 404,
"Unable to find event from %s in direction %s" % (timestamp, direction), "Unable to find event from %s in direction %s" % (timestamp, direction),

View file

@ -25,6 +25,7 @@ from synapse.api.constants import (
GuestAccess, GuestAccess,
HistoryVisibility, HistoryVisibility,
JoinRules, JoinRules,
PublicRoomsFilterFields,
) )
from synapse.api.errors import ( from synapse.api.errors import (
Codes, Codes,
@ -181,6 +182,7 @@ class RoomListHandler:
== HistoryVisibility.WORLD_READABLE, == HistoryVisibility.WORLD_READABLE,
"guest_can_join": room["guest_access"] == "can_join", "guest_can_join": room["guest_access"] == "can_join",
"join_rule": room["join_rules"], "join_rule": room["join_rules"],
"org.matrix.msc3827.room_type": room["room_type"],
} }
# Filter out Nones rather omit the field altogether # Filter out Nones rather omit the field altogether
@ -239,7 +241,9 @@ class RoomListHandler:
response["chunk"] = results response["chunk"] = results
response["total_room_count_estimate"] = await self.store.count_public_rooms( response["total_room_count_estimate"] = await self.store.count_public_rooms(
network_tuple, ignore_non_federatable=from_federation network_tuple,
ignore_non_federatable=from_federation,
search_filter=search_filter,
) )
return response return response
@ -508,8 +512,21 @@ class RoomListNextBatch:
def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool: def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool:
if search_filter and search_filter.get("generic_search_term", None): """Determines whether the given search filter matches a room entry returned over
generic_search_term = search_filter["generic_search_term"].upper() federation.
Only used if the remote server does not support MSC2197 remote-filtered search, and
hence does not support MSC3827 filtering of `/publicRooms` by room type either.
In this case, we cannot apply the `room_type` filter since no `room_type` field is
returned.
"""
if search_filter and search_filter.get(
PublicRoomsFilterFields.GENERIC_SEARCH_TERM, None
):
generic_search_term = search_filter[
PublicRoomsFilterFields.GENERIC_SEARCH_TERM
].upper()
if generic_search_term in room_entry.get("name", "").upper(): if generic_search_term in room_entry.get("name", "").upper():
return True return True
elif generic_search_term in room_entry.get("topic", "").upper(): elif generic_search_term in room_entry.get("topic", "").upper():

View file

@ -101,19 +101,33 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count, burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
) )
# Ratelimiter for invites, keyed by room (across all issuers, all
# recipients).
self._invites_per_room_limiter = Ratelimiter( self._invites_per_room_limiter = Ratelimiter(
store=self.store, store=self.store,
clock=self.clock, clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second, rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count, burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
) )
self._invites_per_user_limiter = Ratelimiter(
# Ratelimiter for invites, keyed by recipient (across all rooms, all
# issuers).
self._invites_per_recipient_limiter = Ratelimiter(
store=self.store, store=self.store,
clock=self.clock, clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second, rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count, burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
) )
# Ratelimiter for invites, keyed by issuer (across all rooms, all
# recipients).
self._invites_per_issuer_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_issuer.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_issuer.burst_count,
)
self._third_party_invite_limiter = Ratelimiter( self._third_party_invite_limiter = Ratelimiter(
store=self.store, store=self.store,
clock=self.clock, clock=self.clock,
@ -258,7 +272,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if room_id: if room_id:
await self._invites_per_room_limiter.ratelimit(requester, room_id) await self._invites_per_room_limiter.ratelimit(requester, room_id)
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id) await self._invites_per_recipient_limiter.ratelimit(requester, invitee_user_id)
if requester is not None:
await self._invites_per_issuer_limiter.ratelimit(requester)
async def _local_membership_update( async def _local_membership_update(
self, self,
@ -669,7 +685,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if target_id == self._server_notices_mxid: if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
block_invite_code = None block_invite_result = None
if ( if (
self._server_notices_mxid is not None self._server_notices_mxid is not None
@ -687,18 +703,21 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"Blocking invite: user is not admin and non-admin " "Blocking invite: user is not admin and non-admin "
"invites disabled" "invites disabled"
) )
block_invite_code = Codes.FORBIDDEN block_invite_result = (Codes.FORBIDDEN, {})
spam_check = await self.spam_checker.user_may_invite( spam_check = await self.spam_checker.user_may_invite(
requester.user.to_string(), target_id, room_id requester.user.to_string(), target_id, room_id
) )
if spam_check != NOT_SPAM: if spam_check != NOT_SPAM:
logger.info("Blocking invite due to spam checker") logger.info("Blocking invite due to spam checker")
block_invite_code = spam_check block_invite_result = spam_check
if block_invite_code is not None: if block_invite_result is not None:
raise SynapseError( raise SynapseError(
403, "Invites have been disabled on this server", block_invite_code 403,
"Invites have been disabled on this server",
errcode=block_invite_result[0],
additional_fields=block_invite_result[1],
) )
# An empty prev_events list is allowed as long as the auth_event_ids are present # An empty prev_events list is allowed as long as the auth_event_ids are present
@ -812,7 +831,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
target.to_string(), room_id, is_invited=inviter is not None target.to_string(), room_id, is_invited=inviter is not None
) )
if spam_check != NOT_SPAM: if spam_check != NOT_SPAM:
raise SynapseError(403, "Not allowed to join this room", spam_check) raise SynapseError(
403,
"Not allowed to join this room",
errcode=spam_check[0],
additional_fields=spam_check[1],
)
# Check if a remote join should be performed. # Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join( remote_join, remote_room_hosts = await self._should_perform_remote_join(
@ -830,10 +854,17 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content["membership"] = Membership.JOIN content["membership"] = Membership.JOIN
try:
profile = self.profile_handler profile = self.profile_handler
if not content_specified: if not content_specified:
content["displayname"] = await profile.get_displayname(target) content["displayname"] = await profile.get_displayname(target)
content["avatar_url"] = await profile.get_avatar_url(target) content["avatar_url"] = await profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information while processing remote join for %r: %s",
target,
e,
)
if requester.is_guest: if requester.is_guest:
content["kind"] = "guest" content["kind"] = "guest"
@ -910,11 +941,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content["membership"] = Membership.KNOCK content["membership"] = Membership.KNOCK
try:
profile = self.profile_handler profile = self.profile_handler
if "displayname" not in content: if "displayname" not in content:
content["displayname"] = await profile.get_displayname(target) content["displayname"] = await profile.get_displayname(target)
if "avatar_url" not in content: if "avatar_url" not in content:
content["avatar_url"] = await profile.get_avatar_url(target) content["avatar_url"] = await profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information while processing remote knock for %r: %s",
target,
e,
)
return await self.remote_knock( return await self.remote_knock(
remote_room_hosts, room_id, target, content remote_room_hosts, room_id, target, content
@ -1357,7 +1395,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_id=room_id, room_id=room_id,
) )
if spam_check != NOT_SPAM: if spam_check != NOT_SPAM:
raise SynapseError(403, "Cannot send threepid invite", spam_check) raise SynapseError(
403,
"Cannot send threepid invite",
errcode=spam_check[0],
additional_fields=spam_check[1],
)
stream_id = await self._make_and_store_3pid_invite( stream_id = await self._make_and_store_3pid_invite(
requester, requester,

View file

@ -271,6 +271,9 @@ class StatsHandler:
room_state["is_federatable"] = ( room_state["is_federatable"] = (
event_content.get(EventContentFields.FEDERATE, True) is True event_content.get(EventContentFields.FEDERATE, True) is True
) )
room_type = event_content.get(EventContentFields.ROOM_TYPE)
if isinstance(room_type, str):
room_state["room_type"] = room_type
elif typ == EventTypes.JoinRules: elif typ == EventTypes.JoinRules:
room_state["join_rules"] = event_content.get("join_rule") room_state["join_rules"] = event_content.get("join_rule")
elif typ == EventTypes.RoomHistoryVisibility: elif typ == EventTypes.RoomHistoryVisibility:

View file

@ -164,6 +164,7 @@ Gotchas
with an active span? with an active span?
""" """
import contextlib import contextlib
import enum
import inspect import inspect
import logging import logging
import re import re
@ -268,7 +269,7 @@ try:
_reporter: Reporter = attr.Factory(Reporter) _reporter: Reporter = attr.Factory(Reporter)
def set_process(self, *args, **kwargs): def set_process(self, *args: Any, **kwargs: Any) -> None:
return self._reporter.set_process(*args, **kwargs) return self._reporter.set_process(*args, **kwargs)
def report_span(self, span: "opentracing.Span") -> None: def report_span(self, span: "opentracing.Span") -> None:
@ -319,7 +320,11 @@ _homeserver_whitelist: Optional[Pattern[str]] = None
# Util methods # Util methods
Sentinel = object()
class _Sentinel(enum.Enum):
# defining a sentinel in this way allows mypy to correctly handle the
# type of a dictionary lookup.
sentinel = object()
P = ParamSpec("P") P = ParamSpec("P")
@ -339,12 +344,12 @@ def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]:
return _only_if_tracing_inner return _only_if_tracing_inner
def ensure_active_span(message, ret=None): def ensure_active_span(message: str, ret=None):
"""Executes the operation only if opentracing is enabled and there is an active span. """Executes the operation only if opentracing is enabled and there is an active span.
If there is no active span it logs message at the error level. If there is no active span it logs message at the error level.
Args: Args:
message (str): Message which fills in "There was no active span when trying to %s" message: Message which fills in "There was no active span when trying to %s"
in the error log if there is no active span and opentracing is enabled. in the error log if there is no active span and opentracing is enabled.
ret (object): return value if opentracing is None or there is no active span. ret (object): return value if opentracing is None or there is no active span.
@ -402,7 +407,7 @@ def init_tracer(hs: "HomeServer") -> None:
config = JaegerConfig( config = JaegerConfig(
config=hs.config.tracing.jaeger_config, config=hs.config.tracing.jaeger_config,
service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}", service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
scope_manager=LogContextScopeManager(hs.config), scope_manager=LogContextScopeManager(),
metrics_factory=PrometheusMetricsFactory(), metrics_factory=PrometheusMetricsFactory(),
) )
@ -451,15 +456,15 @@ def whitelisted_homeserver(destination: str) -> bool:
# Could use kwargs but I want these to be explicit # Could use kwargs but I want these to be explicit
def start_active_span( def start_active_span(
operation_name, operation_name: str,
child_of=None, child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
references=None, references: Optional[List["opentracing.Reference"]] = None,
tags=None, tags: Optional[Dict[str, str]] = None,
start_time=None, start_time: Optional[float] = None,
ignore_active_span=False, ignore_active_span: bool = False,
finish_on_close=True, finish_on_close: bool = True,
*, *,
tracer=None, tracer: Optional["opentracing.Tracer"] = None,
): ):
"""Starts an active opentracing span. """Starts an active opentracing span.
@ -493,11 +498,11 @@ def start_active_span(
def start_active_span_follows_from( def start_active_span_follows_from(
operation_name: str, operation_name: str,
contexts: Collection, contexts: Collection,
child_of=None, child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
start_time: Optional[float] = None, start_time: Optional[float] = None,
*, *,
inherit_force_tracing=False, inherit_force_tracing: bool = False,
tracer=None, tracer: Optional["opentracing.Tracer"] = None,
): ):
"""Starts an active opentracing span, with additional references to previous spans """Starts an active opentracing span, with additional references to previous spans
@ -540,7 +545,7 @@ def start_active_span_from_edu(
edu_content: Dict[str, Any], edu_content: Dict[str, Any],
operation_name: str, operation_name: str,
references: Optional[List["opentracing.Reference"]] = None, references: Optional[List["opentracing.Reference"]] = None,
tags: Optional[Dict] = None, tags: Optional[Dict[str, str]] = None,
start_time: Optional[float] = None, start_time: Optional[float] = None,
ignore_active_span: bool = False, ignore_active_span: bool = False,
finish_on_close: bool = True, finish_on_close: bool = True,
@ -617,23 +622,27 @@ def set_operation_name(operation_name: str) -> None:
@only_if_tracing @only_if_tracing
def force_tracing(span=Sentinel) -> None: def force_tracing(
span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel
) -> None:
"""Force sampling for the active/given span and its children. """Force sampling for the active/given span and its children.
Args: Args:
span: span to force tracing for. By default, the active span. span: span to force tracing for. By default, the active span.
""" """
if span is Sentinel: if isinstance(span, _Sentinel):
span = opentracing.tracer.active_span span_to_trace = opentracing.tracer.active_span
if span is None: else:
span_to_trace = span
if span_to_trace is None:
logger.error("No active span in force_tracing") logger.error("No active span in force_tracing")
return return
span.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) span_to_trace.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
# also set a bit of baggage, so that we have a way of figuring out if # also set a bit of baggage, so that we have a way of figuring out if
# it is enabled later # it is enabled later
span.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1") span_to_trace.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1")
def is_context_forced_tracing( def is_context_forced_tracing(
@ -789,7 +798,7 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte
# Tracing decorators # Tracing decorators
def trace(func=None, opname=None): def trace(func=None, opname: Optional[str] = None):
""" """
Decorator to trace a function. Decorator to trace a function.
Sets the operation name to that of the function's or that given Sets the operation name to that of the function's or that given
@ -822,11 +831,11 @@ def trace(func=None, opname=None):
result = func(*args, **kwargs) result = func(*args, **kwargs)
if isinstance(result, defer.Deferred): if isinstance(result, defer.Deferred):
def call_back(result): def call_back(result: R) -> R:
scope.__exit__(None, None, None) scope.__exit__(None, None, None)
return result return result
def err_back(result): def err_back(result: R) -> R:
scope.__exit__(None, None, None) scope.__exit__(None, None, None)
return result return result

View file

@ -16,11 +16,15 @@ import logging
from types import TracebackType from types import TracebackType
from typing import Optional, Type from typing import Optional, Type
from opentracing import Scope, ScopeManager from opentracing import Scope, ScopeManager, Span
import twisted import twisted
from synapse.logging.context import current_context, nested_logging_context from synapse.logging.context import (
LoggingContext,
current_context,
nested_logging_context,
)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -35,11 +39,11 @@ class LogContextScopeManager(ScopeManager):
but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301. but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
""" """
def __init__(self, config): def __init__(self) -> None:
pass pass
@property @property
def active(self): def active(self) -> Optional[Scope]:
""" """
Returns the currently active Scope which can be used to access the Returns the currently active Scope which can be used to access the
currently active Scope.span. currently active Scope.span.
@ -48,19 +52,18 @@ class LogContextScopeManager(ScopeManager):
Tracer.start_active_span() time. Tracer.start_active_span() time.
Return: Return:
(Scope) : the Scope that is active, or None if not The Scope that is active, or None if not available.
available.
""" """
ctx = current_context() ctx = current_context()
return ctx.scope return ctx.scope
def activate(self, span, finish_on_close): def activate(self, span: Span, finish_on_close: bool) -> Scope:
""" """
Makes a Span active. Makes a Span active.
Args Args
span (Span): the span that should become active. span: the span that should become active.
finish_on_close (Boolean): whether Span should be automatically finish_on_close: whether Span should be automatically finished when
finished when Scope.close() is called. Scope.close() is called.
Returns: Returns:
Scope to control the end of the active period for Scope to control the end of the active period for
@ -112,8 +115,8 @@ class _LogContextScope(Scope):
def __init__( def __init__(
self, self,
manager: LogContextScopeManager, manager: LogContextScopeManager,
span, span: Span,
logcontext, logcontext: LoggingContext,
enter_logcontext: bool, enter_logcontext: bool,
finish_on_close: bool, finish_on_close: bool,
): ):
@ -121,10 +124,10 @@ class _LogContextScope(Scope):
Args: Args:
manager: manager:
the manager that is responsible for this scope. the manager that is responsible for this scope.
span (Span): span:
the opentracing span which this scope represents the local the opentracing span which this scope represents the local
lifetime for. lifetime for.
logcontext (LogContext): logcontext:
the log context to which this scope is attached. the log context to which this scope is attached.
enter_logcontext: enter_logcontext:
if True the log context will be exited when the scope is finished if True the log context will be exited when the scope is finished

View file

@ -35,6 +35,7 @@ from typing_extensions import ParamSpec
from twisted.internet import defer from twisted.internet import defer
from twisted.web.resource import Resource from twisted.web.resource import Resource
from synapse.api import errors
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.presence_router import ( from synapse.events.presence_router import (

View file

@ -17,7 +17,6 @@ import itertools
import logging import logging
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import attr
from prometheus_client import Counter from prometheus_client import Counter
from synapse.api.constants import EventTypes, Membership, RelationTypes from synapse.api.constants import EventTypes, Membership, RelationTypes
@ -26,13 +25,11 @@ from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.util.async_helpers import Linearizer from synapse.storage.state import StateFilter
from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches import register_cache
from synapse.util.caches.descriptors import lru_cache
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import measure_func from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
from ..storage.state import StateFilter
from .push_rule_evaluator import PushRuleEvaluatorForEvent from .push_rule_evaluator import PushRuleEvaluatorForEvent
if TYPE_CHECKING: if TYPE_CHECKING:
@ -48,15 +45,6 @@ push_rules_state_size_counter = Counter(
"synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", "" "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", ""
) )
# Measures whether we use the fast path of using state deltas, or if we have to
# recalculate from scratch
push_rules_delta_state_cache_metric = register_cache(
"cache",
"push_rules_delta_state_cache_metric",
cache=[], # Meaningless size, as this isn't a cache that stores values
resizable=False,
)
STATE_EVENT_TYPES_TO_MARK_UNREAD = { STATE_EVENT_TYPES_TO_MARK_UNREAD = {
EventTypes.Topic, EventTypes.Topic,
@ -111,10 +99,6 @@ class BulkPushRuleEvaluator:
self.clock = hs.get_clock() self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler() self._event_auth_handler = hs.get_event_auth_handler()
# Used by `RulesForRoom` to ensure only one thing mutates the cache at a
# time. Keyed off room_id.
self._rules_linearizer = Linearizer(name="rules_for_room")
self.room_push_rule_cache_metrics = register_cache( self.room_push_rule_cache_metrics = register_cache(
"cache", "cache",
"room_push_rule_cache", "room_push_rule_cache",
@ -126,48 +110,48 @@ class BulkPushRuleEvaluator:
self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled
async def _get_rules_for_event( async def _get_rules_for_event(
self, event: EventBase, context: EventContext self,
event: EventBase,
) -> Dict[str, List[Dict[str, Any]]]: ) -> Dict[str, List[Dict[str, Any]]]:
"""This gets the rules for all users in the room at the time of the event, """Get the push rules for all users who may need to be notified about
as well as the push rules for the invitee if the event is an invite. the event.
Note: this does not check if the user is allowed to see the event.
Returns: Returns:
dict of user_id -> push_rules Mapping of user ID to their push rules.
""" """
room_id = event.room_id # We get the users who may need to be notified by first fetching the
# local users currently in the room, finding those that have push rules,
# and *then* checking which users are actually allowed to see the event.
#
# The alternative is to first fetch all users that were joined at the
# event, but that requires fetching the full state at the event, which
# may be expensive for large rooms with few local users.
rules_for_room_data = self._get_rules_for_room(room_id) local_users = await self.store.get_local_users_in_room(event.room_id)
rules_for_room = RulesForRoom(
hs=self.hs,
room_id=room_id,
rules_for_room_cache=self._get_rules_for_room.cache,
room_push_rule_cache_metrics=self.room_push_rule_cache_metrics,
linearizer=self._rules_linearizer,
cached_data=rules_for_room_data,
)
rules_by_user = await rules_for_room.get_rules(event, context)
# if this event is an invite event, we may need to run rules for the user # if this event is an invite event, we may need to run rules for the user
# who's been invited, otherwise they won't get told they've been invited # who's been invited, otherwise they won't get told they've been invited
if event.type == "m.room.member" and event.content["membership"] == "invite": if event.type == EventTypes.Member and event.membership == Membership.INVITE:
invited = event.state_key invited = event.state_key
if invited and self.hs.is_mine_id(invited): if invited and self.hs.is_mine_id(invited) and invited not in local_users:
rules_by_user = dict(rules_by_user) local_users = list(local_users)
rules_by_user[invited] = await self.store.get_push_rules_for_user( local_users.append(invited)
invited
rules_by_user = await self.store.bulk_get_push_rules(local_users)
logger.debug("Users in room: %s", local_users)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Returning push rules for %r %r",
event.room_id,
list(rules_by_user.keys()),
) )
return rules_by_user return rules_by_user
@lru_cache()
def _get_rules_for_room(self, room_id: str) -> "RulesForRoomData":
"""Get the current RulesForRoomData object for the given room id"""
# It's important that the RulesForRoomData object gets added to self._get_rules_for_room.cache
# before any lookup methods get called on it as otherwise there may be
# a race if invalidate_all gets called (which assumes its in the cache)
return RulesForRoomData()
async def _get_power_levels_and_sender_level( async def _get_power_levels_and_sender_level(
self, event: EventBase, context: EventContext self, event: EventBase, context: EventContext
) -> Tuple[dict, int]: ) -> Tuple[dict, int]:
@ -262,10 +246,12 @@ class BulkPushRuleEvaluator:
count_as_unread = _should_count_as_unread(event, context) count_as_unread = _should_count_as_unread(event, context)
rules_by_user = await self._get_rules_for_event(event, context) rules_by_user = await self._get_rules_for_event(event)
actions_by_user: Dict[str, List[Union[dict, str]]] = {} actions_by_user: Dict[str, List[Union[dict, str]]] = {}
room_members = await self.store.get_joined_users_from_context(event, context) room_member_count = await self.store.get_number_joined_users_in_room(
event.room_id
)
( (
power_levels, power_levels,
@ -278,30 +264,36 @@ class BulkPushRuleEvaluator:
evaluator = PushRuleEvaluatorForEvent( evaluator = PushRuleEvaluatorForEvent(
event, event,
len(room_members), room_member_count,
sender_power_level, sender_power_level,
power_levels, power_levels,
relations, relations,
self._relations_match_enabled, self._relations_match_enabled,
) )
# If the event is not a state event check if any users ignore the sender. users = rules_by_user.keys()
if not event.is_state(): profiles = await self.store.get_subset_users_in_room_with_profiles(
ignorers = await self.store.ignored_by(event.sender) event.room_id, users
else: )
ignorers = frozenset()
# This is a check for the case where user joins a room without being
# allowed to see history, and then the server receives a delayed event
# from before the user joined, which they should not be pushed for
uids_with_visibility = await filter_event_for_clients_with_state(
self.store, users, event, context
)
for uid, rules in rules_by_user.items(): for uid, rules in rules_by_user.items():
if event.sender == uid: if event.sender == uid:
continue continue
if uid in ignorers: if uid not in uids_with_visibility:
continue continue
display_name = None display_name = None
profile_info = room_members.get(uid) profile = profiles.get(uid)
if profile_info: if profile:
display_name = profile_info.display_name display_name = profile.display_name
if not display_name: if not display_name:
# Handle the case where we are pushing a membership event to # Handle the case where we are pushing a membership event to
@ -346,283 +338,3 @@ MemberMap = Dict[str, Optional[EventIdMembership]]
Rule = Dict[str, dict] Rule = Dict[str, dict]
RulesByUser = Dict[str, List[Rule]] RulesByUser = Dict[str, List[Rule]]
StateGroup = Union[object, int] StateGroup = Union[object, int]
@attr.s(slots=True, auto_attribs=True)
class RulesForRoomData:
"""The data stored in the cache by `RulesForRoom`.
We don't store `RulesForRoom` directly in the cache as we want our caches to
*only* include data, and not references to e.g. the data stores.
"""
# event_id -> EventIdMembership
member_map: MemberMap = attr.Factory(dict)
# user_id -> rules
rules_by_user: RulesByUser = attr.Factory(dict)
# The last state group we updated the caches for. If the state_group of
# a new event comes along, we know that we can just return the cached
# result.
# On invalidation of the rules themselves (if the user changes them),
# we invalidate everything and set state_group to `object()`
state_group: StateGroup = attr.Factory(object)
# A sequence number to keep track of when we're allowed to update the
# cache. We bump the sequence number when we invalidate the cache. If
# the sequence number changes while we're calculating stuff we should
# not update the cache with it.
sequence: int = 0
# A cache of user_ids that we *know* aren't interesting, e.g. user_ids
# owned by AS's, or remote users, etc. (I.e. users we will never need to
# calculate push for)
# These never need to be invalidated as we will never set up push for
# them.
uninteresting_user_set: Set[str] = attr.Factory(set)
class RulesForRoom:
"""Caches push rules for users in a room.
This efficiently handles users joining/leaving the room by not invalidating
the entire cache for the room.
A new instance is constructed for each call to
`BulkPushRuleEvaluator._get_rules_for_event`, with the cached data from
previous calls passed in.
"""
def __init__(
self,
hs: "HomeServer",
room_id: str,
rules_for_room_cache: LruCache,
room_push_rule_cache_metrics: CacheMetric,
linearizer: Linearizer,
cached_data: RulesForRoomData,
):
"""
Args:
hs: The HomeServer object.
room_id: The room ID.
rules_for_room_cache: The cache object that caches these
RoomsForUser objects.
room_push_rule_cache_metrics: The metrics object
linearizer: The linearizer used to ensure only one thing mutates
the cache at a time. Keyed off room_id
cached_data: Cached data from previous calls to `self.get_rules`,
can be mutated.
"""
self.room_id = room_id
self.is_mine_id = hs.is_mine_id
self.store = hs.get_datastores().main
self.room_push_rule_cache_metrics = room_push_rule_cache_metrics
# Used to ensure only one thing mutates the cache at a time. Keyed off
# room_id.
self.linearizer = linearizer
self.data = cached_data
# We need to be clever on the invalidating caches callbacks, as
# otherwise the invalidation callback holds a reference to the object,
# potentially causing it to leak.
# To get around this we pass a function that on invalidations looks ups
# the RoomsForUser entry in the cache, rather than keeping a reference
# to self around in the callback.
self.invalidate_all_cb = _Invalidation(rules_for_room_cache, room_id)
async def get_rules(
self, event: EventBase, context: EventContext
) -> Dict[str, List[Dict[str, dict]]]:
"""Given an event context return the rules for all users who are
currently in the room.
"""
state_group = context.state_group
if state_group and self.data.state_group == state_group:
logger.debug("Using cached rules for %r", self.room_id)
self.room_push_rule_cache_metrics.inc_hits()
return self.data.rules_by_user
async with self.linearizer.queue(self.room_id):
if state_group and self.data.state_group == state_group:
logger.debug("Using cached rules for %r", self.room_id)
self.room_push_rule_cache_metrics.inc_hits()
return self.data.rules_by_user
self.room_push_rule_cache_metrics.inc_misses()
ret_rules_by_user = {}
missing_member_event_ids = {}
if state_group and self.data.state_group == context.prev_group:
# If we have a simple delta then we can reuse most of the previous
# results.
ret_rules_by_user = self.data.rules_by_user
current_state_ids = context.delta_ids
push_rules_delta_state_cache_metric.inc_hits()
else:
current_state_ids = await context.get_current_state_ids()
push_rules_delta_state_cache_metric.inc_misses()
# Ensure the state IDs exist.
assert current_state_ids is not None
push_rules_state_size_counter.inc(len(current_state_ids))
logger.debug(
"Looking for member changes in %r %r", state_group, current_state_ids
)
# Loop through to see which member events we've seen and have rules
# for and which we need to fetch
for key in current_state_ids:
typ, user_id = key
if typ != EventTypes.Member:
continue
if user_id in self.data.uninteresting_user_set:
continue
if not self.is_mine_id(user_id):
self.data.uninteresting_user_set.add(user_id)
continue
if self.store.get_if_app_services_interested_in_user(user_id):
self.data.uninteresting_user_set.add(user_id)
continue
event_id = current_state_ids[key]
res = self.data.member_map.get(event_id, None)
if res:
if res.membership == Membership.JOIN:
rules = self.data.rules_by_user.get(res.user_id, None)
if rules:
ret_rules_by_user[res.user_id] = rules
continue
# If a user has left a room we remove their push rule. If they
# joined then we re-add it later in _update_rules_with_member_event_ids
ret_rules_by_user.pop(user_id, None)
missing_member_event_ids[user_id] = event_id
if missing_member_event_ids:
# If we have some member events we haven't seen, look them up
# and fetch push rules for them if appropriate.
logger.debug("Found new member events %r", missing_member_event_ids)
await self._update_rules_with_member_event_ids(
ret_rules_by_user, missing_member_event_ids, state_group, event
)
else:
# The push rules didn't change but lets update the cache anyway
self.update_cache(
self.data.sequence,
members={}, # There were no membership changes
rules_by_user=ret_rules_by_user,
state_group=state_group,
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Returning push rules for %r %r", self.room_id, ret_rules_by_user.keys()
)
return ret_rules_by_user
async def _update_rules_with_member_event_ids(
self,
ret_rules_by_user: Dict[str, list],
member_event_ids: Dict[str, str],
state_group: Optional[int],
event: EventBase,
) -> None:
"""Update the partially filled rules_by_user dict by fetching rules for
any newly joined users in the `member_event_ids` list.
Args:
ret_rules_by_user: Partially filled dict of push rules. Gets
updated with any new rules.
member_event_ids: Dict of user id to event id for membership events
that have happened since the last time we filled rules_by_user
state_group: The state group we are currently computing push rules
for. Used when updating the cache.
event: The event we are currently computing push rules for.
"""
sequence = self.data.sequence
members = await self.store.get_membership_from_event_ids(
member_event_ids.values()
)
# If the event is a join event then it will be in current state events
# map but not in the DB, so we have to explicitly insert it.
if event.type == EventTypes.Member:
for event_id in member_event_ids.values():
if event_id == event.event_id:
members[event_id] = EventIdMembership(
user_id=event.state_key, membership=event.membership
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Found members %r: %r", self.room_id, members.values())
joined_user_ids = {
entry.user_id
for entry in members.values()
if entry and entry.membership == Membership.JOIN
}
logger.debug("Joined: %r", joined_user_ids)
# Previously we only considered users with pushers or read receipts in that
# room. We can't do this anymore because we use push actions to calculate unread
# counts, which don't rely on the user having pushers or sent a read receipt into
# the room. Therefore we just need to filter for local users here.
user_ids = list(filter(self.is_mine_id, joined_user_ids))
rules_by_user = await self.store.bulk_get_push_rules(
user_ids, on_invalidate=self.invalidate_all_cb
)
ret_rules_by_user.update(
item for item in rules_by_user.items() if item[0] is not None
)
self.update_cache(sequence, members, ret_rules_by_user, state_group)
def update_cache(
self,
sequence: int,
members: MemberMap,
rules_by_user: RulesByUser,
state_group: StateGroup,
) -> None:
if sequence == self.data.sequence:
self.data.member_map.update(members)
self.data.rules_by_user = rules_by_user
self.data.state_group = state_group
@attr.attrs(slots=True, frozen=True, auto_attribs=True)
class _Invalidation:
# _Invalidation is passed as an `on_invalidate` callback to bulk_get_push_rules,
# which means that it it is stored on the bulk_get_push_rules cache entry. In order
# to ensure that we don't accumulate lots of redundant callbacks on the cache entry,
# we need to ensure that two _Invalidation objects are "equal" if they refer to the
# same `cache` and `room_id`.
#
# attrs provides suitable __hash__ and __eq__ methods, provided we remember to
# set `frozen=True`.
cache: LruCache
room_id: str
def __call__(self) -> None:
rules_data = self.cache.get(self.room_id, None, update_metrics=False)
if rules_data:
rules_data.sequence += 1
rules_data.state_group = object()
rules_data.member_map = {}
rules_data.rules_by_user = {}
push_rules_invalidation_counter.inc()

View file

@ -25,6 +25,7 @@ from synapse.replication.http import (
push, push,
register, register,
send_event, send_event,
state,
streams, streams,
) )
@ -48,6 +49,7 @@ class ReplicationRestResource(JsonResource):
streams.register_servlets(hs, self) streams.register_servlets(hs, self)
account_data.register_servlets(hs, self) account_data.register_servlets(hs, self)
push.register_servlets(hs, self) push.register_servlets(hs, self)
state.register_servlets(hs, self)
# The following can't currently be instantiated on workers. # The following can't currently be instantiated on workers.
if hs.config.worker.worker_app is None: if hs.config.worker.worker_app is None:

View file

@ -60,6 +60,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
{ {
"max_stream_id": 32443, "max_stream_id": 32443,
} }
Responds with a 409 when a `PartialStateConflictError` is raised due to an event
context that needs to be recomputed due to the un-partial stating of a room.
""" """
NAME = "fed_send_events" NAME = "fed_send_events"

View file

@ -59,6 +59,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
{ "stream_id": 12345, "event_id": "$abcdef..." } { "stream_id": 12345, "event_id": "$abcdef..." }
Responds with a 409 when a `PartialStateConflictError` is raised due to an event
context that needs to be recomputed due to the un-partial stating of a room.
The returned event ID may not match the sent event if it was deduplicated. The returned event ID may not match the sent event if it was deduplicated.
""" """

View file

@ -0,0 +1,75 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.api.errors import SynapseError
from synapse.http.server import HttpServer
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReplicationUpdateCurrentStateRestServlet(ReplicationEndpoint):
"""Recalculates the current state for a room, and persists it.
The API looks like:
POST /_synapse/replication/update_current_state/:room_id
{}
200 OK
{}
"""
NAME = "update_current_state"
PATH_ARGS = ("room_id",)
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._state_handler = hs.get_state_handler()
self._events_shard_config = hs.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
@staticmethod
async def _serialize_payload(room_id: str) -> JsonDict: # type: ignore[override]
return {}
async def _handle_request( # type: ignore[override]
self, request: Request, room_id: str
) -> Tuple[int, JsonDict]:
writer_instance = self._events_shard_config.get_instance(room_id)
if writer_instance != self._instance_name:
raise SynapseError(
400, "/update_current_state request was routed to the wrong worker"
)
await self._state_handler.update_current_state(room_id)
return 200, {}
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
if hs.get_instance_name() in hs.config.worker.writers.events:
ReplicationUpdateCurrentStateRestServlet(hs).register(http_server)

View file

@ -15,11 +15,11 @@
import logging import logging
from typing import TYPE_CHECKING, Tuple from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.types import JsonDict from synapse.types import JsonDict, RoomID
from ._base import client_patterns from ._base import client_patterns
@ -104,6 +104,13 @@ class RoomAccountDataServlet(RestServlet):
if user_id != requester.user.to_string(): if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add account data for other users.") raise AuthError(403, "Cannot add account data for other users.")
if not RoomID.is_valid(room_id):
raise SynapseError(
400,
f"{room_id} is not a valid room ID",
Codes.INVALID_PARAM,
)
body = parse_json_object_from_request(request) body = parse_json_object_from_request(request)
if account_data_type == "m.fully_read": if account_data_type == "m.fully_read":
@ -111,6 +118,7 @@ class RoomAccountDataServlet(RestServlet):
405, 405,
"Cannot set m.fully_read through this API." "Cannot set m.fully_read through this API."
" Use /rooms/!roomId:server.name/read_markers", " Use /rooms/!roomId:server.name/read_markers",
Codes.BAD_JSON,
) )
await self.handler.add_account_data_to_room( await self.handler.add_account_data_to_room(
@ -130,6 +138,13 @@ class RoomAccountDataServlet(RestServlet):
if user_id != requester.user.to_string(): if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get account data for other users.") raise AuthError(403, "Cannot get account data for other users.")
if not RoomID.is_valid(room_id):
raise SynapseError(
400,
f"{room_id} is not a valid room ID",
Codes.INVALID_PARAM,
)
event = await self.store.get_account_data_for_room_and_type( event = await self.store.get_account_data_for_room_and_type(
user_id, room_id, account_data_type user_id, room_id, account_data_type
) )

View file

@ -95,6 +95,8 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
# Supports receiving private read receipts as per MSC2285 # Supports receiving private read receipts as per MSC2285
"org.matrix.msc2285": self.config.experimental.msc2285_enabled, "org.matrix.msc2285": self.config.experimental.msc2285_enabled,
# Supports filtering of /publicRooms by room type MSC3827
"org.matrix.msc3827": self.config.experimental.msc3827_enabled,
# Adds support for importing historical messages as per MSC2716 # Adds support for importing historical messages as per MSC2716
"org.matrix.msc2716": self.config.experimental.msc2716_enabled, "org.matrix.msc2716": self.config.experimental.msc2716_enabled,
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030

View file

@ -154,7 +154,9 @@ class MediaStorage:
# Note that we'll delete the stored media, due to the # Note that we'll delete the stored media, due to the
# try/except below. The media also won't be stored in # try/except below. The media also won't be stored in
# the DB. # the DB.
raise SpamMediaException(errcode=spam_check) # We currently ignore any additional field returned by
# the spam-check API.
raise SpamMediaException(errcode=spam_check[0])
for provider in self.storage_providers: for provider in self.storage_providers:
await provider.store_file(path, file_info) await provider.store_file(path, file_info)

View file

@ -43,6 +43,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersio
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
from synapse.logging.context import ContextResourceUsage from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2 from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.roommember import ProfileInfo from synapse.storage.roommember import ProfileInfo
@ -129,6 +130,12 @@ class StateHandler:
self.hs = hs self.hs = hs
self._state_resolution_handler = hs.get_state_resolution_handler() self._state_resolution_handler = hs.get_state_resolution_handler()
self._storage_controllers = hs.get_storage_controllers() self._storage_controllers = hs.get_storage_controllers()
self._events_shard_config = hs.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
self._update_current_state_client = (
ReplicationUpdateCurrentStateRestServlet.make_client(hs)
)
async def get_current_state_ids( async def get_current_state_ids(
self, self,
@ -249,8 +256,12 @@ class StateHandler:
partial_state = True partial_state = True
logger.debug("calling resolve_state_groups from compute_event_context") logger.debug("calling resolve_state_groups from compute_event_context")
# we've already taken into account partial state, so no need to wait for
# complete state here.
entry = await self.resolve_state_groups_for_events( entry = await self.resolve_state_groups_for_events(
event.room_id, event.prev_event_ids() event.room_id,
event.prev_event_ids(),
await_full_state=False,
) )
state_ids_before_event = entry.state state_ids_before_event = entry.state
@ -335,7 +346,7 @@ class StateHandler:
@measure_func() @measure_func()
async def resolve_state_groups_for_events( async def resolve_state_groups_for_events(
self, room_id: str, event_ids: Collection[str] self, room_id: str, event_ids: Collection[str], await_full_state: bool = True
) -> _StateCacheEntry: ) -> _StateCacheEntry:
"""Given a list of event_ids this method fetches the state at each """Given a list of event_ids this method fetches the state at each
event, resolves conflicts between them and returns them. event, resolves conflicts between them and returns them.
@ -343,6 +354,8 @@ class StateHandler:
Args: Args:
room_id room_id
event_ids event_ids
await_full_state: if true, will block if we do not yet have complete
state at these events.
Returns: Returns:
The resolved state The resolved state
@ -350,7 +363,7 @@ class StateHandler:
logger.debug("resolve_state_groups event_ids %s", event_ids) logger.debug("resolve_state_groups event_ids %s", event_ids)
state_groups = await self._state_storage_controller.get_state_group_for_events( state_groups = await self._state_storage_controller.get_state_group_for_events(
event_ids event_ids, await_full_state=await_full_state
) )
state_group_ids = state_groups.values() state_group_ids = state_groups.values()
@ -417,6 +430,24 @@ class StateHandler:
return {key: state_map[ev_id] for key, ev_id in new_state.items()} return {key: state_map[ev_id] for key, ev_id in new_state.items()}
async def update_current_state(self, room_id: str) -> None:
"""Recalculates the current state for a room, and persists it.
Raises:
SynapseError(502): if all attempts to connect to the event persister worker
fail
"""
writer_instance = self._events_shard_config.get_instance(room_id)
if writer_instance != self._instance_name:
await self._update_current_state_client(
instance_name=writer_instance,
room_id=room_id,
)
return
assert self._storage_controllers.persistence is not None
await self._storage_controllers.persistence.update_current_state(room_id)
@attr.s(slots=True, auto_attribs=True) @attr.s(slots=True, auto_attribs=True)
class _StateResMetrics: class _StateResMetrics:

View file

@ -75,6 +75,15 @@ class SQLBaseStore(metaclass=ABCMeta):
self._attempt_to_invalidate_cache( self._attempt_to_invalidate_cache(
"get_users_in_room_with_profiles", (room_id,) "get_users_in_room_with_profiles", (room_id,)
) )
self._attempt_to_invalidate_cache(
"get_number_joined_users_in_room", (room_id,)
)
self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,))
for user_id in members_changed:
self._attempt_to_invalidate_cache(
"get_user_in_room_with_profile", (room_id, user_id)
)
# Purge other caches based on room state. # Purge other caches based on room state.
self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_room_summary", (room_id,))

View file

@ -22,6 +22,7 @@ from typing import (
Any, Any,
Awaitable, Awaitable,
Callable, Callable,
ClassVar,
Collection, Collection,
Deque, Deque,
Dict, Dict,
@ -33,6 +34,7 @@ from typing import (
Set, Set,
Tuple, Tuple,
TypeVar, TypeVar,
Union,
) )
import attr import attr
@ -111,9 +113,43 @@ times_pruned_extremities = Counter(
@attr.s(auto_attribs=True, slots=True) @attr.s(auto_attribs=True, slots=True)
class _EventPersistQueueItem: class _PersistEventsTask:
"""A batch of events to persist."""
name: ClassVar[str] = "persist_event_batch" # used for opentracing
events_and_contexts: List[Tuple[EventBase, EventContext]] events_and_contexts: List[Tuple[EventBase, EventContext]]
backfilled: bool backfilled: bool
def try_merge(self, task: "_EventPersistQueueTask") -> bool:
"""Batches events with the same backfilled option together."""
if (
not isinstance(task, _PersistEventsTask)
or self.backfilled != task.backfilled
):
return False
self.events_and_contexts.extend(task.events_and_contexts)
return True
@attr.s(auto_attribs=True, slots=True)
class _UpdateCurrentStateTask:
"""A room whose current state needs recalculating."""
name: ClassVar[str] = "update_current_state" # used for opentracing
def try_merge(self, task: "_EventPersistQueueTask") -> bool:
"""Deduplicates consecutive recalculations of current state."""
return isinstance(task, _UpdateCurrentStateTask)
_EventPersistQueueTask = Union[_PersistEventsTask, _UpdateCurrentStateTask]
@attr.s(auto_attribs=True, slots=True)
class _EventPersistQueueItem:
task: _EventPersistQueueTask
deferred: ObservableDeferred deferred: ObservableDeferred
parent_opentracing_span_contexts: List = attr.ib(factory=list) parent_opentracing_span_contexts: List = attr.ib(factory=list)
@ -127,14 +163,16 @@ _PersistResult = TypeVar("_PersistResult")
class _EventPeristenceQueue(Generic[_PersistResult]): class _EventPeristenceQueue(Generic[_PersistResult]):
"""Queues up events so that they can be persisted in bulk with only one """Queues up tasks so that they can be processed with only one concurrent
concurrent transaction per room. transaction per room.
Tasks can be bulk persistence of events or recalculation of a room's current state.
""" """
def __init__( def __init__(
self, self,
per_item_callback: Callable[ per_item_callback: Callable[
[List[Tuple[EventBase, EventContext]], bool], [str, _EventPersistQueueTask],
Awaitable[_PersistResult], Awaitable[_PersistResult],
], ],
): ):
@ -150,18 +188,17 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
async def add_to_queue( async def add_to_queue(
self, self,
room_id: str, room_id: str,
events_and_contexts: Iterable[Tuple[EventBase, EventContext]], task: _EventPersistQueueTask,
backfilled: bool,
) -> _PersistResult: ) -> _PersistResult:
"""Add events to the queue, with the given persist_event options. """Add a task to the queue.
If we are not already processing events in this room, starts off a background If we are not already processing tasks in this room, starts off a background
process to to so, calling the per_item_callback for each item. process to to so, calling the per_item_callback for each item.
Args: Args:
room_id (str): room_id (str):
events_and_contexts (list[(EventBase, EventContext)]): task (_EventPersistQueueTask): A _PersistEventsTask or
backfilled (bool): _UpdateCurrentStateTask to process.
Returns: Returns:
the result returned by the `_per_item_callback` passed to the result returned by the `_per_item_callback` passed to
@ -169,26 +206,20 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
""" """
queue = self._event_persist_queues.setdefault(room_id, deque()) queue = self._event_persist_queues.setdefault(room_id, deque())
# if the last item in the queue has the same `backfilled` setting, if queue and queue[-1].task.try_merge(task):
# we can just add these new events to that item. # the new task has been merged into the last task in the queue
if queue and queue[-1].backfilled == backfilled:
end_item = queue[-1] end_item = queue[-1]
else: else:
# need to make a new queue item
deferred: ObservableDeferred[_PersistResult] = ObservableDeferred( deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
defer.Deferred(), consumeErrors=True defer.Deferred(), consumeErrors=True
) )
end_item = _EventPersistQueueItem( end_item = _EventPersistQueueItem(
events_and_contexts=[], task=task,
backfilled=backfilled,
deferred=deferred, deferred=deferred,
) )
queue.append(end_item) queue.append(end_item)
# add our events to the queue item
end_item.events_and_contexts.extend(events_and_contexts)
# also add our active opentracing span to the item so that we get a link back # also add our active opentracing span to the item so that we get a link back
span = opentracing.active_span() span = opentracing.active_span()
if span: if span:
@ -202,7 +233,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
# add another opentracing span which links to the persist trace. # add another opentracing span which links to the persist trace.
with opentracing.start_active_span_follows_from( with opentracing.start_active_span_follows_from(
"persist_event_batch_complete", (end_item.opentracing_span_context,) f"{task.name}_complete", (end_item.opentracing_span_context,)
): ):
pass pass
@ -234,16 +265,14 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
for item in queue: for item in queue:
try: try:
with opentracing.start_active_span_follows_from( with opentracing.start_active_span_follows_from(
"persist_event_batch", item.task.name,
item.parent_opentracing_span_contexts, item.parent_opentracing_span_contexts,
inherit_force_tracing=True, inherit_force_tracing=True,
) as scope: ) as scope:
if scope: if scope:
item.opentracing_span_context = scope.span.context item.opentracing_span_context = scope.span.context
ret = await self._per_item_callback( ret = await self._per_item_callback(room_id, item.task)
item.events_and_contexts, item.backfilled
)
except Exception: except Exception:
with PreserveLoggingContext(): with PreserveLoggingContext():
item.deferred.errback() item.deferred.errback()
@ -292,9 +321,32 @@ class EventsPersistenceStorageController:
self._clock = hs.get_clock() self._clock = hs.get_clock()
self._instance_name = hs.get_instance_name() self._instance_name = hs.get_instance_name()
self.is_mine_id = hs.is_mine_id self.is_mine_id = hs.is_mine_id
self._event_persist_queue = _EventPeristenceQueue(self._persist_event_batch) self._event_persist_queue = _EventPeristenceQueue(
self._process_event_persist_queue_task
)
self._state_resolution_handler = hs.get_state_resolution_handler() self._state_resolution_handler = hs.get_state_resolution_handler()
async def _process_event_persist_queue_task(
self,
room_id: str,
task: _EventPersistQueueTask,
) -> Dict[str, str]:
"""Callback for the _event_persist_queue
Returns:
A dictionary of event ID to event ID we didn't persist as we already
had another event persisted with the same TXN ID.
"""
if isinstance(task, _PersistEventsTask):
return await self._persist_event_batch(room_id, task)
elif isinstance(task, _UpdateCurrentStateTask):
await self._update_current_state(room_id, task)
return {}
else:
raise AssertionError(
f"Found an unexpected task type in event persistence queue: {task}"
)
@opentracing.trace @opentracing.trace
async def persist_events( async def persist_events(
self, self,
@ -315,6 +367,10 @@ class EventsPersistenceStorageController:
if they were deduplicated due to an event already existing that if they were deduplicated due to an event already existing that
matched the transaction ID; the existing event is returned in such matched the transaction ID; the existing event is returned in such
a case. a case.
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {} partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
for event, ctx in events_and_contexts: for event, ctx in events_and_contexts:
@ -325,7 +381,8 @@ class EventsPersistenceStorageController:
) -> Dict[str, str]: ) -> Dict[str, str]:
room_id, evs_ctxs = item room_id, evs_ctxs = item
return await self._event_persist_queue.add_to_queue( return await self._event_persist_queue.add_to_queue(
room_id, evs_ctxs, backfilled=backfilled room_id,
_PersistEventsTask(events_and_contexts=evs_ctxs, backfilled=backfilled),
) )
ret_vals = await yieldable_gather_results(enqueue, partitioned.items()) ret_vals = await yieldable_gather_results(enqueue, partitioned.items())
@ -363,12 +420,19 @@ class EventsPersistenceStorageController:
latest persisted event. The returned event may not match the given latest persisted event. The returned event may not match the given
event if it was deduplicated due to an existing event matching the event if it was deduplicated due to an existing event matching the
transaction ID. transaction ID.
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
# add_to_queue returns a map from event ID to existing event ID if the # add_to_queue returns a map from event ID to existing event ID if the
# event was deduplicated. (The dict may also include other entries if # event was deduplicated. (The dict may also include other entries if
# the event was persisted in a batch with other events.) # the event was persisted in a batch with other events.)
replaced_events = await self._event_persist_queue.add_to_queue( replaced_events = await self._event_persist_queue.add_to_queue(
event.room_id, [(event, context)], backfilled=backfilled event.room_id,
_PersistEventsTask(
events_and_contexts=[(event, context)], backfilled=backfilled
),
) )
replaced_event = replaced_events.get(event.event_id) replaced_event = replaced_events.get(event.event_id)
if replaced_event: if replaced_event:
@ -383,20 +447,22 @@ class EventsPersistenceStorageController:
async def update_current_state(self, room_id: str) -> None: async def update_current_state(self, room_id: str) -> None:
"""Recalculate the current state for a room, and persist it""" """Recalculate the current state for a room, and persist it"""
await self._event_persist_queue.add_to_queue(
room_id,
_UpdateCurrentStateTask(),
)
async def _update_current_state(
self, room_id: str, _task: _UpdateCurrentStateTask
) -> None:
"""Callback for the _event_persist_queue
Recalculates the current state for a room, and persists it.
"""
state = await self._calculate_current_state(room_id) state = await self._calculate_current_state(room_id)
delta = await self._calculate_state_delta(room_id, state) delta = await self._calculate_state_delta(room_id, state)
# TODO(faster_joins): get a real stream ordering, to make this work correctly await self.persist_events_store.update_current_state(room_id, delta)
# across workers.
# https://github.com/matrix-org/synapse/issues/12994
#
# TODO(faster_joins): this can race against event persistence, in which case we
# will end up with incorrect state. Perhaps we should make this a job we
# farm out to the event persister thread, somehow.
# https://github.com/matrix-org/synapse/issues/13007
#
stream_id = self.main_store.get_room_max_stream_ordering()
await self.persist_events_store.update_current_state(room_id, delta, stream_id)
async def _calculate_current_state(self, room_id: str) -> StateMap[str]: async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
"""Calculate the current state of a room, based on the forward extremities """Calculate the current state of a room, based on the forward extremities
@ -441,9 +507,7 @@ class EventsPersistenceStorageController:
return res.state return res.state
async def _persist_event_batch( async def _persist_event_batch(
self, self, _room_id: str, task: _PersistEventsTask
events_and_contexts: List[Tuple[EventBase, EventContext]],
backfilled: bool = False,
) -> Dict[str, str]: ) -> Dict[str, str]:
"""Callback for the _event_persist_queue """Callback for the _event_persist_queue
@ -453,7 +517,14 @@ class EventsPersistenceStorageController:
Returns: Returns:
A dictionary of event ID to event ID we didn't persist as we already A dictionary of event ID to event ID we didn't persist as we already
had another event persisted with the same TXN ID. had another event persisted with the same TXN ID.
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
events_and_contexts = task.events_and_contexts
backfilled = task.backfilled
replaced_events: Dict[str, str] = {} replaced_events: Dict[str, str] = {}
if not events_and_contexts: if not events_and_contexts:
return replaced_events return replaced_events

View file

@ -366,10 +366,11 @@ class LoggingTransaction:
*args: P.args, *args: P.args,
**kwargs: P.kwargs, **kwargs: P.kwargs,
) -> R: ) -> R:
sql = self._make_sql_one_line(sql) # Generate a one-line version of the SQL to better log it.
one_line_sql = self._make_sql_one_line(sql)
# TODO(paul): Maybe use 'info' and 'debug' for values? # TODO(paul): Maybe use 'info' and 'debug' for values?
sql_logger.debug("[SQL] {%s} %s", self.name, sql) sql_logger.debug("[SQL] {%s} %s", self.name, one_line_sql)
sql = self.database_engine.convert_param_style(sql) sql = self.database_engine.convert_param_style(sql)
if args: if args:
@ -389,7 +390,7 @@ class LoggingTransaction:
"db.query", "db.query",
tags={ tags={
opentracing.tags.DATABASE_TYPE: "sql", opentracing.tags.DATABASE_TYPE: "sql",
opentracing.tags.DATABASE_STATEMENT: sql, opentracing.tags.DATABASE_STATEMENT: one_line_sql,
}, },
): ):
return func(sql, *args, **kwargs) return func(sql, *args, **kwargs)

View file

@ -87,7 +87,6 @@ class DataStore(
RoomStore, RoomStore,
RoomBatchStore, RoomBatchStore,
RegistrationStore, RegistrationStore,
StreamWorkerStore,
ProfileStore, ProfileStore,
PresenceStore, PresenceStore,
TransactionWorkerStore, TransactionWorkerStore,
@ -112,6 +111,7 @@ class DataStore(
SearchStore, SearchStore,
TagsStore, TagsStore,
AccountDataStore, AccountDataStore,
StreamWorkerStore,
OpenIdStore, OpenIdStore,
ClientIpWorkerStore, ClientIpWorkerStore,
DeviceStore, DeviceStore,

View file

@ -25,8 +25,8 @@ from synapse.storage.database import (
LoggingDatabaseConnection, LoggingDatabaseConnection,
LoggingTransaction, LoggingTransaction,
) )
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.util import json_encoder from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached from synapse.util.caches.descriptors import cached
@ -122,7 +122,7 @@ def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, st
return DEFAULT_NOTIF_ACTION return DEFAULT_NOTIF_ACTION
class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBaseStore): class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
def __init__( def __init__(
self, self,
database: DatabasePool, database: DatabasePool,
@ -143,7 +143,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
self._find_stream_orderings_for_times, 10 * 60 * 1000 self._find_stream_orderings_for_times, 10 * 60 * 1000
) )
self._rotate_delay = 3
self._rotate_count = 10000 self._rotate_count = 10000
self._doing_notif_rotation = False self._doing_notif_rotation = False
if hs.config.worker.run_background_tasks: if hs.config.worker.run_background_tasks:
@ -218,7 +217,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
retcol="event_id", retcol="event_id",
) )
stream_ordering = self.get_stream_id_for_event_txn(txn, event_id) # type: ignore[attr-defined] stream_ordering = self.get_stream_id_for_event_txn(txn, event_id)
return self._get_unread_counts_by_pos_txn( return self._get_unread_counts_by_pos_txn(
txn, room_id, user_id, stream_ordering txn, room_id, user_id, stream_ordering
@ -307,12 +306,22 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
actions that have been deleted from `event_push_actions` table. actions that have been deleted from `event_push_actions` table.
""" """
# If there have been no events in the room since the stream ordering,
# there can't be any push actions either.
if not self._events_stream_cache.has_entity_changed(room_id, stream_ordering):
return 0, 0
clause = "" clause = ""
args = [user_id, room_id, stream_ordering] args = [user_id, room_id, stream_ordering]
if max_stream_ordering is not None: if max_stream_ordering is not None:
clause = "AND ea.stream_ordering <= ?" clause = "AND ea.stream_ordering <= ?"
args.append(max_stream_ordering) args.append(max_stream_ordering)
# If the max stream ordering is less than the min stream ordering,
# then obviously there are zero push actions in that range.
if max_stream_ordering <= stream_ordering:
return 0, 0
sql = f""" sql = f"""
SELECT SELECT
COUNT(CASE WHEN notif = 1 THEN 1 END), COUNT(CASE WHEN notif = 1 THEN 1 END),
@ -837,7 +846,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
) )
if caught_up: if caught_up:
break break
await self.hs.get_clock().sleep(self._rotate_delay)
# Finally we clear out old event push actions. # Finally we clear out old event push actions.
await self._remove_old_push_actions_that_have_rotated() await self._remove_old_push_actions_that_have_rotated()
@ -1003,13 +1011,17 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
sql = """ sql = """
SELECT user_id, room_id, SELECT user_id, room_id,
coalesce(old.%s, 0) + upd.cnt, coalesce(old.%s, 0) + upd.cnt,
upd.stream_ordering, upd.stream_ordering
old.user_id
FROM ( FROM (
SELECT user_id, room_id, count(*) as cnt, SELECT user_id, room_id, count(*) as cnt,
max(stream_ordering) as stream_ordering max(ea.stream_ordering) as stream_ordering
FROM event_push_actions FROM event_push_actions AS ea
WHERE ? < stream_ordering AND stream_ordering <= ? LEFT JOIN event_push_summary AS old USING (user_id, room_id)
WHERE ? < ea.stream_ordering AND ea.stream_ordering <= ?
AND (
old.last_receipt_stream_ordering IS NULL
OR old.last_receipt_stream_ordering < ea.stream_ordering
)
AND %s = 1 AND %s = 1
GROUP BY user_id, room_id GROUP BY user_id, room_id
) AS upd ) AS upd
@ -1032,7 +1044,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
summaries[(row[0], row[1])] = _EventPushSummary( summaries[(row[0], row[1])] = _EventPushSummary(
unread_count=row[2], unread_count=row[2],
stream_ordering=row[3], stream_ordering=row[3],
old_user_id=row[4],
notif_count=0, notif_count=0,
) )
@ -1053,57 +1064,27 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
summaries[(row[0], row[1])] = _EventPushSummary( summaries[(row[0], row[1])] = _EventPushSummary(
unread_count=0, unread_count=0,
stream_ordering=row[3], stream_ordering=row[3],
old_user_id=row[4],
notif_count=row[2], notif_count=row[2],
) )
logger.info("Rotating notifications, handling %d rows", len(summaries)) logger.info("Rotating notifications, handling %d rows", len(summaries))
# If the `old.user_id` above is NULL then we know there isn't already an self.db_pool.simple_upsert_many_txn(
# entry in the table, so we simply insert it. Otherwise we update the
# existing table.
self.db_pool.simple_insert_many_txn(
txn, txn,
table="event_push_summary", table="event_push_summary",
keys=( key_names=("user_id", "room_id"),
"user_id", key_values=[(user_id, room_id) for user_id, room_id in summaries],
"room_id", value_names=("notif_count", "unread_count", "stream_ordering"),
"notif_count", value_values=[
"unread_count",
"stream_ordering",
),
values=[
( (
user_id,
room_id,
summary.notif_count, summary.notif_count,
summary.unread_count, summary.unread_count,
summary.stream_ordering, summary.stream_ordering,
) )
for ((user_id, room_id), summary) in summaries.items() for summary in summaries.values()
if summary.old_user_id is None
], ],
) )
txn.execute_batch(
"""
UPDATE event_push_summary
SET notif_count = ?, unread_count = ?, stream_ordering = ?
WHERE user_id = ? AND room_id = ?
""",
(
(
summary.notif_count,
summary.unread_count,
summary.stream_ordering,
user_id,
room_id,
)
for ((user_id, room_id), summary) in summaries.items()
if summary.old_user_id is not None
),
)
txn.execute( txn.execute(
"UPDATE event_push_summary_stream_ordering SET stream_ordering = ?", "UPDATE event_push_summary_stream_ordering SET stream_ordering = ?",
(rotate_to_stream_ordering,), (rotate_to_stream_ordering,),
@ -1131,12 +1112,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
) -> bool: ) -> bool:
# We don't want to clear out too much at a time, so we bound our # We don't want to clear out too much at a time, so we bound our
# deletes. # deletes.
batch_size = 10000 batch_size = self._rotate_count
txn.execute( txn.execute(
""" """
SELECT stream_ordering FROM event_push_actions SELECT stream_ordering FROM event_push_actions
WHERE stream_ordering < ? AND highlight = 0 WHERE stream_ordering <= ? AND highlight = 0
ORDER BY stream_ordering ASC LIMIT 1 OFFSET ? ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
""", """,
( (
@ -1151,10 +1132,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
else: else:
stream_ordering = max_stream_ordering_to_delete stream_ordering = max_stream_ordering_to_delete
# We need to use a inclusive bound here to handle the case where a
# single stream ordering has more than `batch_size` rows.
txn.execute( txn.execute(
""" """
DELETE FROM event_push_actions DELETE FROM event_push_actions
WHERE stream_ordering < ? AND highlight = 0 WHERE stream_ordering <= ? AND highlight = 0
""", """,
(stream_ordering,), (stream_ordering,),
) )
@ -1283,5 +1266,4 @@ class _EventPushSummary:
unread_count: int unread_count: int
stream_ordering: int stream_ordering: int
old_user_id: str
notif_count: int notif_count: int

View file

@ -16,6 +16,7 @@
import itertools import itertools
import logging import logging
from collections import OrderedDict from collections import OrderedDict
from http import HTTPStatus
from typing import ( from typing import (
TYPE_CHECKING, TYPE_CHECKING,
Any, Any,
@ -35,6 +36,7 @@ from prometheus_client import Counter
import synapse.metrics import synapse.metrics
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, relation_from_event from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext from synapse.events.snapshot import EventContext
@ -69,6 +71,24 @@ event_counter = Counter(
) )
class PartialStateConflictError(SynapseError):
"""An internal error raised when attempting to persist an event with partial state
after the room containing the event has been un-partial stated.
This error should be handled by recomputing the event context and trying again.
This error has an HTTP status code so that it can be transported over replication.
It should not be exposed to clients.
"""
def __init__(self) -> None:
super().__init__(
HTTPStatus.CONFLICT,
msg="Cannot persist partial state event in un-partial stated room",
errcode=Codes.UNKNOWN,
)
@attr.s(slots=True, auto_attribs=True) @attr.s(slots=True, auto_attribs=True)
class DeltaState: class DeltaState:
"""Deltas to use to update the `current_state_events` table. """Deltas to use to update the `current_state_events` table.
@ -154,6 +174,10 @@ class PersistEventsStore:
Returns: Returns:
Resolves when the events have been persisted Resolves when the events have been persisted
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
# We want to calculate the stream orderings as late as possible, as # We want to calculate the stream orderings as late as possible, as
@ -354,6 +378,9 @@ class PersistEventsStore:
For each room, a list of the event ids which are the forward For each room, a list of the event ids which are the forward
extremities. extremities.
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
state_delta_for_room = state_delta_for_room or {} state_delta_for_room = state_delta_for_room or {}
new_forward_extremities = new_forward_extremities or {} new_forward_extremities = new_forward_extremities or {}
@ -980,15 +1007,15 @@ class PersistEventsStore:
self, self,
room_id: str, room_id: str,
state_delta: DeltaState, state_delta: DeltaState,
stream_id: int,
) -> None: ) -> None:
"""Update the current state stored in the datatabase for the given room""" """Update the current state stored in the datatabase for the given room"""
async with self._stream_id_gen.get_next() as stream_ordering:
await self.db_pool.runInteraction( await self.db_pool.runInteraction(
"update_current_state", "update_current_state",
self._update_current_state_txn, self._update_current_state_txn,
state_delta_by_room={room_id: state_delta}, state_delta_by_room={room_id: state_delta},
stream_id=stream_id, stream_id=stream_ordering,
) )
def _update_current_state_txn( def _update_current_state_txn(
@ -1304,6 +1331,10 @@ class PersistEventsStore:
Returns: Returns:
new list, without events which are already in the events table. new list, without events which are already in the events table.
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
""" """
txn.execute( txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)" "SELECT event_id, outlier FROM events WHERE event_id in (%s)"
@ -1766,6 +1797,18 @@ class PersistEventsStore:
self.store.get_invited_rooms_for_local_user.invalidate, self.store.get_invited_rooms_for_local_user.invalidate,
(event.state_key,), (event.state_key,),
) )
txn.call_after(
self.store.get_local_users_in_room.invalidate,
(event.room_id,),
)
txn.call_after(
self.store.get_number_joined_users_in_room.invalidate,
(event.room_id,),
)
txn.call_after(
self.store.get_user_in_room_with_profile.invalidate,
(event.room_id, event.state_key),
)
# The `_get_membership_from_event_id` is immutable, except for the # The `_get_membership_from_event_id` is immutable, except for the
# case where we look up an event *before* persisting it. # case where we look up an event *before* persisting it.
@ -2215,6 +2258,11 @@ class PersistEventsStore:
txn: LoggingTransaction, txn: LoggingTransaction,
events_and_contexts: Collection[Tuple[EventBase, EventContext]], events_and_contexts: Collection[Tuple[EventBase, EventContext]],
) -> None: ) -> None:
"""
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
"""
state_groups = {} state_groups = {}
for event, context in events_and_contexts: for event, context in events_and_contexts:
if event.internal_metadata.is_outlier(): if event.internal_metadata.is_outlier():
@ -2239,6 +2287,7 @@ class PersistEventsStore:
# if we have partial state for these events, record the fact. (This happens # if we have partial state for these events, record the fact. (This happens
# here rather than in _store_event_txn because it also needs to happen when # here rather than in _store_event_txn because it also needs to happen when
# we de-outlier an event.) # we de-outlier an event.)
try:
self.db_pool.simple_insert_many_txn( self.db_pool.simple_insert_many_txn(
txn, txn,
table="partial_state_events", table="partial_state_events",
@ -2252,6 +2301,23 @@ class PersistEventsStore:
if ctx.partial_state if ctx.partial_state
], ],
) )
except self.db_pool.engine.module.IntegrityError:
logger.info(
"Cannot persist events %s in rooms %s: room has been un-partial stated",
[
event.event_id
for event, ctx in events_and_contexts
if ctx.partial_state
],
list(
{
event.room_id
for event, ctx in events_and_contexts
if ctx.partial_state
}
),
)
raise PartialStateConflictError()
self.db_pool.simple_upsert_many_txn( self.db_pool.simple_upsert_many_txn(
txn, txn,

View file

@ -32,12 +32,17 @@ from typing import (
import attr import attr
from synapse.api.constants import EventContentFields, EventTypes, JoinRules from synapse.api.constants import (
EventContentFields,
EventTypes,
JoinRules,
PublicRoomsFilterFields,
)
from synapse.api.errors import StoreError from synapse.api.errors import StoreError
from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.config.homeserver import HomeServerConfig from synapse.config.homeserver import HomeServerConfig
from synapse.events import EventBase from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import ( from synapse.storage.database import (
DatabasePool, DatabasePool,
LoggingDatabaseConnection, LoggingDatabaseConnection,
@ -199,10 +204,29 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
desc="get_public_room_ids", desc="get_public_room_ids",
) )
def _construct_room_type_where_clause(
self, room_types: Union[List[Union[str, None]], None]
) -> Tuple[Union[str, None], List[str]]:
if not room_types or not self.config.experimental.msc3827_enabled:
return None, []
else:
# We use None when we want get rooms without a type
is_null_clause = ""
if None in room_types:
is_null_clause = "OR room_type IS NULL"
room_types = [value for value in room_types if value is not None]
list_clause, args = make_in_list_sql_clause(
self.database_engine, "room_type", room_types
)
return f"({list_clause} {is_null_clause})", args
async def count_public_rooms( async def count_public_rooms(
self, self,
network_tuple: Optional[ThirdPartyInstanceID], network_tuple: Optional[ThirdPartyInstanceID],
ignore_non_federatable: bool, ignore_non_federatable: bool,
search_filter: Optional[dict],
) -> int: ) -> int:
"""Counts the number of public rooms as tracked in the room_stats_current """Counts the number of public rooms as tracked in the room_stats_current
and room_stats_state table. and room_stats_state table.
@ -210,11 +234,20 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
Args: Args:
network_tuple network_tuple
ignore_non_federatable: If true filters out non-federatable rooms ignore_non_federatable: If true filters out non-federatable rooms
search_filter
""" """
def _count_public_rooms_txn(txn: LoggingTransaction) -> int: def _count_public_rooms_txn(txn: LoggingTransaction) -> int:
query_args = [] query_args = []
room_type_clause, args = self._construct_room_type_where_clause(
search_filter.get(PublicRoomsFilterFields.ROOM_TYPES, None)
if search_filter
else None
)
room_type_clause = f" AND {room_type_clause}" if room_type_clause else ""
query_args += args
if network_tuple: if network_tuple:
if network_tuple.appservice_id: if network_tuple.appservice_id:
published_sql = """ published_sql = """
@ -249,6 +282,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
OR join_rules = '{JoinRules.KNOCK_RESTRICTED}' OR join_rules = '{JoinRules.KNOCK_RESTRICTED}'
OR history_visibility = 'world_readable' OR history_visibility = 'world_readable'
) )
{room_type_clause}
AND joined_members > 0 AND joined_members > 0
""" """
@ -347,8 +381,12 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
if ignore_non_federatable: if ignore_non_federatable:
where_clauses.append("is_federatable") where_clauses.append("is_federatable")
if search_filter and search_filter.get("generic_search_term", None): if search_filter and search_filter.get(
search_term = "%" + search_filter["generic_search_term"] + "%" PublicRoomsFilterFields.GENERIC_SEARCH_TERM, None
):
search_term = (
"%" + search_filter[PublicRoomsFilterFields.GENERIC_SEARCH_TERM] + "%"
)
where_clauses.append( where_clauses.append(
""" """
@ -365,6 +403,15 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
search_term.lower(), search_term.lower(),
] ]
room_type_clause, args = self._construct_room_type_where_clause(
search_filter.get(PublicRoomsFilterFields.ROOM_TYPES, None)
if search_filter
else None
)
if room_type_clause:
where_clauses.append(room_type_clause)
query_args += args
where_clause = "" where_clause = ""
if where_clauses: if where_clauses:
where_clause = " AND " + " AND ".join(where_clauses) where_clause = " AND " + " AND ".join(where_clauses)
@ -373,7 +420,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
sql = f""" sql = f"""
SELECT SELECT
room_id, name, topic, canonical_alias, joined_members, room_id, name, topic, canonical_alias, joined_members,
avatar, history_visibility, guest_access, join_rules avatar, history_visibility, guest_access, join_rules, room_type
FROM ( FROM (
{published_sql} {published_sql}
) published ) published
@ -1109,19 +1156,25 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return room_servers return room_servers
async def clear_partial_state_room(self, room_id: str) -> bool: async def clear_partial_state_room(self, room_id: str) -> bool:
# this can race with incoming events, so we watch out for FK errors. """Clears the partial state flag for a room.
# TODO(faster_joins): this still doesn't completely fix the race, since the persist process
# is not atomic. I fear we need an application-level lock. Args:
# https://github.com/matrix-org/synapse/issues/12988 room_id: The room whose partial state flag is to be cleared.
Returns:
`True` if the partial state flag has been cleared successfully.
`False` if the partial state flag could not be cleared because the room
still contains events with partial state.
"""
try: try:
await self.db_pool.runInteraction( await self.db_pool.runInteraction(
"clear_partial_state_room", self._clear_partial_state_room_txn, room_id "clear_partial_state_room", self._clear_partial_state_room_txn, room_id
) )
return True return True
except self.db_pool.engine.module.DatabaseError as e: except self.db_pool.engine.module.IntegrityError as e:
# TODO(faster_joins): how do we distinguish between FK errors and other errors? # Assume that any `IntegrityError`s are due to partial state events.
# https://github.com/matrix-org/synapse/issues/12988 logger.info(
logger.warning(
"Exception while clearing lazy partial-state-room %s, retrying: %s", "Exception while clearing lazy partial-state-room %s, retrying: %s",
room_id, room_id,
e, e,
@ -1166,6 +1219,7 @@ class _BackgroundUpdates:
POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2" POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2"
REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth" REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth"
POPULATE_ROOMS_CREATOR_COLUMN = "populate_rooms_creator_column" POPULATE_ROOMS_CREATOR_COLUMN = "populate_rooms_creator_column"
ADD_ROOM_TYPE_COLUMN = "add_room_type_column"
_REPLACE_ROOM_DEPTH_SQL_COMMANDS = ( _REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
@ -1200,6 +1254,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
self._background_add_rooms_room_version_column, self._background_add_rooms_room_version_column,
) )
self.db_pool.updates.register_background_update_handler(
_BackgroundUpdates.ADD_ROOM_TYPE_COLUMN,
self._background_add_room_type_column,
)
# BG updates to change the type of room_depth.min_depth # BG updates to change the type of room_depth.min_depth
self.db_pool.updates.register_background_update_handler( self.db_pool.updates.register_background_update_handler(
_BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2, _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2,
@ -1569,6 +1628,69 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
return batch_size return batch_size
async def _background_add_room_type_column(
self, progress: JsonDict, batch_size: int
) -> int:
"""Background update to go and add room_type information to `room_stats_state`
table from `event_json` table.
"""
last_room_id = progress.get("room_id", "")
def _background_add_room_type_column_txn(
txn: LoggingTransaction,
) -> bool:
sql = """
SELECT state.room_id, json FROM event_json
INNER JOIN current_state_events AS state USING (event_id)
WHERE state.room_id > ? AND type = 'm.room.create'
ORDER BY state.room_id
LIMIT ?
"""
txn.execute(sql, (last_room_id, batch_size))
room_id_to_create_event_results = txn.fetchall()
new_last_room_id = None
for room_id, event_json in room_id_to_create_event_results:
event_dict = db_to_json(event_json)
room_type = event_dict.get("content", {}).get(
EventContentFields.ROOM_TYPE, None
)
if isinstance(room_type, str):
self.db_pool.simple_update_txn(
txn,
table="room_stats_state",
keyvalues={"room_id": room_id},
updatevalues={"room_type": room_type},
)
new_last_room_id = room_id
if new_last_room_id is None:
return True
self.db_pool.updates._background_update_progress_txn(
txn,
_BackgroundUpdates.ADD_ROOM_TYPE_COLUMN,
{"room_id": new_last_room_id},
)
return False
end = await self.db_pool.runInteraction(
"_background_add_room_type_column",
_background_add_room_type_column_txn,
)
if end:
await self.db_pool.updates._end_background_update(
_BackgroundUpdates.ADD_ROOM_TYPE_COLUMN
)
return batch_size
class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
def __init__( def __init__(

View file

@ -212,6 +212,60 @@ class RoomMemberWorkerStore(EventsWorkerStore):
txn.execute(sql, (room_id, Membership.JOIN)) txn.execute(sql, (room_id, Membership.JOIN))
return [r[0] for r in txn] return [r[0] for r in txn]
@cached()
def get_user_in_room_with_profile(
self, room_id: str, user_id: str
) -> Dict[str, ProfileInfo]:
raise NotImplementedError()
@cachedList(
cached_method_name="get_user_in_room_with_profile", list_name="user_ids"
)
async def get_subset_users_in_room_with_profiles(
self, room_id: str, user_ids: Collection[str]
) -> Dict[str, ProfileInfo]:
"""Get a mapping from user ID to profile information for a list of users
in a given room.
The profile information comes directly from this room's `m.room.member`
events, and so may be specific to this room rather than part of a user's
global profile. To avoid privacy leaks, the profile data should only be
revealed to users who are already in this room.
Args:
room_id: The ID of the room to retrieve the users of.
user_ids: a list of users in the room to run the query for
Returns:
A mapping from user ID to ProfileInfo.
"""
def _get_subset_users_in_room_with_profiles(
txn: LoggingTransaction,
) -> Dict[str, ProfileInfo]:
clause, ids = make_in_list_sql_clause(
self.database_engine, "m.user_id", user_ids
)
sql = """
SELECT state_key, display_name, avatar_url FROM room_memberships as m
INNER JOIN current_state_events as c
ON m.event_id = c.event_id
AND m.room_id = c.room_id
AND m.user_id = c.state_key
WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? AND %s
""" % (
clause,
)
txn.execute(sql, (room_id, Membership.JOIN, *ids))
return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn}
return await self.db_pool.runInteraction(
"get_subset_users_in_room_with_profiles",
_get_subset_users_in_room_with_profiles,
)
@cached(max_entries=100000, iterable=True) @cached(max_entries=100000, iterable=True)
async def get_users_in_room_with_profiles( async def get_users_in_room_with_profiles(
self, room_id: str self, room_id: str
@ -337,6 +391,15 @@ class RoomMemberWorkerStore(EventsWorkerStore):
"get_room_summary", _get_room_summary_txn "get_room_summary", _get_room_summary_txn
) )
@cached()
async def get_number_joined_users_in_room(self, room_id: str) -> int:
return await self.db_pool.simple_select_one_onecol(
table="current_state_events",
keyvalues={"room_id": room_id, "membership": Membership.JOIN},
retcol="COUNT(*)",
desc="get_number_joined_users_in_room",
)
@cached() @cached()
async def get_invited_rooms_for_local_user( async def get_invited_rooms_for_local_user(
self, user_id: str self, user_id: str
@ -416,6 +479,17 @@ class RoomMemberWorkerStore(EventsWorkerStore):
user_id: str, user_id: str,
membership_list: List[str], membership_list: List[str],
) -> List[RoomsForUser]: ) -> List[RoomsForUser]:
"""Get all the rooms for this *local* user where the membership for this user
matches one in the membership list.
Args:
user_id: The user ID.
membership_list: A list of synapse.api.constants.Membership
values which the user must be in.
Returns:
The RoomsForUser that the user matches the membership types.
"""
# Paranoia check. # Paranoia check.
if not self.hs.is_mine_id(user_id): if not self.hs.is_mine_id(user_id):
raise Exception( raise Exception(
@ -444,6 +518,18 @@ class RoomMemberWorkerStore(EventsWorkerStore):
return results return results
@cached(iterable=True)
async def get_local_users_in_room(self, room_id: str) -> List[str]:
"""
Retrieves a list of the current roommembers who are local to the server.
"""
return await self.db_pool.simple_select_onecol(
table="local_current_membership",
keyvalues={"room_id": room_id, "membership": Membership.JOIN},
retcol="user_id",
desc="get_local_users_in_room",
)
async def get_local_current_membership_for_user_in_room( async def get_local_current_membership_for_user_in_room(
self, user_id: str, room_id: str self, user_id: str, room_id: str
) -> Tuple[Optional[str], Optional[str]]: ) -> Tuple[Optional[str], Optional[str]]:

View file

@ -16,7 +16,7 @@
import logging import logging
from enum import Enum from enum import Enum
from itertools import chain from itertools import chain
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
from typing_extensions import Counter from typing_extensions import Counter
@ -238,6 +238,7 @@ class StatsStore(StateDeltasStore):
* avatar * avatar
* canonical_alias * canonical_alias
* guest_access * guest_access
* room_type
A is_federatable key can also be included with a boolean value. A is_federatable key can also be included with a boolean value.
@ -263,6 +264,7 @@ class StatsStore(StateDeltasStore):
"avatar", "avatar",
"canonical_alias", "canonical_alias",
"guest_access", "guest_access",
"room_type",
): ):
field = fields.get(col, sentinel) field = fields.get(col, sentinel)
if field is not sentinel and (not isinstance(field, str) or "\0" in field): if field is not sentinel and (not isinstance(field, str) or "\0" in field):
@ -572,7 +574,7 @@ class StatsStore(StateDeltasStore):
state_event_map = await self.get_events(event_ids, get_prev_content=False) # type: ignore[attr-defined] state_event_map = await self.get_events(event_ids, get_prev_content=False) # type: ignore[attr-defined]
room_state = { room_state: Dict[str, Union[None, bool, str]] = {
"join_rules": None, "join_rules": None,
"history_visibility": None, "history_visibility": None,
"encryption": None, "encryption": None,
@ -581,6 +583,7 @@ class StatsStore(StateDeltasStore):
"avatar": None, "avatar": None,
"canonical_alias": None, "canonical_alias": None,
"is_federatable": True, "is_federatable": True,
"room_type": None,
} }
for event in state_event_map.values(): for event in state_event_map.values():
@ -604,6 +607,9 @@ class StatsStore(StateDeltasStore):
room_state["is_federatable"] = ( room_state["is_federatable"] = (
event.content.get(EventContentFields.FEDERATE, True) is True event.content.get(EventContentFields.FEDERATE, True) is True
) )
room_type = event.content.get(EventContentFields.ROOM_TYPE)
if isinstance(room_type, str):
room_state["room_type"] = room_type
await self.update_room_state(room_id, room_state) await self.update_room_state(room_id, room_state)

View file

@ -46,10 +46,12 @@ from typing import (
Set, Set,
Tuple, Tuple,
cast, cast,
overload,
) )
import attr import attr
from frozendict import frozendict from frozendict import frozendict
from typing_extensions import Literal
from twisted.internet import defer from twisted.internet import defer
@ -795,6 +797,24 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
) )
return RoomStreamToken(topo, stream_ordering) return RoomStreamToken(topo, stream_ordering)
@overload
def get_stream_id_for_event_txn(
self,
txn: LoggingTransaction,
event_id: str,
allow_none: Literal[False] = False,
) -> int:
...
@overload
def get_stream_id_for_event_txn(
self,
txn: LoggingTransaction,
event_id: str,
allow_none: bool = False,
) -> Optional[int]:
...
def get_stream_id_for_event_txn( def get_stream_id_for_event_txn(
self, self,
txn: LoggingTransaction, txn: LoggingTransaction,

View file

@ -0,0 +1,19 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
ALTER TABLE room_stats_state ADD room_type TEXT;
INSERT INTO background_updates (update_name, progress_json)
VALUES ('add_room_type_column', '{}');

View file

@ -20,6 +20,7 @@ from typing import Any, Callable, Dict, Generator, Optional
import attr import attr
from frozendict import frozendict from frozendict import frozendict
from matrix_common.versionstring import get_distribution_version_string from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import ParamSpec
from twisted.internet import defer, task from twisted.internet import defer, task
from twisted.internet.defer import Deferred from twisted.internet.defer import Deferred
@ -82,6 +83,9 @@ def unwrapFirstError(failure: Failure) -> Failure:
return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations
P = ParamSpec("P")
@attr.s(slots=True) @attr.s(slots=True)
class Clock: class Clock:
""" """
@ -110,7 +114,7 @@ class Clock:
return int(self.time() * 1000) return int(self.time() * 1000)
def looping_call( def looping_call(
self, f: Callable, msec: float, *args: Any, **kwargs: Any self, f: Callable[P, object], msec: float, *args: P.args, **kwargs: P.kwargs
) -> LoopingCall: ) -> LoopingCall:
"""Call a function repeatedly. """Call a function repeatedly.

View file

@ -109,7 +109,7 @@ GLOBAL_ROOT = ListNode["_Node"].create_root_node()
@wrap_as_background_process("LruCache._expire_old_entries") @wrap_as_background_process("LruCache._expire_old_entries")
async def _expire_old_entries( async def _expire_old_entries(
clock: Clock, expiry_seconds: int, autotune_config: Optional[dict] clock: Clock, expiry_seconds: float, autotune_config: Optional[dict]
) -> None: ) -> None:
"""Walks the global cache list to find cache entries that haven't been """Walks the global cache list to find cache entries that haven't been
accessed in the given number of seconds, or if a given memory threshold has been breached. accessed in the given number of seconds, or if a given memory threshold has been breached.

View file

@ -13,16 +13,21 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import logging import logging
from enum import Enum, auto
from typing import Collection, Dict, FrozenSet, List, Optional, Tuple from typing import Collection, Dict, FrozenSet, List, Optional, Tuple
import attr
from typing_extensions import Final from typing_extensions import Final
from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.utils import prune_event from synapse.events.utils import prune_event
from synapse.storage.controllers import StorageControllers from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter from synapse.storage.state import StateFilter
from synapse.types import RetentionPolicy, StateMap, get_domain_from_id from synapse.types import RetentionPolicy, StateMap, get_domain_from_id
from synapse.util import Clock
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -106,9 +111,179 @@ async def filter_events_for_client(
filter_override = user_id in storage.hs.config.meow.filter_override filter_override = user_id in storage.hs.config.meow.filter_override
def allowed(event: EventBase) -> Optional[EventBase]: def allowed(event: EventBase) -> Optional[EventBase]:
return _check_client_allowed_to_see_event(
user_id=user_id,
event=event,
clock=storage.main.clock,
filter_send_to_client=filter_send_to_client,
sender_ignored=event.sender in ignore_list,
always_include_ids=always_include_ids,
retention_policy=retention_policies[room_id],
state=event_id_to_state.get(event.event_id),
is_peeking=is_peeking,
sender_erased=erased_senders.get(event.sender, False),
filter_override=filter_override,
)
# Check each event: gives an iterable of None or (a potentially modified)
# EventBase.
filtered_events = map(allowed, events)
# Turn it into a list and remove None entries before returning.
return [ev for ev in filtered_events if ev]
async def filter_event_for_clients_with_state(
store: DataStore,
user_ids: Collection[str],
event: EventBase,
context: EventContext,
is_peeking: bool = False,
filter_send_to_client: bool = True,
) -> Collection[str]:
""" """
Checks to see if an event is visible to the users in the list at the time of
the event.
Note: This does *not* check if the sender of the event was erased.
Args: Args:
event: event to check store: databases
user_ids: user_ids to be checked
event: the event to be checked
context: EventContext for the event to be checked
is_peeking: Whether the users are peeking into the room, ie not
currently joined
filter_send_to_client: Whether we're checking an event that's going to be
sent to a client. This might not always be the case since this function can
also be called to check whether a user can see the state at a given point.
Returns:
Collection of user IDs for whom the event is visible
"""
# None of the users should see the event if it is soft_failed
if event.internal_metadata.is_soft_failed():
return []
# Make a set for all user IDs that haven't been filtered out by a check.
allowed_user_ids = set(user_ids)
# Only run some checks if these events aren't about to be sent to clients. This is
# because, if this is not the case, we're probably only checking if the users can
# see events in the room at that point in the DAG, and that shouldn't be decided
# on those checks.
if filter_send_to_client:
ignored_by = await store.ignored_by(event.sender)
retention_policy = await store.get_retention_policy_for_room(event.room_id)
for user_id in user_ids:
if (
_check_filter_send_to_client(
event,
store.clock,
retention_policy,
sender_ignored=user_id in ignored_by,
)
== _CheckFilter.DENIED
):
allowed_user_ids.discard(user_id)
if event.internal_metadata.outlier:
# Normally these can't be seen by clients, but we make an exception for
# for out-of-band membership events (eg, incoming invites, or rejections of
# said invite) for the user themselves.
if event.type == EventTypes.Member and event.state_key in allowed_user_ids:
logger.debug("Returning out-of-band-membership event %s", event)
return {event.state_key}
return set()
# First we get just the history visibility in case its shared/world-readable
# room.
visibility_state_map = await _get_state_map(
store, event, context, StateFilter.from_types([_HISTORY_VIS_KEY])
)
visibility = get_effective_room_visibility_from_state(visibility_state_map)
if (
_check_history_visibility(event, visibility, is_peeking=is_peeking)
== _CheckVisibility.ALLOWED
):
return allowed_user_ids
# The history visibility isn't lax, so we now need to fetch the membership
# events of all the users.
filter_list = []
for user_id in allowed_user_ids:
filter_list.append((EventTypes.Member, user_id))
filter_list.append((EventTypes.RoomHistoryVisibility, ""))
state_filter = StateFilter.from_types(filter_list)
state_map = await _get_state_map(store, event, context, state_filter)
# Now we check whether the membership allows each user to see the event.
return {
user_id
for user_id in allowed_user_ids
if _check_membership(user_id, event, visibility, state_map, is_peeking).allowed
}
async def _get_state_map(
store: DataStore, event: EventBase, context: EventContext, state_filter: StateFilter
) -> StateMap[EventBase]:
"""Helper function for getting a `StateMap[EventBase]` from an `EventContext`"""
state_map = await context.get_prev_state_ids(state_filter)
# Use events rather than event ids as content from the events are needed in
# _check_visibility
event_map = await store.get_events(state_map.values(), get_prev_content=False)
updated_state_map = {}
for state_key, event_id in state_map.items():
state_event = event_map.get(event_id)
if state_event:
updated_state_map[state_key] = state_event
if event.is_state():
current_state_key = (event.type, event.state_key)
# Add current event to updated_state_map, we need to do this here as it
# may not have been persisted to the db yet
updated_state_map[current_state_key] = event
return updated_state_map
def _check_client_allowed_to_see_event(
user_id: str,
event: EventBase,
clock: Clock,
filter_send_to_client: bool,
is_peeking: bool,
always_include_ids: FrozenSet[str],
sender_ignored: bool,
retention_policy: RetentionPolicy,
state: Optional[StateMap[EventBase]],
sender_erased: bool,
filter_override: bool,
) -> Optional[EventBase]:
"""Check with the given user is allowed to see the given event
See `filter_events_for_client` for details about args
Args:
user_id
event
clock
filter_send_to_client
is_peeking
always_include_ids
sender_ignored: Whether the user is ignoring the event sender
retention_policy: The retention policy of the room
state: The state at the event, unless its an outlier
sender_erased: Whether the event sender has been marked as "erased"
filter_override: meow
Returns: Returns:
None if the user cannot see this event at all None if the user cannot see this event at all
@ -123,36 +298,15 @@ async def filter_events_for_client(
# see events in the room at that point in the DAG, and that shouldn't be decided # see events in the room at that point in the DAG, and that shouldn't be decided
# on those checks. # on those checks.
if filter_send_to_client and not filter_override: if filter_send_to_client and not filter_override:
if event.type == EventTypes.Dummy: if (
return None _check_filter_send_to_client(event, clock, retention_policy, sender_ignored)
== _CheckFilter.DENIED
if not event.is_state() and event.sender in ignore_list: ):
return None
# Until MSC2261 has landed we can't redact malicious alias events, so for
# now we temporarily filter out m.room.aliases entirely to mitigate
# abuse, while we spec a better solution to advertising aliases
# on rooms.
if event.type == EventTypes.Aliases:
return None
# Don't try to apply the room's retention policy if the event is a state
# event, as MSC1763 states that retention is only considered for non-state
# events.
if not event.is_state():
retention_policy = retention_policies[event.room_id]
max_lifetime = retention_policy.max_lifetime
if max_lifetime is not None:
oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime
if event.origin_server_ts < oldest_allowed_ts:
return None return None
# meow: even with filter_override, we want to filter ignored users # meow: even with filter_override, we want to filter ignored users
elif filter_send_to_client and not event.is_state() and event.sender in ignore_list: elif filter_send_to_client and not event.is_state() and sender_ignored:
return None return None
if event.event_id in always_include_ids: if event.event_id in always_include_ids:
return event return event
@ -167,28 +321,58 @@ async def filter_events_for_client(
return None return None
state = event_id_to_state[event.event_id] if state is None:
raise Exception("Missing state for non-outlier event")
# get the room_visibility at the time of the event. # get the room_visibility at the time of the event.
visibility = get_effective_room_visibility_from_state(state) visibility = get_effective_room_visibility_from_state(state)
# Always allow history visibility events on boundaries. This is done # Check if the room has lax history visibility, allowing us to skip
# by setting the effective visibility to the least restrictive # membership checks.
# of the old vs new. #
if event.type == EventTypes.RoomHistoryVisibility: # We can only do this check if the sender has *not* been erased, as if they
prev_content = event.unsigned.get("prev_content", {}) # have we need to check the user's membership.
prev_visibility = prev_content.get("history_visibility", None) if (
not sender_erased
and _check_history_visibility(event, visibility, is_peeking)
== _CheckVisibility.ALLOWED
):
return event
if prev_visibility not in VISIBILITY_PRIORITY: membership_result = _check_membership(user_id, event, visibility, state, is_peeking)
prev_visibility = HistoryVisibility.SHARED if not membership_result.allowed:
return None
new_priority = VISIBILITY_PRIORITY.index(visibility) # If the sender has been erased and the user was not joined at the time, we
old_priority = VISIBILITY_PRIORITY.index(prev_visibility) # must only return the redacted form.
if old_priority < new_priority: if sender_erased and not membership_result.joined:
visibility = prev_visibility event = prune_event(event)
# likewise, if the event is the user's own membership event, use return event
# the 'most joined' membership
@attr.s(frozen=True, slots=True, auto_attribs=True)
class _CheckMembershipReturn:
"Return value of _check_membership"
allowed: bool
joined: bool
def _check_membership(
user_id: str,
event: EventBase,
visibility: str,
state: StateMap[EventBase],
is_peeking: bool,
) -> _CheckMembershipReturn:
"""Check whether the user can see the event due to their membership
Returns:
True if they can, False if they can't, plus the membership of the user
at the event.
"""
# If the event is the user's own membership event, use the 'most joined'
# membership
membership = None membership = None
if event.type == EventTypes.Member and event.state_key == user_id: if event.type == EventTypes.Member and event.state_key == user_id:
membership = event.content.get("membership", None) membership = event.content.get("membership", None)
@ -208,7 +392,7 @@ async def filter_events_for_client(
if membership == "leave" and ( if membership == "leave" and (
prev_membership == "join" or prev_membership == "invite" prev_membership == "join" or prev_membership == "invite"
): ):
return event return _CheckMembershipReturn(True, membership == Membership.JOIN)
new_priority = MEMBERSHIP_PRIORITY.index(membership) new_priority = MEMBERSHIP_PRIORITY.index(membership)
old_priority = MEMBERSHIP_PRIORITY.index(prev_membership) old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
@ -224,19 +408,19 @@ async def filter_events_for_client(
# if the user was a member of the room at the time of the event, # if the user was a member of the room at the time of the event,
# they can see it. # they can see it.
if membership == Membership.JOIN: if membership == Membership.JOIN:
return event return _CheckMembershipReturn(True, True)
# otherwise, it depends on the room visibility. # otherwise, it depends on the room visibility.
if visibility == HistoryVisibility.JOINED: if visibility == HistoryVisibility.JOINED:
# we weren't a member at the time of the event, so we can't # we weren't a member at the time of the event, so we can't
# see this event. # see this event.
return None return _CheckMembershipReturn(False, False)
elif visibility == HistoryVisibility.INVITED: elif visibility == HistoryVisibility.INVITED:
# user can also see the event if they were *invited* at the time # user can also see the event if they were *invited* at the time
# of the event. # of the event.
return event if membership == Membership.INVITE else None return _CheckMembershipReturn(membership == Membership.INVITE, False)
elif visibility == HistoryVisibility.SHARED and is_peeking: elif visibility == HistoryVisibility.SHARED and is_peeking:
# if the visibility is shared, users cannot see the event unless # if the visibility is shared, users cannot see the event unless
@ -247,28 +431,96 @@ async def filter_events_for_client(
# ideally we would share history up to the point they left. But # ideally we would share history up to the point they left. But
# we don't know when they left. We just treat it as though they # we don't know when they left. We just treat it as though they
# never joined, and restrict access. # never joined, and restrict access.
return None return _CheckMembershipReturn(False, False)
# the visibility is either shared or world_readable, and the user was # The visibility is either shared or world_readable, and the user was
# not a member at the time. We allow it, provided the original sender # not a member at the time. We allow it.
# has not requested their data to be erased, in which case, we return return _CheckMembershipReturn(True, False)
# a redacted version.
if erased_senders[event.sender]:
return prune_event(event)
return event
# Check each event: gives an iterable of None or (a potentially modified) class _CheckFilter(Enum):
# EventBase. MAYBE_ALLOWED = auto()
filtered_events = map(allowed, events) DENIED = auto()
# Turn it into a list and remove None entries before returning.
return [ev for ev in filtered_events if ev] def _check_filter_send_to_client(
event: EventBase,
clock: Clock,
retention_policy: RetentionPolicy,
sender_ignored: bool,
) -> _CheckFilter:
"""Apply checks for sending events to client
Returns:
True if might be allowed to be sent to clients, False if definitely not.
"""
if event.type == EventTypes.Dummy:
return _CheckFilter.DENIED
if not event.is_state() and sender_ignored:
return _CheckFilter.DENIED
# Until MSC2261 has landed we can't redact malicious alias events, so for
# now we temporarily filter out m.room.aliases entirely to mitigate
# abuse, while we spec a better solution to advertising aliases
# on rooms.
if event.type == EventTypes.Aliases:
return _CheckFilter.DENIED
# Don't try to apply the room's retention policy if the event is a state
# event, as MSC1763 states that retention is only considered for non-state
# events.
if not event.is_state():
max_lifetime = retention_policy.max_lifetime
if max_lifetime is not None:
oldest_allowed_ts = clock.time_msec() - max_lifetime
if event.origin_server_ts < oldest_allowed_ts:
return _CheckFilter.DENIED
return _CheckFilter.MAYBE_ALLOWED
class _CheckVisibility(Enum):
ALLOWED = auto()
MAYBE_DENIED = auto()
def _check_history_visibility(
event: EventBase, visibility: str, is_peeking: bool
) -> _CheckVisibility:
"""Check if event is allowed to be seen due to lax history visibility.
Returns:
True if user can definitely see the event, False if maybe not.
"""
# Always allow history visibility events on boundaries. This is done
# by setting the effective visibility to the least restrictive
# of the old vs new.
if event.type == EventTypes.RoomHistoryVisibility:
prev_content = event.unsigned.get("prev_content", {})
prev_visibility = prev_content.get("history_visibility", None)
if prev_visibility not in VISIBILITY_PRIORITY:
prev_visibility = HistoryVisibility.SHARED
new_priority = VISIBILITY_PRIORITY.index(visibility)
old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
if old_priority < new_priority:
visibility = prev_visibility
if visibility == HistoryVisibility.SHARED and not is_peeking:
return _CheckVisibility.ALLOWED
elif visibility == HistoryVisibility.WORLD_READABLE:
return _CheckVisibility.ALLOWED
return _CheckVisibility.MAYBE_DENIED
def get_effective_room_visibility_from_state(state: StateMap[EventBase]) -> str: def get_effective_room_visibility_from_state(state: StateMap[EventBase]) -> str:
"""Get the actual history vis, from a state map including the history_visibility event """Get the actual history vis, from a state map including the history_visibility event
Handles missing and invalid history visibility events. Handles missing and invalid history visibility events.
""" """
visibility_event = state.get(_HISTORY_VIS_KEY, None) visibility_event = state.get(_HISTORY_VIS_KEY, None)

View file

@ -50,7 +50,7 @@ class LogContextScopeManagerTestCase(TestCase):
# global variables that power opentracing. We create our own tracer instance # global variables that power opentracing. We create our own tracer instance
# and test with it. # and test with it.
scope_manager = LogContextScopeManager({}) scope_manager = LogContextScopeManager()
config = jaeger_client.config.Config( config = jaeger_client.config.Config(
config={}, service_name="test", scope_manager=scope_manager config={}, service_name="test", scope_manager=scope_manager
) )

View file

@ -1579,8 +1579,8 @@ class RoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok, access_token=self.admin_user_tok,
) )
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual(room_id, channel.json_body.get("rooms")[0].get("room_id")) self.assertEqual(room_id, channel.json_body["rooms"][0].get("room_id"))
self.assertEqual("ж", channel.json_body.get("rooms")[0].get("name")) self.assertEqual("ж", channel.json_body["rooms"][0].get("name"))
def test_single_room(self) -> None: def test_single_room(self) -> None:
"""Test that a single room can be requested correctly""" """Test that a single room can be requested correctly"""

View file

@ -1488,7 +1488,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
if channel.code != HTTPStatus.OK: if channel.code != HTTPStatus.OK:
raise HttpResponseException( raise HttpResponseException(
channel.code, channel.result["reason"], channel.json_body channel.code, channel.result["reason"], channel.result["body"]
) )
# Set monthly active users to the limit # Set monthly active users to the limit

View file

@ -949,7 +949,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
client_secret: str, client_secret: str,
next_link: Optional[str] = None, next_link: Optional[str] = None,
expect_code: int = 200, expect_code: int = 200,
) -> str: ) -> Optional[str]:
"""Request a validation token to add an email address to a user's account """Request a validation token to add an email address to a user's account
Args: Args:
@ -959,7 +959,8 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
expect_code: Expected return code of the call expect_code: Expected return code of the call
Returns: Returns:
The ID of the new threepid validation session The ID of the new threepid validation session, or None if the response
did not contain a session ID.
""" """
body = {"client_secret": client_secret, "email": email, "send_attempt": 1} body = {"client_secret": client_secret, "email": email, "send_attempt": 1}
if next_link: if next_link:

View file

@ -153,18 +153,22 @@ class ProfileTestCase(unittest.HomeserverTestCase):
) )
self.assertEqual(channel.code, 400, channel.result) self.assertEqual(channel.code, 400, channel.result)
def _get_displayname(self, name: Optional[str] = None) -> str: def _get_displayname(self, name: Optional[str] = None) -> Optional[str]:
channel = self.make_request( channel = self.make_request(
"GET", "/profile/%s/displayname" % (name or self.owner,) "GET", "/profile/%s/displayname" % (name or self.owner,)
) )
self.assertEqual(channel.code, 200, channel.result) self.assertEqual(channel.code, 200, channel.result)
return channel.json_body["displayname"] # FIXME: If a user has no displayname set, Synapse returns 200 and omits a
# displayname from the response. This contradicts the spec, see #13137.
return channel.json_body.get("displayname")
def _get_avatar_url(self, name: Optional[str] = None) -> str: def _get_avatar_url(self, name: Optional[str] = None) -> Optional[str]:
channel = self.make_request( channel = self.make_request(
"GET", "/profile/%s/avatar_url" % (name or self.owner,) "GET", "/profile/%s/avatar_url" % (name or self.owner,)
) )
self.assertEqual(channel.code, 200, channel.result) self.assertEqual(channel.code, 200, channel.result)
# FIXME: If a user has no avatar set, Synapse returns 200 and omits an
# avatar_url from the response. This contradicts the spec, see #13137.
return channel.json_body.get("avatar_url") return channel.json_body.get("avatar_url")
@unittest.override_config({"max_avatar_size": 50}) @unittest.override_config({"max_avatar_size": 50})

View file

@ -800,7 +800,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
) )
expected_event_ids.append(channel.json_body["event_id"]) expected_event_ids.append(channel.json_body["event_id"])
prev_token = "" prev_token: Optional[str] = ""
found_event_ids: List[str] = [] found_event_ids: List[str] = []
for _ in range(20): for _ in range(20):
from_token = "" from_token = ""

View file

@ -18,11 +18,11 @@
"""Tests REST events for /rooms paths.""" """Tests REST events for /rooms paths."""
import json import json
from typing import Any, Dict, Iterable, List, Optional, Union from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from unittest.mock import Mock, call from unittest.mock import Mock, call
from urllib import parse as urlparse from urllib import parse as urlparse
# `Literal` appears with Python 3.8. from parameterized import param, parameterized
from typing_extensions import Literal from typing_extensions import Literal
from twisted.test.proto_helpers import MemoryReactor from twisted.test.proto_helpers import MemoryReactor
@ -33,7 +33,9 @@ from synapse.api.constants import (
EventContentFields, EventContentFields,
EventTypes, EventTypes,
Membership, Membership,
PublicRoomsFilterFields,
RelationTypes, RelationTypes,
RoomTypes,
) )
from synapse.api.errors import Codes, HttpResponseException from synapse.api.errors import Codes, HttpResponseException
from synapse.handlers.pagination import PurgeStatus from synapse.handlers.pagination import PurgeStatus
@ -706,6 +708,21 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(200, channel.code, channel.result) self.assertEqual(200, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body) self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
self.assertEqual(37, channel.resource_usage.db_txn_count)
def test_post_room_initial_state(self) -> None:
# POST with initial_state config key, expect new room id
channel = self.make_request(
"POST",
"/createRoom",
b'{"initial_state":[{"type": "m.bridge", "content": {}}]}',
)
self.assertEqual(200, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
self.assertEqual(41, channel.resource_usage.db_txn_count)
def test_post_room_visibility_key(self) -> None: def test_post_room_visibility_key(self) -> None:
# POST with visibility config key, expect new room id # POST with visibility config key, expect new room id
@ -813,14 +830,14 @@ class RoomsCreateTestCase(RoomBase):
In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.
""" """
async def user_may_join_room( async def user_may_join_room_codes(
mxid: str, mxid: str,
room_id: str, room_id: str,
is_invite: bool, is_invite: bool,
) -> Codes: ) -> Codes:
return Codes.CONSENT_NOT_GIVEN return Codes.CONSENT_NOT_GIVEN
join_mock = Mock(side_effect=user_may_join_room) join_mock = Mock(side_effect=user_may_join_room_codes)
self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock)
channel = self.make_request( channel = self.make_request(
@ -832,6 +849,25 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(join_mock.call_count, 0) self.assertEqual(join_mock.call_count, 0)
# Now change the return value of the callback to deny any join. Since we're
# creating the room, despite the return value, we should be able to join.
async def user_may_join_room_tuple(
mxid: str,
room_id: str,
is_invite: bool,
) -> Tuple[Codes, dict]:
return Codes.INCOMPATIBLE_ROOM_VERSION, {}
join_mock.side_effect = user_may_join_room_tuple
channel = self.make_request(
"POST",
"/createRoom",
{},
)
self.assertEqual(channel.code, 200, channel.json_body)
self.assertEqual(join_mock.call_count, 0)
class RoomTopicTestCase(RoomBase): class RoomTopicTestCase(RoomBase):
"""Tests /rooms/$room_id/topic REST events.""" """Tests /rooms/$room_id/topic REST events."""
@ -1111,13 +1147,15 @@ class RoomJoinTestCase(RoomBase):
""" """
# Register a dummy callback. Make it allow all room joins for now. # Register a dummy callback. Make it allow all room joins for now.
return_value: Union[Literal["NOT_SPAM"], Codes] = synapse.module_api.NOT_SPAM return_value: Union[
Literal["NOT_SPAM"], Tuple[Codes, dict], Codes
] = synapse.module_api.NOT_SPAM
async def user_may_join_room( async def user_may_join_room(
userid: str, userid: str,
room_id: str, room_id: str,
is_invited: bool, is_invited: bool,
) -> Union[Literal["NOT_SPAM"], Codes]: ) -> Union[Literal["NOT_SPAM"], Tuple[Codes, dict], Codes]:
return return_value return return_value
# `spec` argument is needed for this function mock to have `__qualname__`, which # `spec` argument is needed for this function mock to have `__qualname__`, which
@ -1161,8 +1199,28 @@ class RoomJoinTestCase(RoomBase):
) )
# Now make the callback deny all room joins, and check that a join actually fails. # Now make the callback deny all room joins, and check that a join actually fails.
# We pick an arbitrary Codes rather than the default `Codes.FORBIDDEN`.
return_value = Codes.CONSENT_NOT_GIVEN return_value = Codes.CONSENT_NOT_GIVEN
self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) self.helper.invite(self.room3, self.user1, self.user2, tok=self.tok1)
self.helper.join(
self.room3,
self.user2,
expect_code=403,
expect_errcode=return_value,
tok=self.tok2,
)
# Now make the callback deny all room joins, and check that a join actually fails.
# As above, with the experimental extension that lets us return dictionaries.
return_value = (Codes.BAD_ALIAS, {"another_field": "12345"})
self.helper.join(
self.room3,
self.user2,
expect_code=403,
expect_errcode=return_value[0],
tok=self.tok2,
expect_additional_fields=return_value[1],
)
class RoomJoinRatelimitTestCase(RoomBase): class RoomJoinRatelimitTestCase(RoomBase):
@ -1312,6 +1370,97 @@ class RoomMessagesTestCase(RoomBase):
channel = self.make_request("PUT", path, content) channel = self.make_request("PUT", path, content)
self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual(200, channel.code, msg=channel.result["body"])
@parameterized.expand(
[
# Allow
param(
name="NOT_SPAM", value="NOT_SPAM", expected_code=200, expected_fields={}
),
param(name="False", value=False, expected_code=200, expected_fields={}),
# Block
param(
name="scalene string",
value="ANY OTHER STRING",
expected_code=403,
expected_fields={"errcode": "M_FORBIDDEN"},
),
param(
name="True",
value=True,
expected_code=403,
expected_fields={"errcode": "M_FORBIDDEN"},
),
param(
name="Code",
value=Codes.LIMIT_EXCEEDED,
expected_code=403,
expected_fields={"errcode": "M_LIMIT_EXCEEDED"},
),
param(
name="Tuple",
value=(Codes.SERVER_NOT_TRUSTED, {"additional_field": "12345"}),
expected_code=403,
expected_fields={
"errcode": "M_SERVER_NOT_TRUSTED",
"additional_field": "12345",
},
),
]
)
def test_spam_checker_check_event_for_spam(
self,
name: str,
value: Union[str, bool, Codes, Tuple[Codes, JsonDict]],
expected_code: int,
expected_fields: dict,
) -> None:
class SpamCheck:
mock_return_value: Union[
str, bool, Codes, Tuple[Codes, JsonDict], bool
] = "NOT_SPAM"
mock_content: Optional[JsonDict] = None
async def check_event_for_spam(
self,
event: synapse.events.EventBase,
) -> Union[str, Codes, Tuple[Codes, JsonDict], bool]:
self.mock_content = event.content
return self.mock_return_value
spam_checker = SpamCheck()
self.hs.get_spam_checker()._check_event_for_spam_callbacks.append(
spam_checker.check_event_for_spam
)
# Inject `value` as mock_return_value
spam_checker.mock_return_value = value
path = "/rooms/%s/send/m.room.message/check_event_for_spam_%s" % (
urlparse.quote(self.room_id),
urlparse.quote(name),
)
body = "test-%s" % name
content = '{"body":"%s","msgtype":"m.text"}' % body
channel = self.make_request("PUT", path, content)
# Check that the callback has witnessed the correct event.
self.assertIsNotNone(spam_checker.mock_content)
if (
spam_checker.mock_content is not None
): # Checked just above, but mypy doesn't know about that.
self.assertEqual(
spam_checker.mock_content["body"], body, spam_checker.mock_content
)
# Check that we have the correct result.
self.assertEqual(expected_code, channel.code, msg=channel.result["body"])
for expected_key, expected_value in expected_fields.items():
self.assertEqual(
channel.json_body.get(expected_key, None),
expected_value,
"Field %s absent or invalid " % expected_key,
)
class RoomPowerLevelOverridesTestCase(RoomBase): class RoomPowerLevelOverridesTestCase(RoomBase):
"""Tests that the power levels can be overridden with server config.""" """Tests that the power levels can be overridden with server config."""
@ -1858,6 +2007,90 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 200, channel.result) self.assertEqual(channel.code, 200, channel.result)
class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["allow_public_rooms_without_auth"] = True
config["experimental_features"] = {"msc3827_enabled": True}
self.hs = self.setup_test_homeserver(config=config)
self.url = b"/_matrix/client/r0/publicRooms"
return self.hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
user = self.register_user("alice", "pass")
self.token = self.login(user, "pass")
# Create a room
self.helper.create_room_as(
user,
is_public=True,
extra_content={"visibility": "public"},
tok=self.token,
)
# Create a space
self.helper.create_room_as(
user,
is_public=True,
extra_content={
"visibility": "public",
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
},
tok=self.token,
)
def make_public_rooms_request(
self, room_types: Union[List[Union[str, None]], None]
) -> Tuple[List[Dict[str, Any]], int]:
channel = self.make_request(
"POST",
self.url,
{"filter": {PublicRoomsFilterFields.ROOM_TYPES: room_types}},
self.token,
)
chunk = channel.json_body["chunk"]
count = channel.json_body["total_room_count_estimate"]
self.assertEqual(len(chunk), count)
return chunk, count
def test_returns_both_rooms_and_spaces_if_no_filter(self) -> None:
chunk, count = self.make_public_rooms_request(None)
self.assertEqual(count, 2)
def test_returns_only_rooms_based_on_filter(self) -> None:
chunk, count = self.make_public_rooms_request([None])
self.assertEqual(count, 1)
self.assertEqual(chunk[0].get("org.matrix.msc3827.room_type", None), None)
def test_returns_only_space_based_on_filter(self) -> None:
chunk, count = self.make_public_rooms_request(["m.space"])
self.assertEqual(count, 1)
self.assertEqual(chunk[0].get("org.matrix.msc3827.room_type", None), "m.space")
def test_returns_both_rooms_and_space_based_on_filter(self) -> None:
chunk, count = self.make_public_rooms_request(["m.space", None])
self.assertEqual(count, 2)
def test_returns_both_rooms_and_spaces_if_array_is_empty(self) -> None:
chunk, count = self.make_public_rooms_request([])
self.assertEqual(count, 2)
class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase):
"""Test that we correctly fallback to local filtering if a remote server """Test that we correctly fallback to local filtering if a remote server
doesn't support search. doesn't support search.
@ -1882,7 +2115,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase):
"Simple test for searching rooms over federation" "Simple test for searching rooms over federation"
self.federation_client.get_public_rooms.return_value = make_awaitable({}) # type: ignore[attr-defined] self.federation_client.get_public_rooms.return_value = make_awaitable({}) # type: ignore[attr-defined]
search_filter = {"generic_search_term": "foobar"} search_filter = {PublicRoomsFilterFields.GENERIC_SEARCH_TERM: "foobar"}
channel = self.make_request( channel = self.make_request(
"POST", "POST",
@ -1911,7 +2144,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase):
make_awaitable({}), make_awaitable({}),
) )
search_filter = {"generic_search_term": "foobar"} search_filter = {PublicRoomsFilterFields.GENERIC_SEARCH_TERM: "foobar"}
channel = self.make_request( channel = self.make_request(
"POST", "POST",
@ -3149,7 +3382,8 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase):
make_invite_mock.assert_called_once() make_invite_mock.assert_called_once()
# Now change the return value of the callback to deny any invite and test that # Now change the return value of the callback to deny any invite and test that
# we can't send the invite. # we can't send the invite. We pick an arbitrary error code to be able to check
# that the same code has been returned
mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN)
channel = self.make_request( channel = self.make_request(
method="POST", method="POST",
@ -3163,6 +3397,27 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase):
access_token=self.tok, access_token=self.tok,
) )
self.assertEqual(channel.code, 403) self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["errcode"], Codes.CONSENT_NOT_GIVEN)
# Also check that it stopped before calling _make_and_store_3pid_invite.
make_invite_mock.assert_called_once()
# Run variant with `Tuple[Codes, dict]`.
mock.return_value = make_awaitable((Codes.EXPIRED_ACCOUNT, {"field": "value"}))
channel = self.make_request(
method="POST",
path="/rooms/" + self.room_id + "/invite",
content={
"id_server": "example.com",
"id_access_token": "sometoken",
"medium": "email",
"address": email_to_invite,
},
access_token=self.tok,
)
self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT)
self.assertEqual(channel.json_body["field"], "value")
# Also check that it stopped before calling _make_and_store_3pid_invite. # Also check that it stopped before calling _make_and_store_3pid_invite.
make_invite_mock.assert_called_once() make_invite_mock.assert_called_once()

View file

@ -41,6 +41,7 @@ from twisted.web.resource import Resource
from twisted.web.server import Site from twisted.web.server import Site
from synapse.api.constants import Membership from synapse.api.constants import Membership
from synapse.api.errors import Codes
from synapse.server import HomeServer from synapse.server import HomeServer
from synapse.types import JsonDict from synapse.types import JsonDict
@ -171,6 +172,8 @@ class RestHelper:
expect_code: int = HTTPStatus.OK, expect_code: int = HTTPStatus.OK,
tok: Optional[str] = None, tok: Optional[str] = None,
appservice_user_id: Optional[str] = None, appservice_user_id: Optional[str] = None,
expect_errcode: Optional[Codes] = None,
expect_additional_fields: Optional[dict] = None,
) -> None: ) -> None:
self.change_membership( self.change_membership(
room=room, room=room,
@ -180,6 +183,8 @@ class RestHelper:
appservice_user_id=appservice_user_id, appservice_user_id=appservice_user_id,
membership=Membership.JOIN, membership=Membership.JOIN,
expect_code=expect_code, expect_code=expect_code,
expect_errcode=expect_errcode,
expect_additional_fields=expect_additional_fields,
) )
def knock( def knock(
@ -263,6 +268,7 @@ class RestHelper:
appservice_user_id: Optional[str] = None, appservice_user_id: Optional[str] = None,
expect_code: int = HTTPStatus.OK, expect_code: int = HTTPStatus.OK,
expect_errcode: Optional[str] = None, expect_errcode: Optional[str] = None,
expect_additional_fields: Optional[dict] = None,
) -> None: ) -> None:
""" """
Send a membership state event into a room. Send a membership state event into a room.
@ -323,6 +329,21 @@ class RestHelper:
channel.result["body"], channel.result["body"],
) )
if expect_additional_fields is not None:
for expect_key, expect_value in expect_additional_fields.items():
assert expect_key in channel.json_body, "Expected field %s, got %s" % (
expect_key,
channel.json_body,
)
assert (
channel.json_body[expect_key] == expect_value
), "Expected: %s at %s, got: %s, resp: %s" % (
expect_value,
expect_key,
channel.json_body[expect_key],
channel.json_body,
)
self.auth_user_id = temp_id self.auth_user_id = temp_id
def send( def send(

View file

@ -23,11 +23,13 @@ from urllib import parse
import attr import attr
from parameterized import parameterized, parameterized_class from parameterized import parameterized, parameterized_class
from PIL import Image as Image from PIL import Image as Image
from typing_extensions import Literal
from twisted.internet import defer from twisted.internet import defer
from twisted.internet.defer import Deferred from twisted.internet.defer import Deferred
from twisted.test.proto_helpers import MemoryReactor from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import Codes
from synapse.events import EventBase from synapse.events import EventBase
from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.logging.context import make_deferred_yieldable from synapse.logging.context import make_deferred_yieldable
@ -570,9 +572,11 @@ class MediaRepoTests(unittest.HomeserverTestCase):
) )
class TestSpamChecker: class TestSpamCheckerLegacy:
"""A spam checker module that rejects all media that includes the bytes """A spam checker module that rejects all media that includes the bytes
`evil`. `evil`.
Uses the legacy Spam-Checker API.
""" """
def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None: def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None:
@ -613,7 +617,7 @@ class TestSpamChecker:
return b"evil" in buf.getvalue() return b"evil" in buf.getvalue()
class SpamCheckerTestCase(unittest.HomeserverTestCase): class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase):
servlets = [ servlets = [
login.register_servlets, login.register_servlets,
admin.register_servlets, admin.register_servlets,
@ -637,7 +641,8 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase):
{ {
"spam_checker": [ "spam_checker": [
{ {
"module": TestSpamChecker.__module__ + ".TestSpamChecker", "module": TestSpamCheckerLegacy.__module__
+ ".TestSpamCheckerLegacy",
"config": {}, "config": {},
} }
] ]
@ -662,3 +667,62 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase):
self.helper.upload_media( self.helper.upload_media(
self.upload_resource, data, tok=self.tok, expect_code=400 self.upload_resource, data, tok=self.tok, expect_code=400
) )
EVIL_DATA = b"Some evil data"
EVIL_DATA_EXPERIMENT = b"Some evil data to trigger the experimental tuple API"
class SpamCheckerTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
admin.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.user = self.register_user("user", "pass")
self.tok = self.login("user", "pass")
# Allow for uploading and downloading to/from the media repo
self.media_repo = hs.get_media_repository_resource()
self.download_resource = self.media_repo.children[b"download"]
self.upload_resource = self.media_repo.children[b"upload"]
hs.get_module_api().register_spam_checker_callbacks(
check_media_file_for_spam=self.check_media_file_for_spam
)
async def check_media_file_for_spam(
self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
) -> Union[Codes, Literal["NOT_SPAM"]]:
buf = BytesIO()
await file_wrapper.write_chunks_to(buf.write)
if buf.getvalue() == EVIL_DATA:
return Codes.FORBIDDEN
elif buf.getvalue() == EVIL_DATA_EXPERIMENT:
return (Codes.FORBIDDEN, {})
else:
return "NOT_SPAM"
def test_upload_innocent(self) -> None:
"""Attempt to upload some innocent data that should be allowed."""
self.helper.upload_media(
self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200
)
def test_upload_ban(self) -> None:
"""Attempt to upload some data that includes bytes "evil", which should
get rejected by the spam checker.
"""
self.helper.upload_media(
self.upload_resource, EVIL_DATA, tok=self.tok, expect_code=400
)
self.helper.upload_media(
self.upload_resource,
EVIL_DATA_EXPERIMENT,
tok=self.tok,
expect_code=400,
)

Some files were not shown because too many files have changed in this diff Show more