Merge branch 'dinsic' into babolivier/fix_profile_replication

commit e0c56d6527
.buildkite/pipeline.yml

@@ -26,7 +26,15 @@ steps:
       - docker#v3.0.1:
           image: "python:3.6"

-  - wait
+  - command:
+      - "python -m pip install tox"
+      - "scripts-dev/check-newsfragment"
+    label: ":newspaper: Newsfile"
+    branches: "!master !develop !release-*"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          propagate-environment: true

   - command:
       - "python -m pip install tox"
@@ -36,6 +44,8 @@ steps:
       - docker#v3.0.1:
           image: "python:3.6"

+  - wait
+
   - command:
       - "python -m pip install tox"
       - "tox -e py27,codecov"
@@ -46,6 +56,12 @@ steps:
       - docker#v3.0.1:
           image: "python:2.7"
           propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - command:
       - "python -m pip install tox"
@@ -57,6 +73,12 @@ steps:
       - docker#v3.0.1:
           image: "python:3.5"
           propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - command:
       - "python -m pip install tox"
@@ -68,6 +90,12 @@ steps:
       - docker#v3.0.1:
           image: "python:3.6"
           propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - command:
       - "python -m pip install tox"
@@ -79,6 +107,12 @@ steps:
       - docker#v3.0.1:
           image: "python:3.7"
           propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - command:
       - "python -m pip install tox"
@@ -90,6 +124,12 @@ steps:
       - docker#v3.0.1:
           image: "python:2.7"
           propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - label: ":python: 2.7 / :postgres: 9.4"
     env:
@@ -101,6 +141,12 @@ steps:
           run: testenv
           config:
             - .buildkite/docker-compose.py27.pg94.yaml
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - label: ":python: 2.7 / :postgres: 9.5"
     env:
@@ -112,6 +158,12 @@ steps:
           run: testenv
           config:
             - .buildkite/docker-compose.py27.pg95.yaml
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - label: ":python: 3.5 / :postgres: 9.4"
     env:
@@ -123,6 +175,12 @@ steps:
           run: testenv
           config:
             - .buildkite/docker-compose.py35.pg94.yaml
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - label: ":python: 3.5 / :postgres: 9.5"
     env:
@@ -134,6 +192,12 @@ steps:
           run: testenv
           config:
             - .buildkite/docker-compose.py35.pg95.yaml
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - label: ":python: 3.7 / :postgres: 9.5"
     env:
@@ -145,6 +209,12 @@ steps:
           run: testenv
           config:
             - .buildkite/docker-compose.py37.pg95.yaml
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2

   - label: ":python: 3.7 / :postgres: 11"
     env:
@@ -156,3 +226,9 @@ steps:
           run: testenv
           config:
             - .buildkite/docker-compose.py37.pg11.yaml
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
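The `retry` stanza added to each build step above implements the changelog item "Automatically retry buildkite builds (max twice) when an agent is lost" ([\#5380](https://github.com/matrix-org/synapse/issues/5380)). As a sketch of how the pieces fit together on one complete step (the label and tox environment here are illustrative, not taken from the diff):

```yaml
- command:
    - "python -m pip install tox"
    - "tox -e py35,codecov"
  label: ":python: 3.5"
  plugins:
    - docker#v3.0.1:
        image: "python:3.5"
        propagate-environment: true
  retry:
    automatic:
      - exit_status: -1    # Buildkite reports -1 when the agent is lost
        limit: 2           # retry at most twice
      - exit_status: 2
        limit: 2
```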
CHANGES.md (180 changed lines)

@@ -1,3 +1,183 @@
+Synapse 1.0.0 (2019-06-11)
+==========================
+
+Bugfixes
+--------
+
+- Fix bug where attempting to send transactions with large number of EDUs can fail. ([\#5418](https://github.com/matrix-org/synapse/issues/5418))
+
+
+Improved Documentation
+----------------------
+
+- Expand the federation guide to include relevant content from the MSC1711 FAQ ([\#5419](https://github.com/matrix-org/synapse/issues/5419))
+
+
+Internal Changes
+----------------
+
+- Move password reset links to /_matrix/client/unstable namespace. ([\#5424](https://github.com/matrix-org/synapse/issues/5424))
+
+
+Synapse 1.0.0rc3 (2019-06-10)
+=============================
+
+Security: Fix authentication bug introduced in 1.0.0rc1. Please upgrade to rc3 immediately
+
+
+Synapse 1.0.0rc2 (2019-06-10)
+=============================
+
+Bugfixes
+--------
+
+- Remove redundant warning about key server response validation. ([\#5392](https://github.com/matrix-org/synapse/issues/5392))
+- Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. ([\#5415](https://github.com/matrix-org/synapse/issues/5415))
+- Fix excessive memory usage with default `federation_verify_certificates: true` configuration. ([\#5417](https://github.com/matrix-org/synapse/issues/5417))
+
+
+Synapse 1.0.0rc1 (2019-06-07)
+=============================
+
+Features
+--------
+
+- Synapse now more efficiently collates room statistics. ([\#4338](https://github.com/matrix-org/synapse/issues/4338), [\#5260](https://github.com/matrix-org/synapse/issues/5260), [\#5324](https://github.com/matrix-org/synapse/issues/5324))
+- Add experimental support for relations (aka reactions and edits). ([\#5220](https://github.com/matrix-org/synapse/issues/5220))
+- Ability to configure default room version. ([\#5223](https://github.com/matrix-org/synapse/issues/5223), [\#5249](https://github.com/matrix-org/synapse/issues/5249))
+- Allow configuring a range for the account validity startup job. ([\#5276](https://github.com/matrix-org/synapse/issues/5276))
+- CAS login will now hit the r0 API, not the deprecated v1 one. ([\#5286](https://github.com/matrix-org/synapse/issues/5286))
+- Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). ([\#5359](https://github.com/matrix-org/synapse/issues/5359))
+- Update /_matrix/client/versions to reference support for r0.5.0. ([\#5360](https://github.com/matrix-org/synapse/issues/5360))
+- Add a script to generate new signing-key files. ([\#5361](https://github.com/matrix-org/synapse/issues/5361))
+- Update upgrade and installation guides ahead of 1.0. ([\#5371](https://github.com/matrix-org/synapse/issues/5371))
+- Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). ([\#5374](https://github.com/matrix-org/synapse/issues/5374))
+- Add ability to perform password reset via email without trusting the identity server. ([\#5377](https://github.com/matrix-org/synapse/issues/5377))
+- Set default room version to v4. ([\#5379](https://github.com/matrix-org/synapse/issues/5379))
+
+
+Bugfixes
+--------
+
+- Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a room's name or its canonical alias is empty. Thanks to @dnaf for this work! ([\#5089](https://github.com/matrix-org/synapse/issues/5089))
+- Prevent federation device list updates breaking when processing multiple updates at once. ([\#5156](https://github.com/matrix-org/synapse/issues/5156))
+- Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. ([\#5200](https://github.com/matrix-org/synapse/issues/5200))
+- Fix race when backfilling in rooms with worker mode. ([\#5221](https://github.com/matrix-org/synapse/issues/5221))
+- Fix appservice timestamp massaging. ([\#5233](https://github.com/matrix-org/synapse/issues/5233))
+- Ensure that server_keys fetched via a notary server are correctly signed. ([\#5251](https://github.com/matrix-org/synapse/issues/5251))
+- Show the correct error when logging out and access token is missing. ([\#5256](https://github.com/matrix-org/synapse/issues/5256))
+- Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms ([\#5257](https://github.com/matrix-org/synapse/issues/5257))
+- Fix error when downloading thumbnail with missing width/height parameter. ([\#5258](https://github.com/matrix-org/synapse/issues/5258))
+- Fix schema update for account validity. ([\#5268](https://github.com/matrix-org/synapse/issues/5268))
+- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))
+- Fix "db txn 'update_presence' from sentinel context" log messages. ([\#5275](https://github.com/matrix-org/synapse/issues/5275))
+- Fix dropped logcontexts during high outbound traffic. ([\#5277](https://github.com/matrix-org/synapse/issues/5277))
+- Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. ([\#5293](https://github.com/matrix-org/synapse/issues/5293))
+- Fix performance problems with the rooms stats background update. ([\#5294](https://github.com/matrix-org/synapse/issues/5294))
+- Fix noisy 'no key for server' logs. ([\#5300](https://github.com/matrix-org/synapse/issues/5300))
+- Fix bug where a notary server would sometimes forget old keys. ([\#5307](https://github.com/matrix-org/synapse/issues/5307))
+- Prevent users from setting huge displaynames and avatar URLs. ([\#5309](https://github.com/matrix-org/synapse/issues/5309))
+- Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. ([\#5317](https://github.com/matrix-org/synapse/issues/5317))
+- Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. ([\#5321](https://github.com/matrix-org/synapse/issues/5321))
+- Fix various problems which made the signing-key notary server time out for some requests. ([\#5333](https://github.com/matrix-org/synapse/issues/5333))
+- Fix bug which would make certain operations (such as room joins) block for 20 minutes while attempting to fetch verification keys. ([\#5334](https://github.com/matrix-org/synapse/issues/5334))
+- Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. ([\#5335](https://github.com/matrix-org/synapse/issues/5335), [\#5340](https://github.com/matrix-org/synapse/issues/5340))
+- Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. ([\#5341](https://github.com/matrix-org/synapse/issues/5341))
+- Fix failure when fetching batches of events during backfill, etc. ([\#5342](https://github.com/matrix-org/synapse/issues/5342))
+- Add a new room version where the timestamps on events are checked against the validity periods on signing keys. ([\#5348](https://github.com/matrix-org/synapse/issues/5348), [\#5354](https://github.com/matrix-org/synapse/issues/5354))
+- Fix room stats and presence background updates to correctly handle missing events. ([\#5352](https://github.com/matrix-org/synapse/issues/5352))
+- Include left members in room summaries' heroes. ([\#5355](https://github.com/matrix-org/synapse/issues/5355))
+- Fix `federation_custom_ca_list` configuration option. ([\#5362](https://github.com/matrix-org/synapse/issues/5362))
+- Fix missing logcontext warnings on shutdown. ([\#5369](https://github.com/matrix-org/synapse/issues/5369))
+
+
+Improved Documentation
+----------------------
+
+- Fix docs on resetting the user directory. ([\#5282](https://github.com/matrix-org/synapse/issues/5282))
+- Fix notes about ACME in the MSC1711 faq. ([\#5357](https://github.com/matrix-org/synapse/issues/5357))
+
+
+Internal Changes
+----------------
+
+- Synapse will now serve the experimental "room complexity" API endpoint. ([\#5216](https://github.com/matrix-org/synapse/issues/5216))
+- The base classes for the v1 and v2_alpha REST APIs have been unified. ([\#5226](https://github.com/matrix-org/synapse/issues/5226), [\#5328](https://github.com/matrix-org/synapse/issues/5328))
+- Simplifications and comments in do_auth. ([\#5227](https://github.com/matrix-org/synapse/issues/5227))
+- Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. ([\#5230](https://github.com/matrix-org/synapse/issues/5230))
+- Preparatory work for key-validity features. ([\#5232](https://github.com/matrix-org/synapse/issues/5232), [\#5234](https://github.com/matrix-org/synapse/issues/5234), [\#5235](https://github.com/matrix-org/synapse/issues/5235), [\#5236](https://github.com/matrix-org/synapse/issues/5236), [\#5237](https://github.com/matrix-org/synapse/issues/5237), [\#5244](https://github.com/matrix-org/synapse/issues/5244), [\#5250](https://github.com/matrix-org/synapse/issues/5250), [\#5296](https://github.com/matrix-org/synapse/issues/5296), [\#5299](https://github.com/matrix-org/synapse/issues/5299), [\#5343](https://github.com/matrix-org/synapse/issues/5343), [\#5347](https://github.com/matrix-org/synapse/issues/5347), [\#5356](https://github.com/matrix-org/synapse/issues/5356))
+- Specify the type of reCAPTCHA key to use. ([\#5283](https://github.com/matrix-org/synapse/issues/5283))
+- Improve sample config for monthly active user blocking. ([\#5284](https://github.com/matrix-org/synapse/issues/5284))
+- Remove spurious debug from MatrixFederationHttpClient.get_json. ([\#5287](https://github.com/matrix-org/synapse/issues/5287))
+- Improve logging for logcontext leaks. ([\#5288](https://github.com/matrix-org/synapse/issues/5288))
+- Clarify that the admin change password API logs the user out. ([\#5303](https://github.com/matrix-org/synapse/issues/5303))
+- New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. ([\#5320](https://github.com/matrix-org/synapse/issues/5320))
+- Improve docstrings on MatrixFederationClient. ([\#5332](https://github.com/matrix-org/synapse/issues/5332))
+- Clean up FederationClient.get_events for clarity. ([\#5344](https://github.com/matrix-org/synapse/issues/5344))
+- Various improvements to debug logging. ([\#5353](https://github.com/matrix-org/synapse/issues/5353))
+- Don't run CI build checks until sample config check has passed. ([\#5370](https://github.com/matrix-org/synapse/issues/5370))
+- Automatically retry buildkite builds (max twice) when an agent is lost. ([\#5380](https://github.com/matrix-org/synapse/issues/5380))
+
+
+Synapse 0.99.5.2 (2019-05-30)
+=============================
+
+Bugfixes
+--------
+
+- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))
+
+
+Synapse 0.99.5.1 (2019-05-22)
+=============================
+
+0.99.5.1 supersedes 0.99.5 due to malformed debian changelog - no functional changes.
+
+Synapse 0.99.5 (2019-05-22)
+===========================
+
+No significant changes.
+
+
+Synapse 0.99.5rc1 (2019-05-21)
+==============================
+
+Features
+--------
+
+- Add ability to blacklist IP ranges for the federation client. ([\#5043](https://github.com/matrix-org/synapse/issues/5043))
+- Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names. ([\#5181](https://github.com/matrix-org/synapse/issues/5181))
+- Drop support for the undocumented /_matrix/client/v2_alpha API prefix. ([\#5190](https://github.com/matrix-org/synapse/issues/5190))
+- Add an option to disable per-room profiles. ([\#5196](https://github.com/matrix-org/synapse/issues/5196))
+- Stick an expiration date to any registered user missing one at startup if account validity is enabled. ([\#5204](https://github.com/matrix-org/synapse/issues/5204))
+- Add experimental support for relations (aka reactions and edits). ([\#5209](https://github.com/matrix-org/synapse/issues/5209), [\#5211](https://github.com/matrix-org/synapse/issues/5211), [\#5203](https://github.com/matrix-org/synapse/issues/5203), [\#5212](https://github.com/matrix-org/synapse/issues/5212))
+- Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). ([\#5210](https://github.com/matrix-org/synapse/issues/5210), [\#5217](https://github.com/matrix-org/synapse/issues/5217))
+
+
+Bugfixes
+--------
+
+- Fix image orientation when generating thumbnails (needs pillow>=4.3.0). Contributed by Pau Rodriguez-Estivill. ([\#5039](https://github.com/matrix-org/synapse/issues/5039))
+- Exclude soft-failed events from forward-extremity candidates: fixes "No forward extremities left!" error. ([\#5146](https://github.com/matrix-org/synapse/issues/5146))
+- Re-order stages in registration flows such that msisdn and email verification are done last. ([\#5174](https://github.com/matrix-org/synapse/issues/5174))
+- Fix 3pid guest invites. ([\#5177](https://github.com/matrix-org/synapse/issues/5177))
+- Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. ([\#5187](https://github.com/matrix-org/synapse/issues/5187))
+- Prevent registration for user ids that are too long to fit into a state key. Contributed by Reid Anderson. ([\#5198](https://github.com/matrix-org/synapse/issues/5198))
+- Fix incompatibility between ACME support and Python 3.5.2. ([\#5218](https://github.com/matrix-org/synapse/issues/5218))
+- Fix error handling for rooms whose versions are unknown. ([\#5219](https://github.com/matrix-org/synapse/issues/5219))
+
+
+Internal Changes
+----------------
+
+- Make /sync attempt to return device updates for both joined and invited users. Note that this doesn't currently work correctly due to other bugs. ([\#3484](https://github.com/matrix-org/synapse/issues/3484))
+- Update tests to consistently be configured via the same code that is used when loading from configuration files. ([\#5171](https://github.com/matrix-org/synapse/issues/5171), [\#5185](https://github.com/matrix-org/synapse/issues/5185))
+- Allow client event serialization to be async. ([\#5183](https://github.com/matrix-org/synapse/issues/5183))
+- Expose DataStore._get_events as get_events_as_list. ([\#5184](https://github.com/matrix-org/synapse/issues/5184))
+- Make generating SQL bounds for pagination generic. ([\#5191](https://github.com/matrix-org/synapse/issues/5191))
+- Stop telling people to install the optional dependencies by default. ([\#5197](https://github.com/matrix-org/synapse/issues/5197))
+
+
 Synapse 0.99.4 (2019-05-15)
 ===========================

INSTALL.md (43 changed lines)

@@ -1,13 +1,14 @@
-* [Installing Synapse](#installing-synapse)
-  * [Installing from source](#installing-from-source)
-    * [Platform-Specific Instructions](#platform-specific-instructions)
-    * [Troubleshooting Installation](#troubleshooting-installation)
-  * [Prebuilt packages](#prebuilt-packages)
-* [Setting up Synapse](#setting-up-synapse)
-  * [TLS certificates](#tls-certificates)
-  * [Registering a user](#registering-a-user)
-* [Setting up a TURN server](#setting-up-a-turn-server)
-* [URL previews](#url-previews)
+- [Installing Synapse](#installing-synapse)
+  - [Installing from source](#installing-from-source)
+    - [Platform-Specific Instructions](#platform-specific-instructions)
+    - [Troubleshooting Installation](#troubleshooting-installation)
+  - [Prebuilt packages](#prebuilt-packages)
+- [Setting up Synapse](#setting-up-synapse)
+  - [TLS certificates](#tls-certificates)
+  - [Email](#email)
+  - [Registering a user](#registering-a-user)
+- [Setting up a TURN server](#setting-up-a-turn-server)
+- [URL previews](#url-previews)

 # Installing Synapse

@@ -35,7 +36,7 @@ virtualenv -p python3 ~/synapse/env
 source ~/synapse/env/bin/activate
 pip install --upgrade pip
 pip install --upgrade setuptools
-pip install matrix-synapse[all]
+pip install matrix-synapse
 ```

 This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
@@ -48,7 +49,7 @@ update flag:

 ```
 source ~/synapse/env/bin/activate
-pip install -U matrix-synapse[all]
+pip install -U matrix-synapse
 ```

 Before you can start Synapse, you will need to generate a configuration
@@ -394,8 +395,22 @@ To configure Synapse to expose an HTTPS port, you will need to edit
 instance, if using certbot, use `fullchain.pem` as your certificate, not
 `cert.pem`).

-For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
-please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
+For a more detailed guide to configuring your server for federation, see
+[federate.md](docs/federate.md)
+
+## Email
+
+It is desirable for Synapse to have the capability to send email. For example,
+this is required to support the 'password reset' feature.
+
+To configure an SMTP server for Synapse, modify the configuration section
+headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
+and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
+``smtp_pass``, and ``require_transport_security``.
+
+If Synapse is not configured with an SMTP server, password reset via email will
+be disabled by default.

 ## Registering a user

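Pulling the options named above together, a minimal uncommented `email` block in homeserver.yaml might look like the following sketch; the host, port and credentials are placeholders, and the authoritative commented-out template is the one added to docs/sample_config.yaml further down this diff:

```yaml
email:
  smtp_host: "mail.example.com"     # placeholder SMTP relay
  smtp_port: 587
  smtp_user: "synapse"
  smtp_pass: "examplepassword"
  require_transport_security: true
  notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
```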
MANIFEST.in

@@ -9,14 +9,19 @@ include demo/*.py
 include demo/*.sh

 recursive-include synapse/storage/schema *.sql
+recursive-include synapse/storage/schema *.sql.postgres
+recursive-include synapse/storage/schema *.sql.sqlite
 recursive-include synapse/storage/schema *.py
+recursive-include synapse/storage/schema *.txt

 recursive-include docs *
 recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
-recursive-include tests *.pem
 recursive-include tests *.py
+include tests/http/ca.crt
+include tests/http/ca.key
+include tests/http/server.key

 recursive-include synapse/res *
 recursive-include synapse/static *.css
UPGRADE.rst (49 changed lines)

@@ -49,6 +49,55 @@ returned by the Client-Server API:
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

+Upgrading to v1.0
+=================
+
+Validation of TLS certificates
+------------------------------
+
+Synapse v1.0 is the first release to enforce
+validation of TLS certificates for the federation API. It is therefore
+essential that your certificates are correctly configured. See the `FAQ
+<docs/MSC1711_certificates_FAQ.md>`_ for more information.
+
+Note, v1.0 installations will also no longer be able to federate with servers
+that have not correctly configured their certificates.
+
+In rare cases, it may be desirable to disable certificate checking: for
+example, it might be essential to be able to federate with a given legacy
+server in a closed federation. This can be done in one of two ways:-
+
+* Configure the global switch ``federation_verify_certificates`` to ``false``.
+* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
+
+See the `sample configuration file <docs/sample_config.yaml>`_
+for more details on these settings.
+
+Email
+-----
+
+When a user requests a password reset, Synapse will send an email to the
+user to confirm the request.
+
+Previous versions of Synapse delegated the job of sending this email to an
+identity server. If the identity server was somehow malicious or became
+compromised, it would be theoretically possible to hijack an account through
+this means.
+
+Therefore, by default, Synapse v1.0 will send the confirmation email itself. If
+Synapse is not configured with an SMTP server, password reset via email will be
+disabled.
+
+To configure an SMTP server for Synapse, modify the configuration section
+headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
+and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
+``smtp_pass``, and ``require_transport_security``.
+
+If you are absolutely certain that you wish to continue using an identity
+server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``.
+
+See the `sample configuration file <docs/sample_config.yaml>`_
+for more details on these settings.
+
 Upgrading to v0.99.0
 ====================

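As a concrete sketch of the two escape hatches listed above (the domain is a placeholder; see the sample configuration file for the authoritative format):

```yaml
# Either turn verification off globally (not recommended):
federation_verify_certificates: false

# ...or leave it enabled and whitelist the specific legacy servers:
federation_certificate_verification_whitelist:
  - legacy.example.com
```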
changelog.d/* (14 newsfragment files deleted; the individual filenames are not preserved in this view)

Each deleted fragment is a one-line file (@@ -1 +0,0 @@):

-Make /sync attempt to return device updates for both joined and invited users. Note that this doesn't currently work correctly due to other bugs.
-Add ability to blacklist IP ranges for the federation client.
-Update tests to consistently be configured via the same code that is used when loading from configuration files.
-Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names.
-Allow client event serialization to be async.
-Expose DataStore._get_events as get_events_as_list.
-Update tests to consistently be configured via the same code that is used when loading from configuration files.
-Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session.
-Drop support for the undocumented /_matrix/client/v2_alpha API prefix.
-Add an option to disable per-room profiles.
-Stick an expiration date to any registered user missing one at startup if account validity is enabled.
-Fix schema update for account validity.
-Allow configuring a range for the account validity startup job.
-Fix a bug where account validity renewal emails could only be sent when email notifs were enabled.
debian/changelog (vendored; 18 changed lines)

@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.0.0) stable; urgency=medium
+
+  * New synapse release 1.0.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jun 2019 17:09:53 +0100
+
+matrix-synapse-py3 (0.99.5.2) stable; urgency=medium
+
+  * New synapse release 0.99.5.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 30 May 2019 16:28:07 +0100
+
+matrix-synapse-py3 (0.99.5.1) stable; urgency=medium
+
+  * New synapse release 0.99.5.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 22 May 2019 16:22:24 +0000
+
 matrix-synapse-py3 (0.99.4) stable; urgency=medium

   [ Christoph Müller ]
debian/test/.gitignore (vendored; new file, 2 lines)

@@ -0,0 +1,2 @@
+.vagrant
+*.log
debian/test/provision.sh (vendored; new file, 23 lines)

@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# provisioning script for vagrant boxes for testing the matrix-synapse debs.
+#
+# Will install the most recent matrix-synapse-py3 deb for this platform from
+# the /debs directory.
+
+set -e
+
+apt-get update
+apt-get install -y lsb-release
+
+deb=`ls /debs/matrix-synapse-py3_*+$(lsb_release -cs)*.deb | sort | tail -n1`
+
+debconf-set-selections <<EOF
+matrix-synapse matrix-synapse/report-stats boolean false
+matrix-synapse matrix-synapse/server-name string localhost:18448
+EOF
+
+dpkg -i "$deb"
+
+sed -i -e '/port: 8...$/{s/8448/18448/; s/8008/18008/}' -e '$aregistration_shared_secret: secret' /etc/matrix-synapse/homeserver.yaml
+systemctl restart matrix-synapse
debian/test/stretch/Vagrantfile (vendored; new file, 13 lines)

@@ -0,0 +1,13 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+ver = `cd ../../..; dpkg-parsechangelog -S Version`.strip()
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "debian/stretch64"
+
+  config.vm.synced_folder ".", "/vagrant", disabled: true
+  config.vm.synced_folder "../../../../debs", "/debs", type: "nfs"
+
+  config.vm.provision "shell", path: "../provision.sh"
+end
debian/test/xenial/Vagrantfile (vendored; new file, 10 lines)

@@ -0,0 +1,10 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/xenial64"
+
+  config.vm.synced_folder ".", "/vagrant", disabled: true
+  config.vm.synced_folder "../../../../debs", "/debs"
+  config.vm.provision "shell", path: "../provision.sh"
+end
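Taken together, provision.sh and the two Vagrantfiles give a smoke test for the built debs. A plausible manual run, assuming Vagrant is installed and the packages have already been built into the ../../../../debs directory that the synced folders point at:

```sh
cd debian/test/stretch
vagrant up                                        # boots debian/stretch64 and runs ../provision.sh
vagrant ssh -c "systemctl status matrix-synapse"  # spot-check the installed service
vagrant destroy -f
```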
docker/README.md

@@ -161,7 +161,7 @@ specify values for `SYNAPSE_CONFIG_PATH`, `SYNAPSE_SERVER_NAME` and
 example:

 ```
-docker run -it --rm
+docker run -it --rm \
     --mount type=volume,src=synapse-data,dst=/data \
     -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
     -e SYNAPSE_SERVER_NAME=my.matrix.host \
docs/CAPTCHA_SETUP.rst

@@ -7,6 +7,7 @@ Requires a public/private key pair from:

 https://developers.google.com/recaptcha/

+Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option

 Setting ReCaptcha Keys
 ----------------------
docs/MSC1711_certificates_FAQ.md

@@ -1,5 +1,22 @@
 # MSC1711 Certificates FAQ

+## Historical Note
+This document was originally written to guide server admins through the upgrade
+path towards Synapse 1.0. Specifically,
+[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
+required that all servers present valid TLS certificates on their federation
+API. Admins were encouraged to achieve compliance from version 0.99.0 (released
+in February 2019) ahead of version 1.0 (released June 2019) enforcing the
+certificate checks.
+
+Much of what follows is now outdated since most admins will have already
+upgraded, however it may be of use to those with old installs returning to the
+project.
+
+If you are setting up a server from scratch you almost certainly should look at
+the [installation guide](INSTALL.md) instead.
+
+## Introduction
 The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
 supports the r0.1 release of the server to server specification, but is
 compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well
@@ -68,16 +85,14 @@ Admins should upgrade and configure a valid CA cert. Homeservers that require a
 .well-known entry (see below), should retain their SRV record and use it
 alongside their .well-known record.

-**>= 5th March 2019 - Synapse 1.0.0 is released**
+**10th June 2019 - Synapse 1.0.0 is released**

-1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one
-month after 5th February to upgrade to 0.99.0 and deploy their certificates. In
+1.0.0 is scheduled for release on 10th June. In
 accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
 1.0.0 will enforce certificate validity. This means that any homeserver without a
 valid certificate after this point will no longer be able to federate with
 1.0.0 servers.

 ## Configuring certificates for compatibility with Synapse 1.0.0

 ### If you do not currently have an SRV record
@@ -145,12 +160,11 @@ You can do this with a `.well-known` file as follows:
 1. Keep the SRV record in place - it is needed for backwards compatibility
    with Synapse 0.34 and earlier.

-2. Give synapse a certificate corresponding to the target domain
-   (`customer.example.net` in the above example). Currently Synapse's ACME
-   support [does not support
-   this](https://github.com/matrix-org/synapse/issues/4552), so you will have
-   to acquire a certificate yourself and give it to Synapse via
-   `tls_certificate_path` and `tls_private_key_path`.
+2. Give Synapse a certificate corresponding to the target domain
+   (`customer.example.net` in the above example). You can either use Synapse's
+   built-in [ACME support](./ACME.md) for this (via the `domain` parameter in
+   the `acme` section), or acquire a certificate yourself and give it to
+   Synapse via `tls_certificate_path` and `tls_private_key_path`.

 3. Restart Synapse to ensure the new certificate is loaded.

docs/admin_api/user_admin_api.rst

@@ -69,7 +69,7 @@ An empty body may be passed for backwards compatibility.
 Reset password
 ==============

-Changes the password of another user.
+Changes the password of another user. This will automatically log the user out of all their devices.

 The api is::

docs/federate.md

@@ -14,9 +14,9 @@ up and will work provided you set the ``server_name`` to match your
 machine's public DNS hostname, and provide Synapse with a TLS certificate
 which is valid for your ``server_name``.

-Once you have completed the steps necessary to federate, you should be able to
-join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a
-room for Synapse admins.)
+Once federation has been configured, you should be able to join a room over
+federation. A good place to start is ``#synapse:matrix.org`` - a room for
+Synapse admins.


 ## Delegation
@@ -98,6 +98,77 @@ _matrix._tcp.<server_name>``. In our example, we would expect this:
 Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
 directly to the server hosting the synapse instance.

+### Delegation FAQ
+#### When do I need a SRV record or .well-known URI?
+
+If your homeserver listens on the default federation port (8448), and your
+`server_name` points to the host that your homeserver runs on, you do not need an SRV
+record or `.well-known/matrix/server` URI.
+
+For instance, if you registered `example.com` and pointed its DNS A record at a
+fresh server, you could install Synapse on that host,
+giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
+it would automatically generate a valid TLS certificate for you via Let's Encrypt
+and no SRV record or .well-known URI would be needed.
+
+This is the common case, although you can add an SRV record or
+`.well-known/matrix/server` URI for completeness if you wish.
+
+**However**, if your server does not listen on port 8448, or if your `server_name`
+does not point to the host that your homeserver runs on, you will need to let
+other servers know how to find it. The way to do this is via .well-known or an
+SRV record.
+
+#### I have created a .well-known URI. Do I still need an SRV record?
+
+As of Synapse 0.99, Synapse will first check for the existence of a .well-known
+URI and follow any delegation it suggests. It will only then check for the
+existence of an SRV record.
+
+That means that the SRV record will often be redundant. However, you should
+remember that there may still be older versions of Synapse in the federation
+which do not understand .well-known URIs, so if you removed your SRV record
+you would no longer be able to federate with them.
+
+It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
+earlier will follow the SRV record (and not care about the invalid
+certificate). Synapse 0.99 and later will follow the .well-known URI, with the
+correct certificate chain.
+
+#### Can I manage my own certificates rather than having Synapse renew certificates itself?
+
+Yes, you are welcome to manage your certificates yourself. Synapse will only
+attempt to obtain certificates from Let's Encrypt if you configure it to do
+so. The only requirement is that there is a valid TLS cert present for
+federation end points.
+
+#### Do you still recommend against using a reverse proxy on the federation port?
+
+We no longer actively recommend against using a reverse proxy. Many admins will
+find it easier to direct federation traffic to a reverse proxy and manage their
+own TLS certificates, and this is a supported configuration.
+
+See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
+reverse proxy.
+
+#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
+
+Practically speaking, this is no longer necessary.
+
+If you are using a reverse proxy for all of your TLS traffic, then you can set
+`no_tls: True` in the Synapse config. In that case, the only reason Synapse
+needs the certificate is to populate a legacy `tls_fingerprints` field in the
+federation API. This is ignored by Synapse 0.99.0 and later, and the only time
+pre-0.99 Synapses will check it is when attempting to fetch the server keys -
+and generally this is delegated via `matrix.org`, which will be running a modern
+version of Synapse.
+
+#### Do I need the same certificate for the client and federation port?
+
+No. There is nothing stopping you from using different certificates,
+particularly if you are using a reverse proxy. However, Synapse will use the
+same certificate on any ports where TLS is configured.
+
 ## Troubleshooting

 You can use the [federation tester](
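For context on the `.well-known/matrix/server` URI discussed in the FAQ above: it is a JSON document served over HTTPS from the `server_name` domain, whose `m.server` key names the host (and optionally port) that actually runs the homeserver. A sketch with placeholder hostnames:

```sh
$ curl https://example.com/.well-known/matrix/server
{ "m.server": "synapse.example.com:443" }
```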
docs/postgres.rst

@@ -3,6 +3,28 @@ Using Postgres

 Postgres version 9.4 or later is known to work.

+Install postgres client libraries
+=================================
+
+Synapse will require the python postgres client library in order to connect to
+a postgres database.
+
+* If you are using the `matrix.org debian/ubuntu
+  packages <../INSTALL.md#matrixorg-packages>`_,
+  the necessary libraries will already be installed.
+
+* For other pre-built packages, please consult the documentation from the
+  relevant package.
+
+* If you installed synapse `in a virtualenv
+  <../INSTALL.md#installing-from-source>`_, you can install the library with::
+
+      ~/synapse/env/bin/pip install matrix-synapse[postgres]
+
+  (substituting the path to your virtualenv for ``~/synapse/env``, if you used a
+  different path). You will require the postgres development files. These are in
+  the ``libpq-dev`` package on Debian-derived distributions.
+
 Set up database
 ===============

@@ -26,29 +48,6 @@ encoding use, e.g.::
 This would create an appropriate database named ``synapse`` owned by the
 ``synapse_user`` user (which must already exist).

-Set up client in Debian/Ubuntu
-===========================
-
-Postgres support depends on the postgres python connector ``psycopg2``. In the
-virtual env::
-
-    sudo apt-get install libpq-dev
-    pip install psycopg2
-
-Set up client in RHEL/CentOs 7
-==============================
-
-Make sure you have the appropriate version of postgres-devel installed. For a
-postgres 9.4, use the postgres 9.4 packages from
-[here](https://wiki.postgresql.org/wiki/YUM_Installation).
-
-As with Debian/Ubuntu, postgres support depends on the postgres python connector
-``psycopg2``. In the virtual env::
-
-    sudo yum install postgresql-devel libpqxx-devel.x86_64
-    export PATH=/usr/pgsql-9.4/bin/:$PATH
-    pip install psycopg2
-
 Tuning Postgres
 ===============

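With the client library installed and the database created, Synapse is pointed at Postgres through the `database` section of homeserver.yaml. A sketch using the `synapse_user`/`synapse` names from the section above (the password is a placeholder):

```yaml
database:
  name: psycopg2          # the postgres connector, instead of the sqlite3 default
  args:
    user: synapse_user
    password: secretpassword
    database: synapse
    host: localhost
```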
docs/sample_config.yaml

@@ -83,6 +83,16 @@ pid_file: DATADIR/homeserver.pid
 #
 #restrict_public_rooms_to_local_users: true

+# The default room version for newly created rooms.
+#
+# Known room versions are listed here:
+# https://matrix.org/docs/spec/#complete-list-of-room-versions
+#
+# For example, for room version 1, default_room_version should be set
+# to "1".
+#
+#default_room_version: "4"
+
 # The GC threshold parameters to pass to `gc.set_threshold`, if defined
 #
 #gc_thresholds: [700, 10, 10]
@@ -251,6 +261,22 @@ listeners:

 # Monthly Active User Blocking
 #
+# Used in cases where the admin or server owner wants to limit to the
+# number of monthly active users.
+#
+# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
+# enabled and a limit is reached the server returns a 'ResourceLimitError'
+# with error type Codes.RESOURCE_LIMIT_EXCEEDED
+#
+# 'max_mau_value' is the hard limit of monthly active users above which
+# the server will start blocking user actions.
+#
+# 'mau_trial_days' is a means to add a grace period for active users. It
+# means that users must be active for this number of days before they
+# can be considered active and guards against the case where lots of users
+# sign up in a short space of time never to return after their initial
+# session.
+#
 #limit_usage_by_mau: False
 #max_mau_value: 50
 #mau_trial_days: 2
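Uncommented, the three options documented above combine as in this sketch (the values shown are the sample defaults, not recommendations):

```yaml
limit_usage_by_mau: true
max_mau_value: 50
mau_trial_days: 2
```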
@@ -303,12 +329,12 @@ listeners:
 #
 #tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"

-# Whether to verify TLS certificates when sending federation traffic.
+# Whether to verify TLS server certificates for outbound federation requests.
 #
-# This currently defaults to `false`, however this will change in
-# Synapse 1.0 when valid federation certificates will be required.
+# Defaults to `true`. To disable certificate verification, uncomment the
+# following line.
 #
-#federation_verify_certificates: true
+#federation_verify_certificates: false

 # Skip federation certificate verification on the following whitelist
 # of domains.
@@ -978,12 +1004,43 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"

 # The trusted servers to download signing keys from.
 #
-#perspectives:
-#  servers:
-#    "matrix.org":
+# When we need to fetch a signing key, each server is tried in parallel.
+#
+# Normally, the connection to the key server is validated via TLS certificates.
+# Additional security can be provided by configuring a `verify key`, which
+# will make synapse check that the response is signed by that key.
+#
+# This setting supersedes an older setting named `perspectives`. The old format
+# is still supported for backwards-compatibility, but it is deprecated.
+#
+# Options for each entry in the list include:
+#
+#    server_name: the name of the server. required.
+#
+#    verify_keys: an optional map from key id to base64-encoded public key.
+#       If specified, we will check that the response is signed by at least
+#       one of the given keys.
+#
+#    accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
+#       and federation_verify_certificates is not `true`, synapse will refuse
+#       to start, because this would allow anyone who can spoof DNS responses
+#       to masquerade as the trusted key server. If you know what you are doing
+#       and are sure that your network environment provides a secure connection
+#       to the key server, you can set this to `true` to override this
+#       behaviour.
+#
+# An example configuration might look like:
+#
+#trusted_key_servers:
+#  - server_name: "my_trusted_server.example.com"
 #      verify_keys:
-#        "ed25519:auto":
-#          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+#        "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
+#  - server_name: "my_other_trusted_server.example.com"
+#
+# The default configuration is:
+#
+#trusted_key_servers:
+#  - server_name: "matrix.org"


 # Enable SAML2 for registration and login. Uses pysaml2.
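To make the deprecation concrete, the removed `perspectives` block and its `trusted_key_servers` replacement express the same trust relationship; a side-by-side sketch using the matrix.org key from the removed lines:

```yaml
# Old, deprecated format:
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

# New format (verify_keys is optional, since TLS already validates the connection):
trusted_key_servers:
  - server_name: "matrix.org"
    verify_keys:
      "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
```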
@ -1090,10 +1147,8 @@ password_config:
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Enable sending emails for notification events or expiry notices
|
# Enable sending emails for password resets, notification events or
|
||||||
# Defining a custom URL for Riot is only needed if email notifications
|
# account expiry notices
|
||||||
# should contain links to a self-hosted installation of Riot; when set
|
|
||||||
# the "app_name" setting is ignored.
|
|
||||||
#
|
#
|
||||||
# If your SMTP server requires authentication, the optional smtp_user &
|
# If your SMTP server requires authentication, the optional smtp_user &
|
||||||
# smtp_pass variables should be used
|
# smtp_pass variables should be used
|
||||||
@@ -1101,22 +1156,64 @@ password_config:
 #email:
 #   enable_notifs: false
 #   smtp_host: "localhost"
-#   smtp_port: 25
+#   smtp_port: 25 # SSL: 465, STARTTLS: 587
 #   smtp_user: "exampleusername"
 #   smtp_pass: "examplepassword"
 #   require_transport_security: False
 #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
 #   app_name: Matrix
-#   # if template_dir is unset, uses the example templates that are part of
-#   # the Synapse distribution.
+#
+#   # Enable email notifications by default
+#   notif_for_new_users: True
+#
+#   # Defining a custom URL for Riot is only needed if email notifications
+#   # should contain links to a self-hosted installation of Riot; when set
+#   # the "app_name" setting is ignored
+#   riot_base_url: "http://localhost/riot"
+#
+#   # Enable sending password reset emails via the configured, trusted
+#   # identity servers
+#   #
+#   # IMPORTANT! This will give a malicious or overtaken identity server
+#   # the ability to reset passwords for your users! Make absolutely sure
+#   # that you want to do this! It is strongly recommended that password
+#   # reset emails be sent by the homeserver instead
+#   #
+#   # If this option is set to false and SMTP options have not been
+#   # configured, resetting user passwords via email will be disabled
+#   #trust_identity_server_for_password_resets: false
+#
+#   # Configure the time that a validation email or text message code
+#   # will expire after sending
+#   #
+#   # This is currently used for password resets
+#   #validation_token_lifetime: 1h
+#
+#   # Template directory. All template files should be stored within this
+#   # directory
+#   #
 #   #template_dir: res/templates
+#
+#   # Templates for email notifications
+#   #
 #   notif_template_html: notif_mail.html
 #   notif_template_text: notif_mail.txt
-#   # Templates for account expiry notices.
+#
+#   # Templates for account expiry notices
+#   #
 #   expiry_template_html: notice_expiry.html
 #   expiry_template_text: notice_expiry.txt
-#   notif_for_new_users: True
-#   riot_base_url: "http://localhost/riot"
+#
+#   # Templates for password reset emails sent by the homeserver
+#   #
+#   #password_reset_template_html: password_reset.html
+#   #password_reset_template_text: password_reset.txt
+#
+#   # Templates for password reset success and failure pages that a user
+#   # will see after attempting to reset their password
+#   #
+#   #password_reset_template_success_html: password_reset_success.html
+#   #password_reset_template_failure_html: password_reset_failure.html
 
 
 #password_providers:
@@ -1177,9 +1274,9 @@ password_config:
 #
 # 'search_all_users' defines whether to search all users visible to your HS
 # when searching the user directory, rather than limiting to users visible
-# in public rooms. Defaults to false. If you set it True, you'll have to run
-# UPDATE user_directory_stream_pos SET stream_id = NULL;
-# on your database to tell it to rebuild the user_directory search indexes.
+# in public rooms. Defaults to false. If you set it True, you'll have to
+# rebuild the user_directory search indexes, see
+# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
 #
 #user_directory:
 #   enabled: true
@@ -1242,6 +1339,22 @@ password_config:
 #
 
 
+# Local statistics collection. Used in populating the room directory.
+#
+# 'bucket_size' controls how large each statistics timeslice is. It can
+# be defined in a human readable short form -- e.g. "1d", "1y".
+#
+# 'retention' controls how long historical statistics will be kept for.
+# It can be defined in a human readable short form -- e.g. "1d", "1y".
+#
+#
+#stats:
+#   enabled: true
+#   bucket_size: 1d
+#   retention: 1y
+
+
 # Server Notices room configuration
 #
 # Uncomment this section to enable a room which can be used to send notices
@@ -7,11 +7,7 @@ who are present in a publicly viewable room present on the server.
 
 The directory info is stored in various tables, which can (typically after
 DB corruption) get stale or out of sync. If this happens, for now the
-quickest solution to fix it is:
-
-```
-UPDATE user_directory_stream_pos SET stream_id = NULL;
-```
-
-and restart the synapse, which should then start a background task to
+solution to fix it is to execute the SQL here
+https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql
+and then restart synapse. This should then start a background task to
 flush the current tables and regenerate the directory.
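If you prefer to trigger the rebuild by hand, the old one-liner is still the first step. A sketch for a PostgreSQL-backed homeserver (assumes psycopg2 and made-up connection details; the linked user_dir_populate.sql is the authoritative procedure):

```python
import psycopg2

# Hypothetical connection details.
conn = psycopg2.connect("dbname=synapse user=synapse_user")
with conn, conn.cursor() as cur:
    # Clearing the stream position makes synapse repopulate the user
    # directory in a background task on the next restart.
    cur.execute("UPDATE user_directory_stream_pos SET stream_id = NULL;")
conn.close()
```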
@@ -20,9 +20,7 @@ class CallVisitor(ast.NodeVisitor):
         else:
             return
 
-        if name == "client_path_patterns":
-            PATTERNS_V1.append(node.args[0].s)
-        elif name == "client_v2_patterns":
+        if name == "client_patterns":
             PATTERNS_V2.append(node.args[0].s)
scripts/generate_signing_key.py (new executable file, 37 lines)
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import sys
+
+from signedjson.key import write_signing_keys, generate_signing_key
+
+from synapse.util.stringutils import random_string
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "-o", "--output_file",
+
+        type=argparse.FileType('w'),
+        default=sys.stdout,
+        help="Where to write the output to",
+    )
+    args = parser.parse_args()
+
+    key_id = "a_" + random_string(4)
+    key = generate_signing_key(key_id),
+    write_signing_keys(args.output_file, key)
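For completeness, reading a generated key back is the mirror-image call in signedjson (the filename here is hypothetical; pass whatever was given to --output_file):

```python
from signedjson.key import read_signing_keys

# Hypothetical path.
with open("my.signing.key") as f:
    keys = read_signing_keys(f)

# Each entry carries its algorithm and version (the "a_xxxx" key id suffix).
print(keys[0].alg, keys[0].version)
```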
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.99.4"
+__version__ = "1.0.0"
@@ -23,6 +23,9 @@ MAX_DEPTH = 2**63 - 1
 # the maximum length for a room alias is 255 characters
 MAX_ALIAS_LENGTH = 255
 
+# the maximum length for a user id is 255 characters
+MAX_USERID_LENGTH = 255
+
 
 class Membership(object):
@@ -76,6 +79,7 @@ class EventTypes(object):
 
     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
+    Encryption = "m.room.encryption"
     RoomAvatar = "m.room.avatar"
     RoomEncryption = "m.room.encryption"
     GuestAccess = "m.room.guest_access"
@@ -117,3 +121,11 @@ class UserTypes(object):
     """
     SUPPORT = "support"
     ALL_USER_TYPES = (SUPPORT,)
+
+
+class RelationTypes(object):
+    """The types of relations known to this server.
+    """
+    ANNOTATION = "m.annotation"
+    REPLACE = "m.replace"
+    REFERENCE = "m.reference"
@@ -336,9 +336,32 @@ class RoomKeysVersionError(SynapseError):
         self.current_version = current_version
 
 
-class IncompatibleRoomVersionError(SynapseError):
-    """A server is trying to join a room whose version it does not support."""
+class UnsupportedRoomVersionError(SynapseError):
+    """The client's request to create a room used a room version that the server does
+    not support."""
+    def __init__(self):
+        super(UnsupportedRoomVersionError, self).__init__(
+            code=400,
+            msg="Homeserver does not support this room version",
+            errcode=Codes.UNSUPPORTED_ROOM_VERSION,
+        )
+
+
+class ThreepidValidationError(SynapseError):
+    """An error raised when there was a problem authorising an event."""
+
+    def __init__(self, *args, **kwargs):
+        if "errcode" not in kwargs:
+            kwargs["errcode"] = Codes.FORBIDDEN
+        super(ThreepidValidationError, self).__init__(*args, **kwargs)
+
+
+class IncompatibleRoomVersionError(SynapseError):
+    """A server is trying to join a room whose version it does not support.
+
+    Unlike UnsupportedRoomVersionError, it is specific to the case of the make_join
+    failing.
+    """
     def __init__(self, room_version):
         super(IncompatibleRoomVersionError, self).__init__(
             code=400,
@@ -19,13 +19,15 @@ class EventFormatVersions(object):
     """This is an internal enum for tracking the version of the event format,
     independently from the room version.
     """
-    V1 = 1   # $id:server format
-    V2 = 2   # MSC1659-style $hash format: introduced for room v3
+    V1 = 1   # $id:server event id format
+    V2 = 2   # MSC1659-style $hash event id format: introduced for room v3
+    V3 = 3   # MSC1884-style $hash format: introduced for room v4
 
 
 KNOWN_EVENT_FORMAT_VERSIONS = {
     EventFormatVersions.V1,
     EventFormatVersions.V2,
+    EventFormatVersions.V3,
 }
@@ -48,6 +50,7 @@ class RoomVersion(object):
     disposition = attr.ib()  # str; one of the RoomDispositions
     event_format = attr.ib()  # int; one of the EventFormatVersions
     state_res = attr.ib()  # int; one of the StateResolutionVersions
+    enforce_key_validity = attr.ib()  # bool
 
 
 class RoomVersions(object):
@@ -56,29 +59,36 @@ class RoomVersions(object):
         RoomDisposition.STABLE,
         EventFormatVersions.V1,
         StateResolutionVersions.V1,
-    )
-    STATE_V2_TEST = RoomVersion(
-        "state-v2-test",
-        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V1,
-        StateResolutionVersions.V2,
+        enforce_key_validity=False,
     )
     V2 = RoomVersion(
         "2",
         RoomDisposition.STABLE,
         EventFormatVersions.V1,
         StateResolutionVersions.V2,
+        enforce_key_validity=False,
     )
     V3 = RoomVersion(
         "3",
         RoomDisposition.STABLE,
         EventFormatVersions.V2,
         StateResolutionVersions.V2,
+        enforce_key_validity=False,
+    )
+    V4 = RoomVersion(
+        "4",
+        RoomDisposition.STABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=False,
+    )
+    V5 = RoomVersion(
+        "5",
+        RoomDisposition.STABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
     )
 
 
-# the version we will give rooms which are created on this server
-DEFAULT_ROOM_VERSION = RoomVersions.V1
-
-
 KNOWN_ROOM_VERSIONS = {
@@ -86,6 +96,7 @@ KNOWN_ROOM_VERSIONS = {
         RoomVersions.V1,
         RoomVersions.V2,
         RoomVersions.V3,
-        RoomVersions.STATE_V2_TEST,
+        RoomVersions.V4,
+        RoomVersions.V5,
     )
 }  # type: dict[str, RoomVersion]
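A short sketch (assuming synapse is importable) of what the reworked room-version table exposes; v5 is the first version that sets enforce_key_validity:

```python
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

v5 = KNOWN_ROOM_VERSIONS["5"]
print(v5.disposition)           # "stable"
print(v5.event_format)          # 3, i.e. EventFormatVersions.V3 ($hash event ids)
print(v5.enforce_key_validity)  # True -- only v5 sets this

# the unstable test version is gone from the table:
print("state-v2-test" in KNOWN_ROOM_VERSIONS)  # False
```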
@@ -26,6 +26,7 @@ CLIENT_API_PREFIX = "/_matrix/client"
 FEDERATION_PREFIX = "/_matrix/federation"
 FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
 FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
+FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
@@ -344,14 +344,20 @@ class _LimitedHostnameResolver(object):
 
     def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
                         addressTypes=None, transportSemantics='TCP'):
-        # Note this is happening deep within the reactor, so we don't need to
-        # worry about log contexts.
-
         # We need this function to return `resolutionReceiver` so we do all the
         # actual logic involving deferreds in a separate function.
-        self._resolve(
-            resolutionReceiver, hostName, portNumber,
-            addressTypes, transportSemantics,
-        )
+
+        # even though this is happening within the depths of twisted, we need to drop
+        # our logcontext before starting _resolve, otherwise: (a) _resolve will drop
+        # the logcontext if it returns an incomplete deferred; (b) _resolve will
+        # call the resolutionReceiver *with* a logcontext, which it won't be expecting.
+        with PreserveLoggingContext():
+            self._resolve(
+                resolutionReceiver,
+                hostName,
+                portNumber,
+                addressTypes,
+                transportSemantics,
+            )
 
         return resolutionReceiver
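The logcontext dance is subtle enough to deserve a standalone sketch. This mirrors the pattern in the hunk rather than synapse's exact code, and assumes this era's location of the helper (synapse.util.logcontext):

```python
from synapse.util.logcontext import PreserveLoggingContext

def hand_off_to_reactor(resolve, receiver, hostname):
    # Drop our logcontext before calling into twisted-land: otherwise the
    # deferred machinery either drops the context for us (badly), or fires
    # callbacks *with* a context they were never expecting to own.
    with PreserveLoggingContext():
        resolve(receiver, hostname)
    return receiver
```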
@@ -38,6 +38,7 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.profile import SlavedProfileStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
@@ -81,6 +82,7 @@ class ClientReaderSlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedTransactionStore,
+    SlavedProfileStore,
     SlavedClientIpStore,
     BaseSlavedStore,
 ):
@@ -37,8 +37,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
-from synapse.rest.client.v2_alpha._base import client_v2_patterns
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
@@ -49,11 +48,11 @@ from synapse.util.versionstring import get_version_string
 logger = logging.getLogger("synapse.app.frontend_proxy")
 
 
-class PresenceStatusStubServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
+class PresenceStatusStubServlet(RestServlet):
+    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
 
     def __init__(self, hs):
-        super(PresenceStatusStubServlet, self).__init__(hs)
+        super(PresenceStatusStubServlet, self).__init__()
         self.http_client = hs.get_simple_http_client()
         self.auth = hs.get_auth()
         self.main_uri = hs.config.worker_main_http_uri
@@ -84,7 +83,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
 
 
 class KeyUploadServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
+    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
 
     def __init__(self, hs):
         """
@@ -50,6 +50,11 @@ class EmailConfig(Config):
         else:
             self.email_app_name = "Matrix"
 
+        # TODO: Rename notif_from to something more generic, or have a separate
+        # from for password resets, message notifications, etc?
+        # Currently the email section is a bit bogged down with settings for
+        # multiple functions. Would be good to split it out into separate
+        # sections and only put the common ones under email:
         self.email_notif_from = email_config.get("notif_from", None)
         if self.email_notif_from is not None:
             # make sure it's valid
@@ -74,7 +79,28 @@ class EmailConfig(Config):
             "account_validity", {},
         ).get("renew_at")
 
-        if self.email_enable_notifs or account_validity_renewal_enabled:
+        email_trust_identity_server_for_password_resets = email_config.get(
+            "trust_identity_server_for_password_resets", False,
+        )
+        self.email_password_reset_behaviour = (
+            "remote" if email_trust_identity_server_for_password_resets else "local"
+        )
+        if self.email_password_reset_behaviour == "local" and email_config == {}:
+            logger.warn(
+                "User password resets have been disabled due to lack of email config"
+            )
+            self.email_password_reset_behaviour = "off"
+
+        # Get lifetime of a validation token in milliseconds
+        self.email_validation_token_lifetime = self.parse_duration(
+            email_config.get("validation_token_lifetime", "1h")
+        )
+
+        if (
+            self.email_enable_notifs
+            or account_validity_renewal_enabled
+            or self.email_password_reset_behaviour == "local"
+        ):
             # make sure we can import the required deps
             import jinja2
             import bleach
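The resulting three-way behaviour is easier to see stripped of the config plumbing. A plain-Python restatement of the selection logic above (no synapse imports):

```python
def password_reset_behaviour(email_config):
    # "remote": delegate reset emails to a trusted identity server.
    # "local":  the homeserver sends them itself (requires SMTP settings).
    # "off":    no email section at all, so email resets are disabled.
    if email_config.get("trust_identity_server_for_password_resets", False):
        return "remote"
    if email_config == {}:
        return "off"
    return "local"

assert password_reset_behaviour({}) == "off"
assert password_reset_behaviour({"smtp_host": "localhost"}) == "local"
assert password_reset_behaviour(
    {"trust_identity_server_for_password_resets": True}
) == "remote"
```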
@@ -82,6 +108,67 @@ class EmailConfig(Config):
             jinja2
             bleach
 
+        if self.email_password_reset_behaviour == "local":
+            required = [
+                "smtp_host",
+                "smtp_port",
+                "notif_from",
+            ]
+
+            missing = []
+            for k in required:
+                if k not in email_config:
+                    missing.append(k)
+
+            if (len(missing) > 0):
+                raise RuntimeError(
+                    "email.password_reset_behaviour is set to 'local' "
+                    "but required keys are missing: %s" %
+                    (", ".join(["email." + k for k in missing]),)
+                )
+
+            # Templates for password reset emails
+            self.email_password_reset_template_html = email_config.get(
+                "password_reset_template_html", "password_reset.html",
+            )
+            self.email_password_reset_template_text = email_config.get(
+                "password_reset_template_text", "password_reset.txt",
+            )
+            self.email_password_reset_failure_template = email_config.get(
+                "password_reset_failure_template", "password_reset_failure.html",
+            )
+            # This template does not support any replaceable variables, so we will
+            # read it from the disk once during setup
+            email_password_reset_success_template = email_config.get(
+                "password_reset_success_template", "password_reset_success.html",
+            )
+
+            # Check templates exist
+            for f in [self.email_password_reset_template_html,
+                      self.email_password_reset_template_text,
+                      self.email_password_reset_failure_template,
+                      email_password_reset_success_template]:
+                p = os.path.join(self.email_template_dir, f)
+                if not os.path.isfile(p):
+                    raise ConfigError("Unable to find template file %s" % (p, ))
+
+            # Retrieve content of web templates
+            filepath = os.path.join(
+                self.email_template_dir,
+                email_password_reset_success_template,
+            )
+            self.email_password_reset_success_html_content = self.read_file(
+                filepath,
+                "email.password_reset_template_success_html",
+            )
+
+            if config.get("public_baseurl") is None:
+                raise RuntimeError(
+                    "email.password_reset_behaviour is set to 'local' but no "
+                    "public_baseurl is set. This is necessary to generate password "
+                    "reset links"
+                )
+
         if self.email_enable_notifs:
             required = [
                 "smtp_host",
@@ -121,10 +208,6 @@ class EmailConfig(Config):
             self.email_riot_base_url = email_config.get(
                 "riot_base_url", None
             )
-        else:
-            self.email_enable_notifs = False
-            # Not much point setting defaults for the rest: it would be an
-            # error for them to be used.
 
         if account_validity_renewal_enabled:
             self.email_expiry_template_html = email_config.get(
@@ -141,10 +224,8 @@ class EmailConfig(Config):
 
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
-        # Enable sending emails for notification events or expiry notices
-        # Defining a custom URL for Riot is only needed if email notifications
-        # should contain links to a self-hosted installation of Riot; when set
-        # the "app_name" setting is ignored.
+        # Enable sending emails for password resets, notification events or
+        # account expiry notices
         #
         # If your SMTP server requires authentication, the optional smtp_user &
         # smtp_pass variables should be used
@@ -152,20 +233,62 @@ class EmailConfig(Config):
         #email:
         #   enable_notifs: false
         #   smtp_host: "localhost"
-        #   smtp_port: 25
+        #   smtp_port: 25 # SSL: 465, STARTTLS: 587
         #   smtp_user: "exampleusername"
         #   smtp_pass: "examplepassword"
         #   require_transport_security: False
         #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
         #   app_name: Matrix
-        #   # if template_dir is unset, uses the example templates that are part of
-        #   # the Synapse distribution.
+        #
+        #   # Enable email notifications by default
+        #   notif_for_new_users: True
+        #
+        #   # Defining a custom URL for Riot is only needed if email notifications
+        #   # should contain links to a self-hosted installation of Riot; when set
+        #   # the "app_name" setting is ignored
+        #   riot_base_url: "http://localhost/riot"
+        #
+        #   # Enable sending password reset emails via the configured, trusted
+        #   # identity servers
+        #   #
+        #   # IMPORTANT! This will give a malicious or overtaken identity server
+        #   # the ability to reset passwords for your users! Make absolutely sure
+        #   # that you want to do this! It is strongly recommended that password
+        #   # reset emails be sent by the homeserver instead
+        #   #
+        #   # If this option is set to false and SMTP options have not been
+        #   # configured, resetting user passwords via email will be disabled
+        #   #trust_identity_server_for_password_resets: false
+        #
+        #   # Configure the time that a validation email or text message code
+        #   # will expire after sending
+        #   #
+        #   # This is currently used for password resets
+        #   #validation_token_lifetime: 1h
+        #
+        #   # Template directory. All template files should be stored within this
+        #   # directory
+        #   #
         #   #template_dir: res/templates
+        #
+        #   # Templates for email notifications
+        #   #
         #   notif_template_html: notif_mail.html
         #   notif_template_text: notif_mail.txt
-        #   # Templates for account expiry notices.
+        #
+        #   # Templates for account expiry notices
+        #   #
         #   expiry_template_html: notice_expiry.html
         #   expiry_template_text: notice_expiry.txt
-        #   notif_for_new_users: True
-        #   riot_base_url: "http://localhost/riot"
+        #
+        #   # Templates for password reset emails sent by the homeserver
+        #   #
+        #   #password_reset_template_html: password_reset.html
+        #   #password_reset_template_text: password_reset.txt
+        #
+        #   # Templates for password reset success and failure pages that a user
+        #   # will see after attempting to reset their password
+        #   #
+        #   #password_reset_template_success_html: password_reset_success.html
+        #   #password_reset_template_failure_html: password_reset_failure.html
         """
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from .api import ApiConfig
 from .appservice import AppServiceConfig
 from .captcha import CaptchaConfig
@@ -36,20 +37,41 @@ from .saml2_config import SAML2Config
 from .server import ServerConfig
 from .server_notices_config import ServerNoticesConfig
 from .spam_checker import SpamCheckerConfig
+from .stats import StatsConfig
 from .tls import TlsConfig
 from .user_directory import UserDirectoryConfig
 from .voip import VoipConfig
 from .workers import WorkerConfig
 
 
-class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig,
-                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
-                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
-                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
-                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
-                       ConsentConfig,
-                       ServerNoticesConfig, RoomDirectoryConfig,
-                       ):
+class HomeServerConfig(
+    ServerConfig,
+    TlsConfig,
+    DatabaseConfig,
+    LoggingConfig,
+    RatelimitConfig,
+    ContentRepositoryConfig,
+    CaptchaConfig,
+    VoipConfig,
+    RegistrationConfig,
+    MetricsConfig,
+    ApiConfig,
+    AppServiceConfig,
+    KeyConfig,
+    SAML2Config,
+    CasConfig,
+    JWTConfig,
+    PasswordConfig,
+    EmailConfig,
+    WorkerConfig,
+    PasswordAuthProviderConfig,
+    PushConfig,
+    SpamCheckerConfig,
+    GroupsConfig,
+    UserDirectoryConfig,
+    ConsentConfig,
+    StatsConfig,
+    ServerNoticesConfig,
+    RoomDirectoryConfig,
+):
     pass
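The reflowed class list is cosmetic, but it is worth seeing why the list matters: HomeServerConfig composes one mixin per settings area, and each contributes its own read_config via the MRO. A toy sketch of the pattern (hypothetical names, not synapse's actual plumbing):

```python
class Config(object):
    def invoke_all(self, name, *args):
        # Call `name` on every class in the MRO that defines it, so each
        # mixin (ServerConfig, StatsConfig, ...) reads its own settings.
        results = []
        for cls in type(self).mro():
            if name in cls.__dict__:
                results.append(cls.__dict__[name](self, *args))
        return results


class ServerConfig(Config):
    def read_config(self, config):
        self.server_name = config["server_name"]


class StatsConfig(Config):
    def read_config(self, config):
        self.stats_enabled = config.get("stats", {}).get("enabled", True)


class HomeServerConfig(ServerConfig, StatsConfig):
    pass


hs_config = HomeServerConfig()
hs_config.invoke_all("read_config", {"server_name": "example.com"})
print(hs_config.server_name, hs_config.stats_enabled)  # example.com True
```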
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +18,8 @@ import hashlib
 import logging
 import os
 
+import attr
+import jsonschema
 from signedjson.key import (
     NACL_ED25519,
     decode_signing_key_base64,
@@ -32,11 +35,36 @@ from synapse.util.stringutils import random_string, random_string_with_symbols
 
 from ._base import Config, ConfigError
 
+INSECURE_NOTARY_ERROR = """\
+Your server is configured to accept key server responses without signature
+validation or TLS certificate validation. This is likely to be very insecure. If
+you are *sure* you want to do this, set 'accept_keys_insecurely' on the
+keyserver configuration."""
+
+RELYING_ON_MATRIX_KEY_ERROR = """\
+Your server is configured to accept key server responses without TLS certificate
+validation, and which are only signed by the old (possibly compromised)
+matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do,
+and you should enable 'federation_verify_certificates' in your configuration.
+
+If you are *sure* you want to do this, set 'accept_keys_insecurely' on the
+trusted_key_server configuration."""
+
+
 logger = logging.getLogger(__name__)
 
 
-class KeyConfig(Config):
+@attr.s
+class TrustedKeyServer(object):
+    # string: name of the server.
+    server_name = attr.ib()
+
+    # dict[str,VerifyKey]|None: map from key id to key object, or None to disable
+    # signature verification.
+    verify_keys = attr.ib(default=None)
+
+
+class KeyConfig(Config):
     def read_config(self, config):
         # the signing key can be specified inline or in a separate file
         if "signing_key" in config:
@@ -49,16 +77,27 @@ class KeyConfig(Config):
             config.get("old_signing_keys", {})
         )
         self.key_refresh_interval = self.parse_duration(
-            config.get("key_refresh_interval", "1d"),
+            config.get("key_refresh_interval", "1d")
         )
-        self.perspectives = self.read_perspectives(
-            config.get("perspectives", {}).get("servers", {
-                "matrix.org": {"verify_keys": {
-                    "ed25519:auto": {
-                        "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
-                    }
-                }}
-            })
+
+        # if neither trusted_key_servers nor perspectives are given, use the default.
+        if "perspectives" not in config and "trusted_key_servers" not in config:
+            key_servers = [{"server_name": "matrix.org"}]
+        else:
+            key_servers = config.get("trusted_key_servers", [])
+
+        if not isinstance(key_servers, list):
+            raise ConfigError(
+                "trusted_key_servers, if given, must be a list, not a %s"
+                % (type(key_servers).__name__,)
+            )
+
+        # merge the 'perspectives' config into the 'trusted_key_servers' config.
+        key_servers.extend(_perspectives_to_key_servers(config))
+
+        # list of TrustedKeyServer objects
+        self.key_servers = list(
+            _parse_key_servers(key_servers, self.federation_verify_certificates)
         )
 
         self.macaroon_secret_key = config.get(
@@ -78,8 +117,9 @@ class KeyConfig(Config):
         # falsification of values
         self.form_secret = config.get("form_secret", None)
 
-    def default_config(self, config_dir_path, server_name, generate_secrets=False,
-                       **kwargs):
+    def default_config(
+        self, config_dir_path, server_name, generate_secrets=False, **kwargs
+    ):
         base_key_name = os.path.join(config_dir_path, server_name)
 
         if generate_secrets:
@@ -91,7 +131,8 @@ class KeyConfig(Config):
             macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
             form_secret = "# form_secret: <PRIVATE STRING>"
 
-        return """\
+        return (
+            """\
         # a secret which is used to sign access tokens. If none is specified,
         # the registration_shared_secret is used, if one is given; otherwise,
         # a secret key is derived from the signing key.
@@ -133,33 +174,53 @@ class KeyConfig(Config):
 
         # The trusted servers to download signing keys from.
         #
-        #perspectives:
-        #  servers:
-        #    "matrix.org":
-        #      verify_keys:
-        #        "ed25519:auto":
-        #          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
-        """ % locals()
-
-    def read_perspectives(self, perspectives_servers):
-        servers = {}
-        for server_name, server_config in perspectives_servers.items():
-            for key_id, key_data in server_config["verify_keys"].items():
-                if is_signing_algorithm_supported(key_id):
-                    key_base64 = key_data["key"]
-                    key_bytes = decode_base64(key_base64)
-                    verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                    servers.setdefault(server_name, {})[key_id] = verify_key
-        return servers
+        # When we need to fetch a signing key, each server is tried in parallel.
+        #
+        # Normally, the connection to the key server is validated via TLS certificates.
+        # Additional security can be provided by configuring a `verify key`, which
+        # will make synapse check that the response is signed by that key.
+        #
+        # This setting supersedes an older setting named `perspectives`. The old format
+        # is still supported for backwards-compatibility, but it is deprecated.
+        #
+        # Options for each entry in the list include:
+        #
+        #    server_name: the name of the server. required.
+        #
+        #    verify_keys: an optional map from key id to base64-encoded public key.
+        #       If specified, we will check that the response is signed by at least
+        #       one of the given keys.
+        #
+        #    accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
+        #       and federation_verify_certificates is not `true`, synapse will refuse
+        #       to start, because this would allow anyone who can spoof DNS responses
+        #       to masquerade as the trusted key server. If you know what you are doing
+        #       and are sure that your network environment provides a secure connection
+        #       to the key server, you can set this to `true` to override this
+        #       behaviour.
+        #
+        # An example configuration might look like:
+        #
+        #trusted_key_servers:
+        #  - server_name: "my_trusted_server.example.com"
+        #    verify_keys:
+        #      "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
+        #  - server_name: "my_other_trusted_server.example.com"
+        #
+        # The default configuration is:
+        #
+        #trusted_key_servers:
+        #  - server_name: "matrix.org"
+        """
+            % locals()
+        )
     def read_signing_key(self, signing_key_path):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
         except Exception as e:
-            raise ConfigError(
-                "Error reading signing_key: %s" % (str(e))
-            )
+            raise ConfigError("Error reading signing_key: %s" % (str(e)))
 
     def read_old_signing_keys(self, old_signing_keys):
         keys = {}
@@ -182,9 +243,7 @@ class KeyConfig(Config):
         if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
-                write_signing_keys(
-                    signing_key_file, (generate_signing_key(key_id),),
-                )
+                write_signing_keys(signing_key_file, (generate_signing_key(key_id),))
         else:
             signing_keys = self.read_file(signing_key_path, "signing_key")
             if len(signing_keys.split("\n")[0].split()) == 1:
@@ -194,6 +253,116 @@ class KeyConfig(Config):
                     NACL_ED25519, key_id, signing_keys.split("\n")[0]
                 )
                 with open(signing_key_path, "w") as signing_key_file:
-                    write_signing_keys(
-                        signing_key_file, (key,),
-                    )
+                    write_signing_keys(signing_key_file, (key,))
+
+
+def _perspectives_to_key_servers(config):
+    """Convert old-style 'perspectives' configs into new-style 'trusted_key_servers'
+
+    Returns an iterable of entries to add to trusted_key_servers.
+    """
+
+    # 'perspectives' looks like:
+    #
+    # {
+    #     "servers": {
+    #         "matrix.org": {
+    #             "verify_keys": {
+    #                 "ed25519:auto": {
+    #                     "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+    #                 }
+    #             }
+    #         }
+    #     }
+    # }
+    #
+    # 'trusted_keys' looks like:
+    #
+    # [
+    #     {
+    #         "server_name": "matrix.org",
+    #         "verify_keys": {
+    #             "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
+    #         }
+    #     }
+    # ]
+
+    perspectives_servers = config.get("perspectives", {}).get("servers", {})
+
+    for server_name, server_opts in perspectives_servers.items():
+        trusted_key_server_entry = {"server_name": server_name}
+        verify_keys = server_opts.get("verify_keys")
+        if verify_keys is not None:
+            trusted_key_server_entry["verify_keys"] = {
+                key_id: key_data["key"] for key_id, key_data in verify_keys.items()
+            }
+        yield trusted_key_server_entry
+
+
+TRUSTED_KEY_SERVERS_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "description": "schema for the trusted_key_servers setting",
+    "type": "array",
+    "items": {
+        "type": "object",
+        "properties": {
+            "server_name": {"type": "string"},
+            "verify_keys": {
+                "type": "object",
+                # each key must be a base64 string
+                "additionalProperties": {"type": "string"},
+            },
+        },
+        "required": ["server_name"],
+    },
+}
+
+
+def _parse_key_servers(key_servers, federation_verify_certificates):
+    try:
+        jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
+    except jsonschema.ValidationError as e:
+        raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)
+
+    for server in key_servers:
+        server_name = server["server_name"]
+        result = TrustedKeyServer(server_name=server_name)
+
+        verify_keys = server.get("verify_keys")
+        if verify_keys is not None:
+            result.verify_keys = {}
+            for key_id, key_base64 in verify_keys.items():
+                if not is_signing_algorithm_supported(key_id):
+                    raise ConfigError(
+                        "Unsupported signing algorithm on key %s for server %s in "
+                        "trusted_key_servers" % (key_id, server_name)
+                    )
+                try:
+                    key_bytes = decode_base64(key_base64)
+                    verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                except Exception as e:
+                    raise ConfigError(
+                        "Unable to parse key %s for server %s in "
+                        "trusted_key_servers: %s" % (key_id, server_name, e)
+                    )
+
+                result.verify_keys[key_id] = verify_key
+
+        if (
+            not federation_verify_certificates and
+            not server.get("accept_keys_insecurely")
+        ):
+            _assert_keyserver_has_verify_keys(result)
+
+        yield result
+
+
+def _assert_keyserver_has_verify_keys(trusted_key_server):
+    if not trusted_key_server.verify_keys:
+        raise ConfigError(INSECURE_NOTARY_ERROR)
+
+    # also check that they are not blindly checking the old matrix.org key
+    if trusted_key_server.server_name == "matrix.org" and any(
+        key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys
+    ):
+        raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR)
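A quick, standalone sanity check of the schema validation above (a trimmed copy of the schema; the server entries are made up):

```python
import jsonschema

TRUSTED_KEY_SERVERS_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "server_name": {"type": "string"},
            "verify_keys": {
                "type": "object",
                "additionalProperties": {"type": "string"},
            },
        },
        "required": ["server_name"],
    },
}

good = [{"server_name": "matrix.org"}]
bad = [{"verify_keys": {}}]  # missing the required server_name

jsonschema.validate(good, TRUSTED_KEY_SERVERS_SCHEMA)  # passes silently
try:
    jsonschema.validate(bad, TRUSTED_KEY_SERVERS_SCHEMA)
except jsonschema.ValidationError as e:
    print("rejected:", e.message)
```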
@@ -20,6 +20,7 @@ import os.path
 
 from netaddr import IPSet
 
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.python_dependencies import DependencyException, check_requirements
 
@@ -35,6 +36,8 @@ logger = logging.Logger(__name__)
 # in the list.
 DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']
 
+DEFAULT_ROOM_VERSION = "4"
+
 
 class ServerConfig(Config):
 
@@ -88,6 +91,22 @@ class ServerConfig(Config):
             "restrict_public_rooms_to_local_users", False,
         )
 
+        default_room_version = config.get(
+            "default_room_version", DEFAULT_ROOM_VERSION,
+        )
+
+        # Ensure room version is a str
+        default_room_version = str(default_room_version)
+
+        if default_room_version not in KNOWN_ROOM_VERSIONS:
+            raise ConfigError(
+                "Unknown default_room_version: %s, known room versions: %s" %
+                (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
+            )
+
+        # Get the actual room version object rather than just the identifier
+        self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
+
         # whether to enable search. If disabled, new entries will not be inserted
         # into the search tables and they will not be indexed. Users will receive
         # errors when attempting to search for messages.
@@ -101,6 +120,11 @@ class ServerConfig(Config):
             "block_non_admin_invites", False,
         )
 
+        # Whether to enable experimental MSC1849 (aka relations) support
+        self.experimental_msc1849_support_enabled = config.get(
+            "experimental_msc1849_support_enabled", False,
+        )
+
         # Options to control access by tracking MAU
         self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
         self.max_mau_value = 0
@@ -305,6 +329,10 @@ class ServerConfig(Config):
             unsecure_port = 8008
 
         pid_file = os.path.join(data_dir_path, "homeserver.pid")
+
+        # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
+        # default config string
+        default_room_version = DEFAULT_ROOM_VERSION
         return """\
         ## Server ##
 
@@ -379,6 +407,16 @@ class ServerConfig(Config):
         #
         #restrict_public_rooms_to_local_users: true
 
+        # The default room version for newly created rooms.
+        #
+        # Known room versions are listed here:
+        # https://matrix.org/docs/spec/#complete-list-of-room-versions
+        #
+        # For example, for room version 1, default_room_version should be set
+        # to "1".
+        #
+        #default_room_version: "%(default_room_version)s"
+
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
         #
         #gc_thresholds: [700, 10, 10]
@@ -547,6 +585,22 @@ class ServerConfig(Config):
 
         # Monthly Active User Blocking
         #
+        # Used in cases where the admin or server owner wants to limit to the
+        # number of monthly active users.
+        #
+        # 'limit_usage_by_mau' disables/enables monthly active user blocking. When
+        # enabled and a limit is reached the server returns a 'ResourceLimitError'
+        # with error type Codes.RESOURCE_LIMIT_EXCEEDED
+        #
+        # 'max_mau_value' is the hard limit of monthly active users above which
+        # the server will start blocking user actions.
+        #
+        # 'mau_trial_days' is a means to add a grace period for active users. It
+        # means that users must be active for this number of days before they
+        # can be considered active and guards against the case where lots of users
+        # sign up in a short space of time never to return after their initial
+        # session.
+        #
         #limit_usage_by_mau: False
        #max_mau_value: 50
        #mau_trial_days: 2
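A plain-Python sketch of the gating the comments describe (the real check is more involved, e.g. trial-day users are excluded from the count before comparing):

```python
def is_mau_blocked(current_mau, config):
    """Return True if a new action should be rejected under MAU blocking."""
    if not config.get("limit_usage_by_mau", False):
        return False
    return current_mau >= config.get("max_mau_value", 0)

assert not is_mau_blocked(100, {})  # blocking disabled by default
assert not is_mau_blocked(49, {"limit_usage_by_mau": True, "max_mau_value": 50})
assert is_mau_blocked(50, {"limit_usage_by_mau": True, "max_mau_value": 50})
```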
synapse/config/stats.py (new file, 60 lines)
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import division
+
+import sys
+
+from ._base import Config
+
+
+class StatsConfig(Config):
+    """Stats Configuration
+    Configuration for the behaviour of synapse's stats engine
+    """
+
+    def read_config(self, config):
+        self.stats_enabled = True
+        self.stats_bucket_size = 86400
+        self.stats_retention = sys.maxsize
+        stats_config = config.get("stats", None)
+        if stats_config:
+            self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
+            self.stats_bucket_size = (
+                self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000
+            )
+            self.stats_retention = (
+                self.parse_duration(
+                    stats_config.get("retention", "%ds" % (sys.maxsize,))
+                )
+                / 1000
+            )
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Local statistics collection. Used in populating the room directory.
+        #
+        # 'bucket_size' controls how large each statistics timeslice is. It can
+        # be defined in a human readable short form -- e.g. "1d", "1y".
+        #
+        # 'retention' controls how long historical statistics will be kept for.
+        # It can be defined in a human readable short form -- e.g. "1d", "1y".
+        #
+        #
+        #stats:
+        #   enabled: true
+        #   bucket_size: 1d
+        #   retention: 1y
+        """
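To see what read_config produces, a self-contained sketch of the duration handling; this re-implements just enough of Config.parse_duration for the "s"/"d"/"y" short forms (the real parser supports more units):

```python
import sys

def parse_duration(value):
    # Minimal stand-in for Config.parse_duration: returns milliseconds.
    units = {"s": 1000, "d": 86400 * 1000, "y": 365 * 86400 * 1000}
    if isinstance(value, str) and value[-1] in units:
        return int(value[:-1]) * units[value[-1]]
    return int(value)

stats_config = {"enabled": True, "bucket_size": "1d", "retention": "1y"}
bucket_size = parse_duration(stats_config.get("bucket_size", "1d")) / 1000
retention = parse_duration(stats_config.get("retention", "%ds" % (sys.maxsize,))) / 1000
print(bucket_size)  # 86400.0 seconds, i.e. one day per timeslice
print(retention)    # 31536000.0 seconds, i.e. one year of history
```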
@@ -74,7 +74,7 @@ class TlsConfig(Config):
 
         # Whether to verify certificates on outbound federation traffic
         self.federation_verify_certificates = config.get(
-            "federation_verify_certificates", False,
+            "federation_verify_certificates", True,
         )
 
         # Whitelist of domains to not verify certificates for
@@ -107,7 +107,7 @@ class TlsConfig(Config):
         certs = []
         for ca_file in custom_ca_list:
             logger.debug("Reading custom CA certificate file: %s", ca_file)
-            content = self.read_file(ca_file)
+            content = self.read_file(ca_file, "federation_custom_ca_list")
 
             # Parse the CA certificates
             try:
@ -241,12 +241,12 @@ class TlsConfig(Config):
|
|||||||
#
|
#
|
||||||
#tls_private_key_path: "%(tls_private_key_path)s"
|
#tls_private_key_path: "%(tls_private_key_path)s"
|
||||||
|
|
||||||
# Whether to verify TLS certificates when sending federation traffic.
|
# Whether to verify TLS server certificates for outbound federation requests.
|
||||||
#
|
#
|
||||||
# This currently defaults to `false`, however this will change in
|
# Defaults to `true`. To disable certificate verification, uncomment the
|
||||||
# Synapse 1.0 when valid federation certificates will be required.
|
# following line.
|
||||||
#
|
#
|
||||||
#federation_verify_certificates: true
|
#federation_verify_certificates: false
|
||||||
|
|
||||||
# Skip federation certificate verification on the following whitelist
|
# Skip federation certificate verification on the following whitelist
|
||||||
# of domains.
|
# of domains.
|
||||||
|
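The practical effect of this change is that certificate verification flips from opt-in to opt-out. A two-line illustration of the config lookup (plain dicts standing in for the parsed homeserver.yaml):

    # No explicit setting: verification is now on by default.
    assert {}.get("federation_verify_certificates", True) is True
    # An admin must now opt out explicitly.
    assert {"federation_verify_certificates": False}.get(
        "federation_verify_certificates", True
    ) is False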
@@ -47,9 +47,9 @@ class UserDirectoryConfig(Config):
         #
         # 'search_all_users' defines whether to search all users visible to your HS
         # when searching the user directory, rather than limiting to users visible
-        # in public rooms. Defaults to false. If you set it True, you'll have to run
-        # UPDATE user_directory_stream_pos SET stream_id = NULL;
-        # on your database to tell it to rebuild the user_directory search indexes.
+        # in public rooms. Defaults to false. If you set it True, you'll have to
+        # rebuild the user_directory search indexes, see
+        # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
         #
         #user_directory:
         #   enabled: true
@@ -15,10 +15,13 @@
 
 import logging
 
+import idna
+from service_identity import VerificationError
+from service_identity.pyopenssl import verify_hostname, verify_ip_address
 from zope.interface import implementer
 
 from OpenSSL import SSL, crypto
-from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName
+from twisted.internet._sslverify import _defaultCurveName
 from twisted.internet.abstract import isIPAddress, isIPv6Address
 from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
 from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust
@@ -56,79 +59,19 @@ class ServerContextFactory(ContextFactory):
         return self._context
 
 
-def _idnaBytes(text):
-    """
-    Convert some text typed by a human into some ASCII bytes. This is a
-    copy of twisted.internet._idna._idnaBytes. For documentation, see the
-    twisted documentation.
-    """
-    try:
-        import idna
-    except ImportError:
-        return text.encode("idna")
-    else:
-        return idna.encode(text)
-
-
-def _tolerateErrors(wrapped):
-    """
-    Wrap up an info_callback for pyOpenSSL so that if something goes wrong
-    the error is immediately logged and the connection is dropped if possible.
-    This is a copy of twisted.internet._sslverify._tolerateErrors. For
-    documentation, see the twisted documentation.
-    """
-
-    def infoCallback(connection, where, ret):
-        try:
-            return wrapped(connection, where, ret)
-        except:  # noqa: E722, taken from the twisted implementation
-            f = Failure()
-            logger.exception("Error during info_callback")
-            connection.get_app_data().failVerification(f)
-
-    return infoCallback
-
-
-@implementer(IOpenSSLClientConnectionCreator)
-class ClientTLSOptionsNoVerify(object):
-    """
-    Client creator for TLS without certificate identity verification. This is a
-    copy of twisted.internet._sslverify.ClientTLSOptions with the identity
-    verification left out. For documentation, see the twisted documentation.
-    """
-
-    def __init__(self, hostname, ctx):
-        self._ctx = ctx
-
-        if isIPAddress(hostname) or isIPv6Address(hostname):
-            self._hostnameBytes = hostname.encode('ascii')
-            self._sendSNI = False
-        else:
-            self._hostnameBytes = _idnaBytes(hostname)
-            self._sendSNI = True
-
-        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))
-
-    def clientConnectionForTLS(self, tlsProtocol):
-        context = self._ctx
-        connection = SSL.Connection(context, None)
-        connection.set_app_data(tlsProtocol)
-        return connection
-
-    def _identityVerifyingInfoCallback(self, connection, where, ret):
-        # Literal IPv4 and IPv6 addresses are not permitted
-        # as host names according to the RFCs
-        if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
-            connection.set_tlsext_host_name(self._hostnameBytes)
-
-
 class ClientTLSOptionsFactory(object):
-    """Factory for Twisted ClientTLSOptions that are used to make connections
-    to remote servers for federation."""
+    """Factory for Twisted SSLClientConnectionCreators that are used to make connections
+    to remote servers for federation.
+
+    Uses one of two OpenSSL context objects for all connections, depending on whether
+    we should do SSL certificate verification.
+
+    get_options decides whether we should do SSL certificate verification and
+    constructs an SSLClientConnectionCreator factory accordingly.
+    """
 
     def __init__(self, config):
         self._config = config
-        self._options_noverify = CertificateOptions()
 
         # Check if we're using a custom list of a CA certificates
         trust_root = config.federation_ca_trust_root
@@ -136,11 +79,13 @@ class ClientTLSOptionsFactory(object):
             # Use CA root certs provided by OpenSSL
             trust_root = platformTrust()
 
-        self._options_verify = CertificateOptions(trustRoot=trust_root)
+        self._verify_ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
+        self._verify_ssl_context.set_info_callback(self._context_info_cb)
+
+        self._no_verify_ssl_context = CertificateOptions().getContext()
+        self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
 
     def get_options(self, host):
-        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
-
         # Check if certificate verification has been enabled
         should_verify = self._config.federation_verify_certificates
 
@@ -151,6 +96,93 @@ class ClientTLSOptionsFactory(object):
                     should_verify = False
                     break
 
-        if should_verify:
-            return ClientTLSOptions(host, self._options_verify._makeContext())
-        return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext())
+        ssl_context = (
+            self._verify_ssl_context if should_verify else self._no_verify_ssl_context
+        )
+
+        return SSLClientConnectionCreator(host, ssl_context, should_verify)
+
+    @staticmethod
+    def _context_info_cb(ssl_connection, where, ret):
+        """The 'information callback' for our openssl context object."""
+        # we assume that the app_data on the connection object has been set to
+        # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
+        tls_protocol = ssl_connection.get_app_data()
+        try:
+            # ... we further assume that SSLClientConnectionCreator has set the
+            # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
+            tls_protocol._synapse_tls_verifier.verify_context_info_cb(
+                ssl_connection, where
+            )
+        except:  # noqa: E722, taken from the twisted implementation
+            logger.exception("Error during info_callback")
+            f = Failure()
+            tls_protocol.failVerification(f)
+
+
+@implementer(IOpenSSLClientConnectionCreator)
+class SSLClientConnectionCreator(object):
+    """Creates openssl connection objects for client connections.
+
+    Replaces twisted.internet.ssl.ClientTLSOptions
+    """
+
+    def __init__(self, hostname, ctx, verify_certs):
+        self._ctx = ctx
+        self._verifier = ConnectionVerifier(hostname, verify_certs)
+
+    def clientConnectionForTLS(self, tls_protocol):
+        context = self._ctx
+        connection = SSL.Connection(context, None)
+
+        # as per twisted.internet.ssl.ClientTLSOptions, we set the application
+        # data to our TLSMemoryBIOProtocol...
+        connection.set_app_data(tls_protocol)
+
+        # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
+        # tls_protocol so that the SSL context's info callback has something to
+        # call to do the cert verification.
+        setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
+        return connection
+
+
+class ConnectionVerifier(object):
+    """Set the SNI, and do cert verification
+
+    This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by
+    the ssl context's info callback.
+    """
+
+    # This code is based on twisted.internet.ssl.ClientTLSOptions.
+
+    def __init__(self, hostname, verify_certs):
+        self._verify_certs = verify_certs
+
+        if isIPAddress(hostname) or isIPv6Address(hostname):
+            self._hostnameBytes = hostname.encode("ascii")
+            self._is_ip_address = True
+        else:
+            # twisted's ClientTLSOptions falls back to the stdlib impl here if
+            # idna is not installed, but points out that lacks support for
+            # IDNA2008 (http://bugs.python.org/issue17305).
+            #
+            # We can rely on having idna.
+            self._hostnameBytes = idna.encode(hostname)
+            self._is_ip_address = False
+
+        self._hostnameASCII = self._hostnameBytes.decode("ascii")
+
+    def verify_context_info_cb(self, ssl_connection, where):
+        if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
+            ssl_connection.set_tlsext_host_name(self._hostnameBytes)
+
+        if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs:
+            try:
+                if self._is_ip_address:
+                    verify_ip_address(ssl_connection, self._hostnameASCII)
+                else:
+                    verify_hostname(ssl_connection, self._hostnameASCII)
+            except VerificationError:
+                f = Failure()
+                tls_protocol = ssl_connection.get_app_data()
+                tls_protocol.failVerification(f)
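Taken together, the new classes split the work that ClientTLSOptions used to do in one place: a shared OpenSSL context per verification mode, a per-connection creator, and a verifier driven by the context's info callback. A sketch of the flow, using only names from the diff above (the hostname is illustrative):

    factory = ClientTLSOptionsFactory(config)
    creator = factory.get_options("matrix.example.com")  # an SSLClientConnectionCreator

    # Twisted calls creator.clientConnectionForTLS(tls_protocol) when TLS starts;
    # that attaches the ConnectionVerifier to the protocol. The shared context's
    # info callback (_context_info_cb) then fires during the handshake:
    #   SSL_CB_HANDSHAKE_START -> set SNI (unless the host is an IP literal)
    #   SSL_CB_HANDSHAKE_DONE  -> verify_hostname()/verify_ip_address(), failing
    #                             the connection via failVerification() on mismatch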
@@ -31,7 +31,11 @@ logger = logging.getLogger(__name__)
 def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     """Check whether the hash for this PDU matches the contents"""
     name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
-    logger.debug("Expecting hash: %s", encode_base64(expected_hash))
+    logger.debug(
+        "Verifying content hash on %s (expecting: %s)",
+        event.event_id,
+        encode_base64(expected_hash),
+    )
 
     # some malformed events lack a 'hashes'. Protect against it being missing
     # or a weird type by basically treating it the same as an unhashed event.
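For reference, a sketch of what the hash being logged here covers. This follows the Matrix spec's content-hash algorithm (strip the unhashed fields, then hash the canonical JSON); it is an approximation for orientation, not a copy of compute_content_hash:

    import hashlib

    from canonicaljson import encode_canonical_json

    def content_hash_sketch(pdu_json):
        # Remove the fields excluded from the hash, then hash what remains.
        pdu = dict(pdu_json)
        for key in ("unsigned", "signatures", "hashes", "age_ts"):
            pdu.pop(key, None)
        return "sha256", hashlib.sha256(encode_canonical_json(pdu)).digest()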
File diff suppressed because it is too large
@@ -21,6 +21,7 @@ import six
 
 from unpaddedbase64 import encode_base64
 
+from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
@@ -335,13 +336,32 @@ class FrozenEventV2(EventBase):
         return self.__repr__()
 
     def __repr__(self):
-        return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
+        return "<%s event_id='%s', type='%s', state_key='%s'>" % (
+            self.__class__.__name__,
             self.event_id,
             self.get("type", None),
             self.get("state_key", None),
         )
 
 
+class FrozenEventV3(FrozenEventV2):
+    """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
+    format_version = EventFormatVersions.V3  # All events of this type are V3
+
+    @property
+    def event_id(self):
+        # We have to import this here as otherwise we get an import loop which
+        # is hard to break.
+        from synapse.crypto.event_signing import compute_event_reference_hash
+
+        if self._event_id:
+            return self._event_id
+        self._event_id = "$" + encode_base64(
+            compute_event_reference_hash(self)[1], urlsafe=True
+        )
+        return self._event_id
+
+
 def room_version_to_event_format(room_version):
     """Converts a room version string to the event format
 
@@ -350,12 +370,15 @@ def room_version_to_event_format(room_version):
 
     Returns:
         int
+
+    Raises:
+        UnsupportedRoomVersionError if the room version is unknown
     """
     v = KNOWN_ROOM_VERSIONS.get(room_version)
 
     if not v:
-        # We should have already checked version, so this should not happen
-        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+        # this can happen if support is withdrawn for a room version
+        raise UnsupportedRoomVersionError()
 
     return v.event_format
 
@@ -376,6 +399,8 @@ def event_type_from_format_version(format_version):
         return FrozenEvent
     elif format_version == EventFormatVersions.V2:
         return FrozenEventV2
+    elif format_version == EventFormatVersions.V3:
+        return FrozenEventV3
     else:
         raise Exception(
             "No event format %r" % (format_version,)
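The practical consequence of FrozenEventV3 is that an event ID is derived from the event's reference hash rather than minted by the origin server. Illustratively (the hash bytes are made up; assumes a version of unpaddedbase64 with the urlsafe flag used above):

    from unpaddedbase64 import encode_base64

    reference_hash = b"\x01" * 32  # stand-in for compute_event_reference_hash(event)[1]
    event_id = "$" + encode_base64(reference_hash, urlsafe=True)
    # "$AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE"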
@@ -18,6 +18,7 @@ import attr
 from twisted.internet import defer
 
 from synapse.api.constants import MAX_DEPTH
+from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import (
     KNOWN_EVENT_FORMAT_VERSIONS,
     KNOWN_ROOM_VERSIONS,
@@ -75,6 +76,7 @@ class EventBuilder(object):
     # someone tries to get them when they don't exist.
     _state_key = attr.ib(default=None)
     _redacts = attr.ib(default=None)
+    _origin_server_ts = attr.ib(default=None)
 
     internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))
 
@@ -141,6 +143,9 @@ class EventBuilder(object):
         if self._redacts is not None:
             event_dict["redacts"] = self._redacts
 
+        if self._origin_server_ts is not None:
+            event_dict["origin_server_ts"] = self._origin_server_ts
+
         defer.returnValue(
             create_local_event_from_event_dict(
                 clock=self._clock,
@@ -178,9 +183,8 @@ class EventBuilderFactory(object):
         """
         v = KNOWN_ROOM_VERSIONS.get(room_version)
         if not v:
-            raise Exception(
-                "No event format defined for version %r" % (room_version,)
-            )
+            # this can happen if support is withdrawn for a room version
+            raise UnsupportedRoomVersionError()
         return self.for_room_version(v, key_values)
 
     def for_room_version(self, room_version, key_values):
@@ -209,6 +213,7 @@ class EventBuilderFactory(object):
             content=key_values.get("content", {}),
             unsigned=key_values.get("unsigned", {}),
             redacts=key_values.get("redacts", None),
+            origin_server_ts=key_values.get("origin_server_ts", None),
         )
 
 
@@ -245,7 +250,7 @@ def create_local_event_from_event_dict(clock, hostname, signing_key,
         event_dict["event_id"] = _create_event_id(clock, hostname)
 
     event_dict["origin"] = hostname
-    event_dict["origin_server_ts"] = time_now
+    event_dict.setdefault("origin_server_ts", time_now)
 
     event_dict.setdefault("unsigned", {})
     age = event_dict["unsigned"].pop("age", 0)
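The switch from plain assignment to setdefault in create_local_event_from_event_dict is what lets the new origin_server_ts builder field take effect; a two-case illustration:

    time_now = 1500000000000

    event_dict = {"origin_server_ts": 1234}
    event_dict.setdefault("origin_server_ts", time_now)
    assert event_dict["origin_server_ts"] == 1234      # caller's value wins

    event_dict = {}
    event_dict.setdefault("origin_server_ts", time_now)
    assert event_dict["origin_server_ts"] == time_now  # old behaviour as fallback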
@@ -21,7 +21,7 @@ from frozendict import frozendict
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, RelationTypes
 from synapse.util.async_helpers import yieldable_gather_results
 
 from . import EventBase
@@ -324,21 +324,70 @@ class EventClientSerializer(object):
     """
 
     def __init__(self, hs):
-        pass
+        self.store = hs.get_datastore()
+        self.experimental_msc1849_support_enabled = (
+            hs.config.experimental_msc1849_support_enabled
+        )
 
-    def serialize_event(self, event, time_now, **kwargs):
+    @defer.inlineCallbacks
+    def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
         """Serializes a single event.
 
         Args:
             event (EventBase)
             time_now (int): The current time in milliseconds
+            bundle_aggregations (bool): Whether to bundle in related events
             **kwargs: Arguments to pass to `serialize_event`
 
         Returns:
             Deferred[dict]: The serialized event
         """
-        event = serialize_event(event, time_now, **kwargs)
-        return defer.succeed(event)
+        # To handle the case of presence events and the like
+        if not isinstance(event, EventBase):
+            defer.returnValue(event)
+
+        event_id = event.event_id
+        serialized_event = serialize_event(event, time_now, **kwargs)
+
+        # If MSC1849 is enabled then we need to look if there are any relations
+        # we need to bundle in with the event
+        if self.experimental_msc1849_support_enabled and bundle_aggregations:
+            annotations = yield self.store.get_aggregation_groups_for_event(
+                event_id,
+            )
+            references = yield self.store.get_relations_for_event(
+                event_id, RelationTypes.REFERENCE, direction="f",
+            )
+
+            if annotations.chunk:
+                r = serialized_event["unsigned"].setdefault("m.relations", {})
+                r[RelationTypes.ANNOTATION] = annotations.to_dict()
+
+            if references.chunk:
+                r = serialized_event["unsigned"].setdefault("m.relations", {})
+                r[RelationTypes.REFERENCE] = references.to_dict()
+
+            edit = None
+            if event.type == EventTypes.Message:
+                edit = yield self.store.get_applicable_edit(event_id)
+
+            if edit:
+                # If there is an edit replace the content, preserving existing
+                # relations.
+
+                relations = event.content.get("m.relates_to")
+                serialized_event["content"] = edit.content.get("m.new_content", {})
+                if relations:
+                    serialized_event["content"]["m.relates_to"] = relations
+                else:
+                    serialized_event["content"].pop("m.relates_to", None)
+
+                r = serialized_event["unsigned"].setdefault("m.relations", {})
+                r[RelationTypes.REPLACE] = {
+                    "event_id": edit.event_id,
+                }
+
+        defer.returnValue(serialized_event)
 
     def serialize_events(self, events, time_now, **kwargs):
         """Serializes multiple events.
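For orientation, a serialized event with bundled aggregations ends up shaped roughly as follows (every identifier and count below is invented; the m.* keys are the RelationTypes constants used above):

    serialized_event = {
        "event_id": "$abc123",
        "type": "m.room.message",
        "content": {"msgtype": "m.text", "body": "hello"},
        "unsigned": {
            "m.relations": {
                "m.annotation": {"chunk": [{"type": "m.reaction", "key": "+1", "count": 2}]},
                "m.reference": {"chunk": [{"event_id": "$ref789"}]},
                "m.replace": {"event_id": "$edit456"},
            },
        },
    }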
@@ -223,9 +223,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
     the signatures are valid, or fail (with a SynapseError) if not.
     """
 
-    # (currently this is written assuming the v1 room structure; we'll probably want a
-    # separate function for checking v2 rooms)
-
     # we want to check that the event is signed by:
     #
     # (a) the sender's server
@@ -257,6 +254,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
         for p in pdus
     ]
 
+    v = KNOWN_ROOM_VERSIONS.get(room_version)
+    if not v:
+        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
     # First we check that the sender event is signed by the sender's domain
     # (except if its a 3pid invite, in which case it may be sent by any server)
     pdus_to_check_sender = [
@@ -264,10 +265,17 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
         if not _is_invite_via_3pid(p.pdu)
     ]
 
-    more_deferreds = keyring.verify_json_objects_for_server([
-        (p.sender_domain, p.redacted_pdu_json)
-        for p in pdus_to_check_sender
-    ])
+    more_deferreds = keyring.verify_json_objects_for_server(
+        [
+            (
+                p.sender_domain,
+                p.redacted_pdu_json,
+                p.pdu.origin_server_ts if v.enforce_key_validity else 0,
+                p.pdu.event_id,
+            )
+            for p in pdus_to_check_sender
+        ]
+    )
 
     def sender_err(e, pdu_to_check):
         errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
@@ -287,20 +295,23 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
     # event id's domain (normally only the case for joins/leaves), and add additional
     # checks. Only do this if the room version has a concept of event ID domain
     # (ie, the room version uses old-style non-hash event IDs).
-    v = KNOWN_ROOM_VERSIONS.get(room_version)
-    if not v:
-        raise RuntimeError("Unrecognized room version %s" % (room_version,))
-
     if v.event_format == EventFormatVersions.V1:
         pdus_to_check_event_id = [
             p for p in pdus_to_check
             if p.sender_domain != get_domain_from_id(p.pdu.event_id)
         ]
 
-        more_deferreds = keyring.verify_json_objects_for_server([
-            (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
-            for p in pdus_to_check_event_id
-        ])
+        more_deferreds = keyring.verify_json_objects_for_server(
+            [
+                (
+                    get_domain_from_id(p.pdu.event_id),
+                    p.redacted_pdu_json,
+                    p.pdu.origin_server_ts if v.enforce_key_validity else 0,
+                    p.pdu.event_id,
+                )
+                for p in pdus_to_check_event_id
+            ]
+        )
 
         def event_err(e, pdu_to_check):
             errmsg = (
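The entries handed to verify_json_objects_for_server grow from 2-tuples to 4-tuples here. Reading from this diff alone, the added fields appear to be a timestamp at which the signing key must still be valid (0 disabling the check, hence the enforce_key_validity guard) and the event ID for log messages. A sketch of one entry with hypothetical values:

    sender_domain = "example.org"
    redacted_pdu_json = {"type": "m.room.message", "signatures": {}}
    origin_server_ts = 1500000000000
    enforce_key_validity = True

    entry = (
        sender_domain,                                     # server whose signature we check
        redacted_pdu_json,                                 # the JSON object to verify
        origin_server_ts if enforce_key_validity else 0,   # key must be valid at this time
        "$event_id:example.org",                           # identifies the event in logs
    )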
@@ -17,7 +17,6 @@
 import copy
 import itertools
 import logging
-import random
 
 from six.moves import range
 
@@ -233,7 +232,8 @@ class FederationClient(FederationBase):
             moving to the next destination. None indicates no timeout.
 
         Returns:
-            Deferred: Results in the requested PDU.
+            Deferred: Results in the requested PDU, or None if we were unable to find
+                it.
         """
 
         # TODO: Rate limit the number of times we try and get the same event.
@@ -258,7 +258,12 @@ class FederationClient(FederationBase):
                     destination, event_id, timeout=timeout,
                 )
 
-                logger.debug("transaction_data %r", transaction_data)
+                logger.debug(
+                    "retrieved event id %s from %s: %r",
+                    event_id,
+                    destination,
+                    transaction_data,
+                )
 
                 pdu_list = [
                     event_from_pdu_json(p, format_ver, outlier=outlier)
@@ -280,6 +285,7 @@ class FederationClient(FederationBase):
                     "Failed to get PDU %s from %s because %s",
                     event_id, destination, e,
                 )
+                continue
             except NotRetryingDestination as e:
                 logger.info(str(e))
                 continue
@@ -326,12 +332,16 @@ class FederationClient(FederationBase):
         state_event_ids = result["pdu_ids"]
         auth_event_ids = result.get("auth_chain_ids", [])
 
-        fetched_events, failed_to_fetch = yield self.get_events(
-            [destination], room_id, set(state_event_ids + auth_event_ids)
+        fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
+            destination, room_id, set(state_event_ids + auth_event_ids)
         )
 
         if failed_to_fetch:
-            logger.warn("Failed to get %r", failed_to_fetch)
+            logger.warning(
+                "Failed to fetch missing state/auth events for %s: %s",
+                room_id,
+                failed_to_fetch
+            )
 
         event_map = {
             ev.event_id: ev for ev in fetched_events
@@ -397,27 +407,20 @@ class FederationClient(FederationBase):
         defer.returnValue((signed_pdus, signed_auth))
 
     @defer.inlineCallbacks
-    def get_events(self, destinations, room_id, event_ids, return_local=True):
-        """Fetch events from some remote destinations, checking if we already
-        have them.
+    def get_events_from_store_or_dest(self, destination, room_id, event_ids):
+        """Fetch events from a remote destination, checking if we already have them.
 
         Args:
-            destinations (list)
+            destination (str)
             room_id (str)
             event_ids (list)
-            return_local (bool): Whether to include events we already have in
-                the DB in the returned list of events
 
         Returns:
             Deferred: A deferred resolving to a 2-tuple where the first is a list of
            events and the second is a list of event ids that we failed to fetch.
         """
-        if return_local:
-            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
-            signed_events = list(seen_events.values())
-        else:
-            seen_events = yield self.store.have_seen_events(event_ids)
-            signed_events = []
+        seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
+        signed_events = list(seen_events.values())
 
         failed_to_fetch = set()
 
@@ -428,10 +431,11 @@ class FederationClient(FederationBase):
         if not missing_events:
             defer.returnValue((signed_events, failed_to_fetch))
 
-        def random_server_list():
-            srvs = list(destinations)
-            random.shuffle(srvs)
-            return srvs
+        logger.debug(
+            "Fetching unknown state/auth events %s for room %s",
+            missing_events,
+            event_ids,
+        )
 
         room_version = yield self.store.get_room_version(room_id)
 
@@ -443,7 +447,7 @@ class FederationClient(FederationBase):
         deferreds = [
             run_in_background(
                 self.get_pdu,
-                destinations=random_server_list(),
+                destinations=[destination],
                 event_id=e_id,
                 room_version=room_version,
             )
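The added continue matters because this code iterates over candidate destinations: without it, a SynapseError from one server would fall through to the processing below instead of moving on to the next server. A runnable condensation of the loop's shape (fetch is a hypothetical callable standing in for the transport call):

    import logging

    logger = logging.getLogger(__name__)

    def get_pdu_sketch(destinations, event_id, fetch):
        """Try each destination in turn; a failure moves on rather than aborting."""
        for destination in destinations:
            try:
                pdu = fetch(destination, event_id)
            except Exception as e:  # stands in for SynapseError and friends
                logger.info("Failed to get PDU %s from %s because %s", event_id, destination, e)
                continue
            if pdu is not None:
                return pdu
        return None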
@@ -33,6 +33,7 @@ from synapse.api.errors import (
     IncompatibleRoomVersionError,
     NotFoundError,
     SynapseError,
+    UnsupportedRoomVersionError,
 )
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.crypto.event_signing import compute_event_signature
@@ -198,11 +199,22 @@ class FederationServer(FederationBase):
 
             try:
                 room_version = yield self.store.get_room_version(room_id)
-                format_ver = room_version_to_event_format(room_version)
             except NotFoundError:
                 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                 continue
 
+            try:
+                format_ver = room_version_to_event_format(room_version)
+            except UnsupportedRoomVersionError:
+                # this can happen if support for a given room version is withdrawn,
+                # so that we still get events for said room.
+                logger.info(
+                    "Ignoring PDU for room %s with unknown version %s",
+                    room_id,
+                    room_version,
+                )
+                continue
+
             event = event_from_pdu_json(p, format_ver)
             pdus_by_room.setdefault(room_id, []).append(event)
 
@@ -349,9 +349,10 @@ class PerDestinationQueue(object):
     @defer.inlineCallbacks
     def _get_new_device_messages(self, limit):
         last_device_list = self._last_device_list_stream_id
-        # Will return at most 20 entries
+
+        # Retrieve list of new device updates to send to the destination
         now_stream_id, results = yield self._store.get_devices_by_remote(
-            self._destination, last_device_list
+            self._destination, last_device_list, limit=limit,
         )
         edus = [
             Edu(
@@ -23,7 +23,11 @@ from twisted.internet import defer
 import synapse
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
 from synapse.api.room_versions import RoomVersions
-from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
+from synapse.api.urls import (
+    FEDERATION_UNSTABLE_PREFIX,
+    FEDERATION_V1_PREFIX,
+    FEDERATION_V2_PREFIX,
+)
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
@@ -90,6 +94,7 @@ class NoAuthenticationError(AuthenticationError):
 
 class Authenticator(object):
     def __init__(self, hs):
+        self._clock = hs.get_clock()
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
@@ -98,6 +103,7 @@ class Authenticator(object):
     # A method just so we can pass 'self' as the authenticator to the Servlets
     @defer.inlineCallbacks
     def authenticate_request(self, request, content):
+        now = self._clock.time_msec()
         json_request = {
             "method": request.method.decode('ascii'),
             "uri": request.uri.decode('ascii'),
@@ -134,7 +140,9 @@ class Authenticator(object):
                 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
             )
 
-        yield self.keyring.verify_json_for_server(origin, json_request)
+        yield self.keyring.verify_json_for_server(
+            origin, json_request, now, "Incoming request"
+        )
 
         logger.info("Request from %s", origin)
         request.authenticated_entity = origin
@@ -1304,6 +1312,30 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
         defer.returnValue((200, new_content))
 
 
+class RoomComplexityServlet(BaseFederationServlet):
+    """
+    Indicates to other servers how complex (and therefore likely
+    resource-intensive) a public room this server knows about is.
+    """
+    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
+    PREFIX = FEDERATION_UNSTABLE_PREFIX
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, room_id):
+
+        store = self.handler.hs.get_datastore()
+
+        is_public = yield store.is_room_world_readable_or_publicly_joinable(
+            room_id
+        )
+
+        if not is_public:
+            raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
+
+        complexity = yield store.get_room_complexity(room_id)
+        defer.returnValue((200, complexity))
+
+
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
     FederationEventServlet,
@@ -1327,6 +1359,7 @@ FEDERATION_SERVLET_CLASSES = (
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
     FederationVersionServlet,
+    RoomComplexityServlet,
 )
 
 OPENID_SERVLET_CLASSES = (
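The servlet's PATH is a regex fragment mounted under the unstable federation prefix, so peers would hit something like /_matrix/federation/unstable/rooms/{roomId}/complexity and, judging from this diff's storage call, get back a small JSON object of complexity factors. A runnable check of the routing pattern (the room ID is invented):

    import re

    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
    m = re.match("^" + PATH + "$", "/rooms/!abc:example.org/complexity")
    assert m is not None and m.group("room_id") == "!abc:example.org"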
@@ -97,10 +97,13 @@ class GroupAttestationSigning(object):
 
         # TODO: We also want to check that *new* attestations that people give
         # us to store are valid for at least a little while.
-        if valid_until_ms < self.clock.time_msec():
+        now = self.clock.time_msec()
+        if valid_until_ms < now:
             raise SynapseError(400, "Attestation expired")
 
-        yield self.keyring.verify_json_for_server(server_name, attestation)
+        yield self.keyring.verify_json_for_server(
+            server_name, attestation, now, "Group attestation"
+        )
 
     def create_attestation(self, group_id, user_id):
         """Create an attestation for the group_id and user_id with default
@@ -162,7 +162,7 @@ class AuthHandler(BaseHandler):
         defer.returnValue(params)
 
     @defer.inlineCallbacks
-    def check_auth(self, flows, clientdict, clientip):
+    def check_auth(self, flows, clientdict, clientip, password_servlet=False):
         """
         Takes a dictionary sent by the client in the login / registration
         protocol and handles the User-Interactive Auth flow.
@@ -186,6 +186,16 @@ class AuthHandler(BaseHandler):
 
             clientip (str): The IP address of the client.
 
+            password_servlet (bool): Whether the request originated from
+                PasswordRestServlet.
+                XXX: This is a temporary hack to distinguish between checking
+                for threepid validations locally (in the case of password
+                resets) and using the identity server (in the case of binding
+                a 3PID during registration). Once we start using the
+                homeserver for both tasks, this distinction will no longer be
+                necessary.
+
+
         Returns:
             defer.Deferred[dict, dict, str]: a deferred tuple of
                 (creds, params, session_id).
@@ -241,7 +251,9 @@ class AuthHandler(BaseHandler):
             if 'type' in authdict:
                 login_type = authdict['type']
                 try:
-                    result = yield self._check_auth_dict(authdict, clientip)
+                    result = yield self._check_auth_dict(
+                        authdict, clientip, password_servlet=password_servlet,
+                    )
                     if result:
                         creds[login_type] = result
                         self._save_session(session)
@@ -351,7 +363,7 @@ class AuthHandler(BaseHandler):
         return sess.setdefault('serverdict', {}).get(key, default)
 
     @defer.inlineCallbacks
-    def _check_auth_dict(self, authdict, clientip):
+    def _check_auth_dict(self, authdict, clientip, password_servlet=False):
         """Attempt to validate the auth dict provided by a client
 
         Args:
@@ -369,7 +381,13 @@ class AuthHandler(BaseHandler):
         login_type = authdict['type']
         checker = self.checkers.get(login_type)
         if checker is not None:
-            res = yield checker(authdict, clientip)
+            # XXX: Temporary workaround for having Synapse handle password resets
+            # See AuthHandler.check_auth for further details
+            res = yield checker(
+                authdict,
+                clientip=clientip,
+                password_servlet=password_servlet,
+            )
             defer.returnValue(res)
 
         # build a v1-login-style dict out of the authdict and fall back to the
@@ -383,7 +401,7 @@ class AuthHandler(BaseHandler):
         defer.returnValue(canonical_id)
 
     @defer.inlineCallbacks
-    def _check_recaptcha(self, authdict, clientip):
+    def _check_recaptcha(self, authdict, clientip, **kwargs):
         try:
             user_response = authdict["response"]
         except KeyError:
@@ -429,20 +447,20 @@ class AuthHandler(BaseHandler):
             defer.returnValue(True)
         raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
 
-    def _check_email_identity(self, authdict, _):
-        return self._check_threepid('email', authdict)
+    def _check_email_identity(self, authdict, **kwargs):
+        return self._check_threepid('email', authdict, **kwargs)
 
-    def _check_msisdn(self, authdict, _):
+    def _check_msisdn(self, authdict, **kwargs):
         return self._check_threepid('msisdn', authdict)
 
-    def _check_dummy_auth(self, authdict, _):
+    def _check_dummy_auth(self, authdict, **kwargs):
         return defer.succeed(True)
 
-    def _check_terms_auth(self, authdict, _):
+    def _check_terms_auth(self, authdict, **kwargs):
         return defer.succeed(True)
 
     @defer.inlineCallbacks
-    def _check_threepid(self, medium, authdict):
+    def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
         if 'threepid_creds' not in authdict:
             raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
 
@@ -451,7 +469,30 @@ class AuthHandler(BaseHandler):
         identity_handler = self.hs.get_handlers().identity_handler
 
         logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
-        threepid = yield identity_handler.threepid_from_creds(threepid_creds)
+        if (
+            not password_servlet
+            or self.hs.config.email_password_reset_behaviour == "remote"
+        ):
+            threepid = yield identity_handler.threepid_from_creds(threepid_creds)
+        elif self.hs.config.email_password_reset_behaviour == "local":
+            row = yield self.store.get_threepid_validation_session(
+                medium,
+                threepid_creds["client_secret"],
+                sid=threepid_creds["sid"],
+                validated=True,
+            )
+
+            threepid = {
+                "medium": row["medium"],
+                "address": row["address"],
+                "validated_at": row["validated_at"],
+            } if row else None
+
+            if row:
+                # Valid threepid returned, delete from the db
+                yield self.store.delete_threepid_session(threepid_creds["sid"])
+            else:
+                raise SynapseError(400, "Password resets are not enabled on this homeserver")
 
         if not threepid:
             raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
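The branching added to _check_threepid reduces to a small decision table. A runnable condensation (the behaviour strings mirror email_password_reset_behaviour in the diff; the helper name is ours):

    def threepid_validation_path(password_servlet, behaviour):
        # Condenses the new branching in AuthHandler._check_threepid above.
        if not password_servlet or behaviour == "remote":
            return "identity-server"        # identity_handler.threepid_from_creds()
        if behaviour == "local":
            return "homeserver-session-db"  # store.get_threepid_validation_session()
        raise ValueError("unexpected email_password_reset_behaviour: %s" % (behaviour,))

    assert threepid_validation_path(False, "remote") == "identity-server"
    assert threepid_validation_path(True, "local") == "homeserver-session-db"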
@@ -122,6 +122,9 @@ class EventStreamHandler(BaseHandler):
 
             chunks = yield self._event_serializer.serialize_events(
                 events, time_now, as_client_event=as_client_event,
+                # We don't bundle "live" events, as otherwise clients
+                # will end up double counting annotations.
+                bundle_aggregations=False,
             )
 
             chunk = {
@@ -35,6 +35,7 @@ from synapse.api.errors import (
     CodeMessageException,
     FederationDeniedError,
     FederationError,
+    RequestSendFailed,
     StoreError,
     SynapseError,
 )
@@ -1920,6 +1921,11 @@ class FederationHandler(BaseHandler):
             event.room_id, latest_event_ids=extrem_ids,
         )
 
+        logger.debug(
+            "Doing soft-fail check for %s: state %s",
+            event.event_id, current_state_ids,
+        )
+
         # Now check if event pass auth against said current state
         auth_types = auth_types_for_event(event)
         current_state_ids = [
@@ -1936,7 +1942,7 @@ class FederationHandler(BaseHandler):
             self.auth.check(room_version, event, auth_events=current_auth_events)
         except AuthError as e:
             logger.warn(
-                "Failed current state auth resolution for %r because %s",
+                "Soft-failing %r because %s",
                 event, e,
             )
             event.internal_metadata.soft_failed = True
@@ -2012,15 +2018,65 @@ class FederationHandler(BaseHandler):
 
         Args:
             origin (str):
-            event (synapse.events.FrozenEvent):
+            event (synapse.events.EventBase):
             context (synapse.events.snapshot.EventContext):
-            auth_events (dict[(str, str)->str]):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):
+                Map from (event_type, state_key) to event
+
+                What we expect the event's auth_events to be, based on the event's
+                position in the dag. I think? maybe??
+
+                Also NB that this function adds entries to it.
+        Returns:
+            defer.Deferred[None]
+        """
+        room_version = yield self.store.get_room_version(event.room_id)
+
+        try:
+            yield self._update_auth_events_and_context_for_auth(
+                origin, event, context, auth_events
+            )
+        except Exception:
+            # We don't really mind if the above fails, so lets not fail
+            # processing if it does. However, it really shouldn't fail so
+            # let's still log as an exception since we'll still want to fix
+            # any bugs.
+            logger.exception(
+                "Failed to double check auth events for %s with remote. "
+                "Ignoring failure and continuing processing of event.",
+                event.event_id,
+            )
+
+        try:
+            self.auth.check(room_version, event, auth_events=auth_events)
+        except AuthError as e:
+            logger.warn("Failed auth resolution for %r because %s", event, e)
+            raise e
+
+    @defer.inlineCallbacks
+    def _update_auth_events_and_context_for_auth(
+        self, origin, event, context, auth_events
+    ):
+        """Helper for do_auth. See there for docs.
+
+        Checks whether a given event has the expected auth events. If it
+        doesn't then we talk to the remote server to compare state to see if
+        we can come to a consensus (e.g. if one server missed some valid
+        state).
+
+        This attempts to resolve any potential divergence of state between
+        servers, but is not essential and so failures should not block further
+        processing of the event.
+
+        Args:
+            origin (str):
+            event (synapse.events.EventBase):
+            context (synapse.events.snapshot.EventContext):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):
+
         Returns:
             defer.Deferred[None]
         """
-        # Check if we have all the auth events.
-        current_state = set(e.event_id for e in auth_events.values())
         event_auth_events = set(event.auth_event_ids())
 
         if event.is_state():
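The refactor above splits do_auth into a best-effort reconciliation step and a hard check: the helper may fail freely, while an AuthError from the final check still propagates. A runnable skeleton of that control flow (the callables stand in for the real methods):

    import logging

    logger = logging.getLogger(__name__)

    def do_auth_sketch(update_auth_events, check):
        try:
            update_auth_events()  # _update_auth_events_and_context_for_auth: may fail
        except Exception:
            logger.exception("Failed to double check auth events with remote.")
        check()  # self.auth.check(...): an AuthError here still propagates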
@ -2028,11 +2084,21 @@ class FederationHandler(BaseHandler):
|
|||||||
else:
|
else:
|
||||||
event_key = None
|
event_key = None
|
||||||
|
|
||||||
if event_auth_events - current_state:
|
# if the event's auth_events refers to events which are not in our
|
||||||
|
# calculated auth_events, we need to fetch those events from somewhere.
|
||||||
|
#
|
||||||
|
# we start by fetching them from the store, and then try calling /event_auth/.
|
||||||
|
missing_auth = event_auth_events.difference(
|
||||||
|
e.event_id for e in auth_events.values()
|
||||||
|
)
|
||||||
|
|
||||||
|
if missing_auth:
|
||||||
# TODO: can we use store.have_seen_events here instead?
|
# TODO: can we use store.have_seen_events here instead?
|
||||||
have_events = yield self.store.get_seen_events_with_rejections(
|
have_events = yield self.store.get_seen_events_with_rejections(
|
||||||
event_auth_events - current_state
|
missing_auth
|
||||||
)
|
)
|
||||||
|
logger.debug("Got events %s from store", have_events)
|
||||||
|
missing_auth.difference_update(have_events.keys())
|
||||||
else:
|
else:
|
||||||
have_events = {}
|
have_events = {}
|
||||||
|
|
||||||
@ -2041,17 +2107,22 @@ class FederationHandler(BaseHandler):
|
|||||||
for e in auth_events.values()
|
for e in auth_events.values()
|
||||||
})
|
})
|
||||||
|
|
||||||
seen_events = set(have_events.keys())
|
|
||||||
|
|
||||||
missing_auth = event_auth_events - seen_events - current_state
|
|
||||||
|
|
||||||
if missing_auth:
|
if missing_auth:
|
||||||
logger.info("Missing auth: %s", missing_auth)
|
|
||||||
# If we don't have all the auth events, we need to get them.
|
# If we don't have all the auth events, we need to get them.
|
||||||
|
logger.info(
|
||||||
|
"auth_events contains unknown events: %s",
|
||||||
|
missing_auth,
|
||||||
|
)
|
||||||
|
try:
|
||||||
try:
|
try:
|
||||||
remote_auth_chain = yield self.federation_client.get_event_auth(
|
remote_auth_chain = yield self.federation_client.get_event_auth(
|
||||||
origin, event.room_id, event.event_id
|
origin, event.room_id, event.event_id
|
||||||
)
|
)
|
||||||
|
except RequestSendFailed as e:
|
||||||
|
# The other side isn't around or doesn't implement the
|
||||||
|
# endpoint, so lets just bail out.
|
||||||
|
logger.info("Failed to get event auth from remote: %s", e)
|
||||||
|
return
|
||||||
|
|
||||||
seen_remotes = yield self.store.have_seen_events(
|
seen_remotes = yield self.store.have_seen_events(
|
||||||
[e.event_id for e in remote_auth_chain]
|
[e.event_id for e in remote_auth_chain]
|
||||||
@@ -2088,22 +2159,31 @@ class FederationHandler(BaseHandler):
                 have_events = yield self.store.get_seen_events_with_rejections(
                     event.auth_event_ids()
                 )
-                seen_events = set(have_events.keys())
             except Exception:
                 # FIXME:
                 logger.exception("Failed to get auth chain")

+        if event.internal_metadata.is_outlier():
+            logger.info("Skipping auth_event fetch for outlier")
+            return
+
         # FIXME: Assumes we have and stored all the state for all the
         # prev_events
-        current_state = set(e.event_id for e in auth_events.values())
-        different_auth = event_auth_events - current_state
+        different_auth = event_auth_events.difference(
+            e.event_id for e in auth_events.values()
+        )
+
+        if not different_auth:
+            return
+
+        logger.info(
+            "auth_events refers to events which are not in our calculated auth "
+            "chain: %s",
+            different_auth,
+        )

         room_version = yield self.store.get_room_version(event.room_id)

-        if different_auth and not event.internal_metadata.is_outlier():
-            # Do auth conflict res.
-            logger.info("Different auth: %s", different_auth)
-
             different_events = yield logcontext.make_deferred_yieldable(
                 defer.gatherResults([
                     logcontext.run_in_background(
@@ -2130,34 +2210,54 @@ class FederationHandler(BaseHandler):
                     event
                 )

+            logger.info(
+                "After state res: updating auth_events with new state %s",
+                {
+                    (d.type, d.state_key): d.event_id for d in new_state.values()
+                    if auth_events.get((d.type, d.state_key)) != d
+                },
+            )
+
             auth_events.update(new_state)

-            current_state = set(e.event_id for e in auth_events.values())
-            different_auth = event_auth_events - current_state
+            different_auth = event_auth_events.difference(
+                e.event_id for e in auth_events.values()
+            )

             yield self._update_context_for_auth_events(
                 event, context, auth_events, event_key,
             )

-        if different_auth and not event.internal_metadata.is_outlier():
-            logger.info("Different auth after resolution: %s", different_auth)
+        if not different_auth:
+            # we're done
+            return
+
+        logger.info(
+            "auth_events still refers to events which are not in the calculated auth "
+            "chain after state resolution: %s",
+            different_auth,
+        )

         # Only do auth resolution if we have something new to say.
-        # We can't rove an auth failure.
+        # We can't prove an auth failure.
         do_resolution = False

-        provable = [
-            RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
-        ]
-
         for e_id in different_auth:
             if e_id in have_events:
-                if have_events[e_id] in provable:
+                if have_events[e_id] == RejectedReason.NOT_ANCESTOR:
                     do_resolution = True
                     break

-        if do_resolution:
+        if not do_resolution:
+            logger.info(
+                "Skipping auth resolution due to lack of provable rejection reasons"
+            )
+            return
+
+        logger.info("Doing auth resolution")
+
         prev_state_ids = yield context.get_prev_state_ids(self.store)

         # 1. Get what we think is the auth chain.
         auth_ids = yield self.auth.compute_auth_events(
             event, prev_state_ids
@@ -2168,12 +2268,18 @@ class FederationHandler(BaseHandler):

         try:
             # 2. Get remote difference.
+            try:
                 result = yield self.federation_client.query_auth(
                     origin,
                     event.room_id,
                     event.event_id,
                     local_auth_chain,
                 )
+            except RequestSendFailed as e:
+                # The other side isn't around or doesn't implement the
+                # endpoint, so let's just bail out.
+                logger.info("Failed to query auth from remote: %s", e)
+                return

             seen_remotes = yield self.store.have_seen_events(
                 [e.event_id for e in result["auth_chain"]]
@@ -2222,12 +2328,6 @@ class FederationHandler(BaseHandler):
                 event, context, auth_events, event_key,
             )

-        try:
-            self.auth.check(room_version, event, auth_events=auth_events)
-        except AuthError as e:
-            logger.warn("Failed auth resolution for %r because %s", event, e)
-            raise e
-
     @defer.inlineCallbacks
     def _update_context_for_auth_events(self, event, context, auth_events,
                                         event_key):
@@ -270,7 +270,14 @@ class IdentityHandler(BaseHandler):
         defer.returnValue(changed)

     @defer.inlineCallbacks
-    def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
+    def requestEmailToken(
+        self,
+        id_server,
+        email,
+        client_secret,
+        send_attempt,
+        next_link=None,
+    ):
         if not self._should_trust_id_server(id_server):
             raise SynapseError(
                 400, "Untrusted ID server '%s'" % id_server,
@@ -282,11 +289,15 @@ class IdentityHandler(BaseHandler):
             'client_secret': client_secret,
             'send_attempt': send_attempt,
         }
-        params.update(kwargs)
         # if we have a rewrite rule set for the identity server,
         # apply it now.
         if id_server in self.rewrite_identity_server_urls:
             id_server = self.rewrite_identity_server_urls[id_server]

+        if next_link:
+            params.update({'next_link': next_link})
+
         try:
             data = yield self.http_client.post_json_get_json(
                 "https://%s%s" % (
@@ -22,7 +22,7 @@ from canonicaljson import encode_canonical_json, json
 from twisted.internet import defer
 from twisted.internet.defer import succeed

-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RelationTypes
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -166,6 +166,9 @@ class MessageHandler(object):
         now = self.clock.time_msec()
         events = yield self._event_serializer.serialize_events(
             room_state.values(), now,
+            # We don't bother bundling aggregations in when asked for state
+            # events, as clients won't use them.
+            bundle_aggregations=False,
         )
         defer.returnValue(events)

@@ -601,6 +604,20 @@ class EventCreationHandler(object):

         self.validator.validate_new(event)

+        # If this event is an annotation then we check that the sender
+        # can't annotate the same way twice (e.g. stops users from liking an
+        # event multiple times).
+        relation = event.content.get("m.relates_to", {})
+        if relation.get("rel_type") == RelationTypes.ANNOTATION:
+            relates_to = relation["event_id"]
+            aggregation_key = relation["key"]
+
+            already_exists = yield self.store.has_user_annotated_event(
+                relates_to, event.type, aggregation_key, event.sender,
+            )
+            if already_exists:
+                raise SynapseError(400, "Can't send same reaction twice")
+
         logger.debug(
             "Created event %s",
             event.event_id,
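Illustrative note (not part of the commit): the annotation check above inspects the relation format from Matrix's reactions/aggregations work, where an annotation carries a target event ID and an aggregation key. A hedged sketch of the content shape being checked; the event IDs, event type and sender below are hypothetical:

    # "m.annotation" is the value RelationTypes.ANNOTATION denotes.
    event_content = {
        "m.relates_to": {
            "rel_type": "m.annotation",
            "event_id": "$target_event",  # event being reacted to
            "key": "👍",                  # the reaction itself
        }
    }

    relation = event_content.get("m.relates_to", {})
    if relation.get("rel_type") == "m.annotation":
        # dedup is keyed on (target, event type, key, sender), so a second
        # identical reaction from the same user is rejected with a 400
        dedup_key = (relation["event_id"], "m.reaction",
                     relation["key"], "@alice:example.com")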
@@ -158,7 +158,13 @@ class PresenceHandler(object):
         # have not yet been persisted
         self.unpersisted_users_changes = set()

-        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+        hs.get_reactor().addSystemEventTrigger(
+            "before",
+            "shutdown",
+            run_as_background_process,
+            "presence.on_shutdown",
+            self._on_shutdown,
+        )

         self.serial_to_user = {}
         self._next_serial = 1
@@ -182,17 +188,27 @@ class PresenceHandler(object):
         # Start a LoopingCall in 30s that fires every 5s.
         # The initial delay is to allow disconnected clients a chance to
         # reconnect before we treat them as offline.
+        def run_timeout_handler():
+            return run_as_background_process(
+                "handle_presence_timeouts", self._handle_timeouts
+            )
+
         self.clock.call_later(
             30,
             self.clock.looping_call,
-            self._handle_timeouts,
+            run_timeout_handler,
             5000,
         )

+        def run_persister():
+            return run_as_background_process(
+                "persist_presence_changes", self._persist_unpersisted_changes
+            )
+
         self.clock.call_later(
             60,
             self.clock.looping_call,
-            self._persist_unpersisted_changes,
+            run_persister,
             60 * 1000,
         )

@@ -229,6 +245,7 @@ class PresenceHandler(object):
         )

         if self.unpersisted_users_changes:
+
             yield self.store.update_presence([
                 self.user_to_current_state[user_id]
                 for user_id in self.unpersisted_users_changes
@@ -240,30 +257,18 @@ class PresenceHandler(object):
         """We periodically persist the unpersisted changes, as otherwise they
         may stack up and slow down shutdown times.
         """
-        logger.info(
-            "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
-            len(self.unpersisted_users_changes)
-        )
-
         unpersisted = self.unpersisted_users_changes
         self.unpersisted_users_changes = set()

         if unpersisted:
+            logger.info(
+                "Persisting %d unpersisted presence updates", len(unpersisted)
+            )
             yield self.store.update_presence([
                 self.user_to_current_state[user_id]
                 for user_id in unpersisted
             ])

-        logger.info("Finished _persist_unpersisted_changes")
-
-    @defer.inlineCallbacks
-    def _update_states_and_catch_exception(self, new_states):
-        try:
-            res = yield self._update_states(new_states)
-            defer.returnValue(res)
-        except Exception:
-            logger.exception("Error updating presence")
-
     @defer.inlineCallbacks
     def _update_states(self, new_states):
         """Updates presence of users. Sets the appropriate timeouts. Pokes
@@ -338,8 +343,6 @@ class PresenceHandler(object):
         logger.info("Handling presence timeouts")
         now = self.clock.time_msec()

-        try:
-            with Measure(self.clock, "presence_handle_timeouts"):
         # Fetch the list of users that *may* have timed out. Things may have
         # changed since the timeout was set, so we won't necessarily have to
         # take any action.
@@ -374,9 +377,7 @@ class PresenceHandler(object):
             now=now,
         )

-                run_in_background(self._update_states_and_catch_exception, changes)
-        except Exception:
-            logger.exception("Exception in _handle_timeouts loop")
+        return self._update_states(changes)

     @defer.inlineCallbacks
     def bump_presence_active_time(self, user):
@@ -833,14 +834,17 @@ class PresenceHandler(object):
                 # joins.
                 continue

-            event = yield self.store.get_event(event_id)
-            if event.content.get("membership") != Membership.JOIN:
+            event = yield self.store.get_event(event_id, allow_none=True)
+            if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue

             if prev_event_id:
-                prev_event = yield self.store.get_event(prev_event_id)
-                if prev_event.content.get("membership") == Membership.JOIN:
+                prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+                if (
+                    prev_event
+                    and prev_event.content.get("membership") == Membership.JOIN
+                ):
                     # Ignore changes to join events.
                     continue

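Illustrative note (not part of the commit): the presence changes above all follow one pattern, wrapping periodic work in run_as_background_process so each run gets its own logcontext and is tracked in the background-process metrics. A hedged sketch of that pattern with a hypothetical helper:

    from synapse.metrics.background_process_metrics import run_as_background_process

    def make_periodic(clock, desc, func, initial_delay_s, every_ms):
        # Hypothetical helper mirroring the call_later/looping_call dance above.
        def runner():
            # each invocation becomes a named, metered background process
            return run_as_background_process(desc, func)
        # fire the first run after initial_delay_s, then repeat every every_ms
        clock.call_later(initial_delay_s, clock.looping_call, runner, every_ms)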
@@ -37,6 +37,9 @@ from ._base import BaseHandler

 logger = logging.getLogger(__name__)

+MAX_DISPLAYNAME_LEN = 100
+MAX_AVATAR_URL_LEN = 1000
+

 class BaseProfileHandler(BaseHandler):
     """Handles fetching and updating user profile information.
@@ -253,6 +256,11 @@ class BaseProfileHandler(BaseHandler):
         if profile.display_name:
             raise SynapseError(400, "Changing displayname is disabled on this server")

+        if len(new_displayname) > MAX_DISPLAYNAME_LEN:
+            raise SynapseError(
+                400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ),
+            )
+
         if new_displayname == '':
             new_displayname = None

@@ -355,6 +363,11 @@ class BaseProfileHandler(BaseHandler):
         else:
             new_batchnum = None

+        if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
+            raise SynapseError(
+                400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ),
+            )
+
         yield self.store.set_profile_avatar_url(
             target_user.localpart, new_avatar_url, new_batchnum,
         )
@@ -19,7 +19,7 @@ import logging
 from twisted.internet import defer

 from synapse import types
-from synapse.api.constants import LoginType
+from synapse.api.constants import MAX_USERID_LENGTH, LoginType
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -124,6 +124,15 @@ class RegistrationHandler(BaseHandler):

         self.check_user_id_not_appservice_exclusive(user_id)

+        if len(user_id) > MAX_USERID_LENGTH:
+            raise SynapseError(
+                400,
+                "User ID may not be longer than %s characters" % (
+                    MAX_USERID_LENGTH,
+                ),
+                Codes.INVALID_USERNAME
+            )
+
         users = yield self.store.get_users_by_id_case_insensitive(user_id)
         if users:
             if not guest_access_token:
@@ -620,6 +629,8 @@ class RegistrationHandler(BaseHandler):
             A tuple of (user_id, access_token).
         Raises:
             RegistrationError if there was a problem registering.
+
+        NB this is only used in tests. TODO: move it to the test package!
         """
         if localpart is None:
             raise SynapseError(400, "Request must include user id")
@@ -27,7 +27,7 @@ from twisted.internet import defer

 from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
 from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
-from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
@@ -72,6 +72,7 @@ class RoomCreationHandler(BaseHandler):
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
+        self.config = hs.config

         # linearizer to stop two upgrades happening at once
         self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
@@ -511,7 +512,11 @@ class RoomCreationHandler(BaseHandler):
         if ratelimit:
             yield self.ratelimit(requester)

-        room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier)
+        room_version = config.get(
+            "room_version",
+            self.config.default_room_version.identifier,
+        )

         if not isinstance(room_version, string_types):
             raise SynapseError(
                 400,
@@ -992,7 +992,7 @@ class RoomMemberHandler(object):
         }

         if self.config.invite_3pid_guest:
-            guest_access_token, guest_user_id = yield self.get_or_register_3pid_guest(
+            guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest(
                 requester=requester,
                 medium=medium,
                 address=address,
synapse/handlers/stats.py (new file, 333 lines)
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.handlers.state_deltas import StateDeltasHandler
+from synapse.metrics import event_processing_positions
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import UserID
+from synapse.util.metrics import Measure
+
+logger = logging.getLogger(__name__)
+
+
+class StatsHandler(StateDeltasHandler):
+    """Handles keeping the *_stats tables updated with a simple time-series of
+    information about the users, rooms and media on the server, such that admins
+    have some idea of who is consuming their resources.
+
+    Heavily derived from UserDirectoryHandler
+    """
+
+    def __init__(self, hs):
+        super(StatsHandler, self).__init__(hs)
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.state = hs.get_state_handler()
+        self.server_name = hs.hostname
+        self.clock = hs.get_clock()
+        self.notifier = hs.get_notifier()
+        self.is_mine_id = hs.is_mine_id
+        self.stats_bucket_size = hs.config.stats_bucket_size
+
+        # The current position in the current_state_delta stream
+        self.pos = None
+
+        # Guard to ensure we only process deltas one at a time
+        self._is_processing = False
+
+        if hs.config.stats_enabled:
+            self.notifier.add_replication_callback(self.notify_new_event)
+
+            # We kick this off so that we don't have to wait for a change before
+            # we start populating stats
+            self.clock.call_later(0, self.notify_new_event)
+
+    def notify_new_event(self):
+        """Called when there may be more deltas to process
+        """
+        if not self.hs.config.stats_enabled:
+            return
+
+        if self._is_processing:
+            return
+
+        @defer.inlineCallbacks
+        def process():
+            try:
+                yield self._unsafe_process()
+            finally:
+                self._is_processing = False
+
+        self._is_processing = True
+        run_as_background_process("stats.notify_new_event", process)
+
+    @defer.inlineCallbacks
+    def _unsafe_process(self):
+        # If self.pos is None then it means we haven't fetched it from DB
+        if self.pos is None:
+            self.pos = yield self.store.get_stats_stream_pos()
+
+        # If still None then the initial background update hasn't happened yet
+        if self.pos is None:
+            defer.returnValue(None)
+
+        # Loop round handling deltas until we're up to date
+        while True:
+            with Measure(self.clock, "stats_delta"):
+                deltas = yield self.store.get_current_state_deltas(self.pos)
+                if not deltas:
+                    return
+
+                logger.info("Handling %d state deltas", len(deltas))
+                yield self._handle_deltas(deltas)
+
+                self.pos = deltas[-1]["stream_id"]
+                yield self.store.update_stats_stream_pos(self.pos)
+
+                event_processing_positions.labels("stats").set(self.pos)
+
+    @defer.inlineCallbacks
+    def _handle_deltas(self, deltas):
+        """
+        Called with the state deltas to process
+        """
+        for delta in deltas:
+            typ = delta["type"]
+            state_key = delta["state_key"]
+            room_id = delta["room_id"]
+            event_id = delta["event_id"]
+            stream_id = delta["stream_id"]
+            prev_event_id = delta["prev_event_id"]
+            stream_pos = delta["stream_id"]
+
+            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
+
+            token = yield self.store.get_earliest_token_for_room_stats(room_id)
+
+            # If the earliest token to begin from is larger than our current
+            # stream ID, skip processing this delta.
+            if token is not None and token >= stream_id:
+                logger.debug(
+                    "Ignoring: %s as earlier than this room's initial ingestion event",
+                    event_id,
+                )
+                continue
+
+            if event_id is None and prev_event_id is None:
+                # Errr...
+                continue
+
+            event_content = {}
+
+            if event_id is not None:
+                event = yield self.store.get_event(event_id, allow_none=True)
+                if event:
+                    event_content = event.content or {}
+
+            # We use stream_pos here rather than fetch by event_id as event_id
+            # may be None
+            now = yield self.store.get_received_ts_by_stream_pos(stream_pos)
+
+            # quantise time to the nearest bucket
+            now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size
+
+            if typ == EventTypes.Member:
+                # we could use _get_key_change here but it's a bit inefficient
+                # given we're not testing for a specific result; might as well
+                # just grab the prev_membership and membership strings and
+                # compare them.
+                prev_event_content = {}
+                if prev_event_id is not None:
+                    prev_event = yield self.store.get_event(
+                        prev_event_id, allow_none=True,
+                    )
+                    if prev_event:
+                        prev_event_content = prev_event.content
+
+                membership = event_content.get("membership", Membership.LEAVE)
+                prev_membership = prev_event_content.get("membership", Membership.LEAVE)
+
+                if prev_membership == membership:
+                    continue
+
+                if prev_membership == Membership.JOIN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "joined_members", -1
+                    )
+                elif prev_membership == Membership.INVITE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "invited_members", -1
+                    )
+                elif prev_membership == Membership.LEAVE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "left_members", -1
+                    )
+                elif prev_membership == Membership.BAN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "banned_members", -1
+                    )
+                else:
+                    err = "%s is not a valid prev_membership" % (repr(prev_membership),)
+                    logger.error(err)
+                    raise ValueError(err)
+
+                if membership == Membership.JOIN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "joined_members", +1
+                    )
+                elif membership == Membership.INVITE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "invited_members", +1
+                    )
+                elif membership == Membership.LEAVE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "left_members", +1
+                    )
+                elif membership == Membership.BAN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "banned_members", +1
+                    )
+                else:
+                    err = "%s is not a valid membership" % (repr(membership),)
+                    logger.error(err)
+                    raise ValueError(err)
+
+                user_id = state_key
+                if self.is_mine_id(user_id):
+                    # update user_stats as it's one of our users
+                    public = yield self._is_public_room(room_id)
+
+                    if membership == Membership.LEAVE:
+                        yield self.store.update_stats_delta(
+                            now,
+                            "user",
+                            user_id,
+                            "public_rooms" if public else "private_rooms",
+                            -1,
+                        )
+                    elif membership == Membership.JOIN:
+                        yield self.store.update_stats_delta(
+                            now,
+                            "user",
+                            user_id,
+                            "public_rooms" if public else "private_rooms",
+                            +1,
+                        )
+
+            elif typ == EventTypes.Create:
+                # Newly created room. Add it with all blank portions.
+                yield self.store.update_room_state(
+                    room_id,
+                    {
+                        "join_rules": None,
+                        "history_visibility": None,
+                        "encryption": None,
+                        "name": None,
+                        "topic": None,
+                        "avatar": None,
+                        "canonical_alias": None,
+                    },
+                )
+
+            elif typ == EventTypes.JoinRules:
+                yield self.store.update_room_state(
+                    room_id, {"join_rules": event_content.get("join_rule")}
+                )
+
+                is_public = yield self._get_key_change(
+                    prev_event_id, event_id, "join_rule", JoinRules.PUBLIC
+                )
+                if is_public is not None:
+                    yield self.update_public_room_stats(now, room_id, is_public)
+
+            elif typ == EventTypes.RoomHistoryVisibility:
+                yield self.store.update_room_state(
+                    room_id,
+                    {"history_visibility": event_content.get("history_visibility")},
+                )
+
+                is_public = yield self._get_key_change(
+                    prev_event_id, event_id, "history_visibility", "world_readable"
+                )
+                if is_public is not None:
+                    yield self.update_public_room_stats(now, room_id, is_public)
+
+            elif typ == EventTypes.Encryption:
+                yield self.store.update_room_state(
+                    room_id, {"encryption": event_content.get("algorithm")}
+                )
+            elif typ == EventTypes.Name:
+                yield self.store.update_room_state(
+                    room_id, {"name": event_content.get("name")}
+                )
+            elif typ == EventTypes.Topic:
+                yield self.store.update_room_state(
+                    room_id, {"topic": event_content.get("topic")}
+                )
+            elif typ == EventTypes.RoomAvatar:
+                yield self.store.update_room_state(
+                    room_id, {"avatar": event_content.get("url")}
+                )
+            elif typ == EventTypes.CanonicalAlias:
+                yield self.store.update_room_state(
+                    room_id, {"canonical_alias": event_content.get("alias")}
+                )
+
+    @defer.inlineCallbacks
+    def update_public_room_stats(self, ts, room_id, is_public):
+        """
+        Increment/decrement a user's number of public rooms when a room they are
+        in changes to/from public visibility.
+
+        Args:
+            ts (int): Timestamp in seconds
+            room_id (str)
+            is_public (bool)
+        """
+        # For now, blindly iterate over all local users in the room so that
+        # we can handle the whole problem of copying buckets over as needed
+        user_ids = yield self.store.get_users_in_room(room_id)
+
+        for user_id in user_ids:
+            if self.hs.is_mine(UserID.from_string(user_id)):
+                yield self.store.update_stats_delta(
+                    ts, "user", user_id, "public_rooms", +1 if is_public else -1
+                )
+                yield self.store.update_stats_delta(
+                    ts, "user", user_id, "private_rooms", -1 if is_public else +1
+                )
+
+    @defer.inlineCallbacks
+    def _is_public_room(self, room_id):
+        join_rules = yield self.state.get_current_state(room_id, EventTypes.JoinRules)
+        history_visibility = yield self.state.get_current_state(
+            room_id, EventTypes.RoomHistoryVisibility
+        )
+
+        if (join_rules and join_rules.content.get("join_rule") == JoinRules.PUBLIC) or (
+            (
+                history_visibility
+                and history_visibility.content.get("history_visibility")
+                == "world_readable"
+            )
+        ):
+            defer.returnValue(True)
+        else:
+            defer.returnValue(False)
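Illustrative note (not part of the commit): the bucket quantisation in _handle_deltas above is simple integer arithmetic — a millisecond timestamp is floored to the start of a fixed-size bucket measured in seconds. A worked example with a hypothetical bucket size:

    stats_bucket_size = 86400                 # one day, in seconds (illustrative)
    now_ms = 1_556_675_143_123                # hypothetical received_ts, in ms
    now = (now_ms // 1000 // stats_bucket_size) * stats_bucket_size
    assert now == 1_556_668_800               # start of that day's bucket, in seconds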
@@ -583,30 +583,42 @@ class SyncHandler(object):
         )

         # if the room has a name or canonical_alias set, we can skip
-        # calculating heroes. we assume that if the event has contents, it'll
-        # be a valid name or canonical_alias - i.e. we're checking that they
-        # haven't been "deleted" by blatting {} over the top.
+        # calculating heroes. Empty strings are falsey, so we check
+        # for the "name" value and default to an empty string.
         if name_id:
             name = yield self.store.get_event(name_id, allow_none=True)
-            if name and name.content:
+            if name and name.content.get("name"):
                 defer.returnValue(summary)

         if canonical_alias_id:
             canonical_alias = yield self.store.get_event(
                 canonical_alias_id, allow_none=True,
             )
-            if canonical_alias and canonical_alias.content:
+            if canonical_alias and canonical_alias.content.get("alias"):
                 defer.returnValue(summary)

+        me = sync_config.user.to_string()
+
         joined_user_ids = [
-            r[0] for r in details.get(Membership.JOIN, empty_ms).members
+            r[0]
+            for r in details.get(Membership.JOIN, empty_ms).members
+            if r[0] != me
         ]
         invited_user_ids = [
-            r[0] for r in details.get(Membership.INVITE, empty_ms).members
+            r[0]
+            for r in details.get(Membership.INVITE, empty_ms).members
+            if r[0] != me
         ]
         gone_user_ids = (
-            [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] +
-            [r[0] for r in details.get(Membership.BAN, empty_ms).members]
+            [
+                r[0]
+                for r in details.get(Membership.LEAVE, empty_ms).members
+                if r[0] != me
+            ] + [
+                r[0]
+                for r in details.get(Membership.BAN, empty_ms).members
+                if r[0] != me
+            ]
         )

         # FIXME: only build up a member_ids list for our heroes
@@ -621,22 +633,13 @@ class SyncHandler(object):
             member_ids[user_id] = event_id

         # FIXME: order by stream ordering rather than as returned by SQL
-        me = sync_config.user.to_string()
         if (joined_user_ids or invited_user_ids):
             summary['m.heroes'] = sorted(
-                [
-                    user_id
-                    for user_id in (joined_user_ids + invited_user_ids)
-                    if user_id != me
-                ]
+                [user_id for user_id in (joined_user_ids + invited_user_ids)]
             )[0:5]
         else:
             summary['m.heroes'] = sorted(
-                [
-                    user_id
-                    for user_id in gone_user_ids
-                    if user_id != me
-                ]
+                [user_id for user_id in gone_user_ids]
             )[0:5]

         if not sync_config.filter_collection.lazy_load_members():
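Illustrative note (not part of the commit): after this change the requesting user is excluded while the membership lists are built, rather than at hero-sorting time. A minimal sketch with made-up user IDs:

    me = "@me:example.com"
    joined = ["@zed:example.com", "@alice:example.com", me]
    invited = ["@bob:example.com"]

    joined_user_ids = [u for u in joined if u != me]
    invited_user_ids = [u for u in invited if u != me]
    heroes = sorted(joined_user_ids + invited_user_ids)[0:5]
    assert heroes == ["@alice:example.com", "@bob:example.com", "@zed:example.com"]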
@@ -285,7 +285,24 @@ class MatrixFederationHttpClient(object):
             request (MatrixFederationRequest): details of request to be sent

             timeout (int|None): number of milliseconds to wait for the response headers
-                (including connecting to the server). 60s by default.
+                (including connecting to the server), *for each attempt*.
+                60s by default.
+
+            long_retries (bool): whether to use the long retry algorithm.
+
+                The regular retry algorithm makes 4 attempts, with intervals
+                [0.5s, 1s, 2s].
+
+                The long retry algorithm makes 11 attempts, with intervals
+                [4s, 16s, 60s, 60s, ...]
+
+                Both algorithms add -20%/+40% jitter to the retry intervals.
+
+                Note that the above intervals are *in addition* to the time spent
+                waiting for the request to complete (up to `timeout` ms).
+
+                NB: the long retry algorithm takes over 20 minutes to complete, with
+                a default timeout of 60s!

             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
@@ -566,10 +583,14 @@ class MatrixFederationHttpClient(object):
                 the request body. This will be encoded as JSON.
             json_data_callback (callable): A callable returning the dict to
                 use as the request body.
-            long_retries (bool): A boolean that indicates whether we should
-                retry for a short or long time.
-            timeout(int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout.
+
+            long_retries (bool): whether to use the long retry algorithm. See
+                docs on _send_request for details.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self._default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
             backoff_on_404 (bool): True if we should count a 404 response as
@@ -627,15 +648,22 @@ class MatrixFederationHttpClient(object):
         Args:
             destination (str): The remote server to send the HTTP request
                 to.
+
             path (str): The HTTP path.
+
             data (dict): A dict containing the data that will be used as
                 the request body. This will be encoded as JSON.
-            long_retries (bool): A boolean that indicates whether we should
-                retry for a short or long time.
-            timeout(int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout.
+
+            long_retries (bool): whether to use the long retry algorithm. See
+                docs on _send_request for details.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self._default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.

             args (dict): query params
         Returns:
             Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
@@ -686,14 +714,19 @@ class MatrixFederationHttpClient(object):
         Args:
             destination (str): The remote server to send the HTTP request
                 to.
+
             path (str): The HTTP path.
+
             args (dict|None): A dictionary used to create query strings, defaults to
                 None.
-            timeout (int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout and that the request will
-                be retried.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self._default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.

             try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED
                 response we should try appending a trailing slash to the end of
                 the request. Workaround for #3622 in Synapse <= v0.99.3.
@@ -711,10 +744,6 @@ class MatrixFederationHttpClient(object):
             RequestSendFailed: If there were problems connecting to the
                 remote, due to e.g. DNS failures, connection timeouts etc.
         """
-        logger.debug("get_json args: %s", args)
-
-        logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)
-
         request = MatrixFederationRequest(
             method="GET",
             destination=destination,
@@ -746,12 +775,18 @@ class MatrixFederationHttpClient(object):
             destination (str): The remote server to send the HTTP request
                 to.
             path (str): The HTTP path.
-            long_retries (bool): A boolean that indicates whether we should
-                retry for a short or long time.
-            timeout(int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout.
+
+            long_retries (bool): whether to use the long retry algorithm. See
+                docs on _send_request for details.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self._default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.

+            args (dict): query params
         Returns:
             Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
                 result will be the decoded JSON body.
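Illustrative note (not part of the commit): the worst-case times implied by the retry docstrings above can be checked with quick arithmetic, ignoring the -20%/+40% jitter and assuming the trailing long-retry intervals stay at 60s:

    timeout_s = 60.0
    regular_sleeps = [0.5, 1, 2]              # between the 4 regular attempts
    long_sleeps = [4, 16] + [60] * 8          # between the 11 long-retry attempts

    worst_regular = 4 * timeout_s + sum(regular_sleeps)   # 243.5s
    worst_long = 11 * timeout_s + sum(long_sleeps)        # 1160s, ~19 min before jitter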
@@ -55,7 +55,7 @@ def parse_integer_from_args(args, name, default=None, required=False):
             return int(args[name][0])
         except Exception:
             message = "Query parameter %r must be an integer" % (name,)
-            raise SynapseError(400, message)
+            raise SynapseError(400, message, errcode=Codes.INVALID_PARAM)
     else:
         if required:
             message = "Missing integer query parameter %r" % (name,)
@@ -80,10 +80,10 @@ ALLOWED_ATTRS = {


 class Mailer(object):
-    def __init__(self, hs, app_name, notif_template_html, notif_template_text):
+    def __init__(self, hs, app_name, template_html, template_text):
         self.hs = hs
-        self.notif_template_html = notif_template_html
-        self.notif_template_text = notif_template_text
+        self.template_html = template_html
+        self.template_text = template_text

         self.sendmail = self.hs.get_sendmail()
         self.store = self.hs.get_datastore()
@@ -93,22 +93,49 @@ class Mailer(object):

         logger.info("Created Mailer for app_name %s" % app_name)

+    @defer.inlineCallbacks
+    def send_password_reset_mail(
+        self,
+        email_address,
+        token,
+        client_secret,
+        sid,
+    ):
+        """Send an email with a password reset link to a user
+
+        Args:
+            email_address (str): Email address we're sending the password
+                reset to
+            token (str): Unique token generated by the server to verify
+                password reset email was received
+            client_secret (str): Unique token generated by the client to
+                group together multiple email sending attempts
+            sid (str): The generated session ID
+        """
+        if email.utils.parseaddr(email_address)[1] == '':
+            raise RuntimeError("Invalid 'to' email address")
+
+        link = (
+            self.hs.config.public_baseurl +
+            "_matrix/client/unstable/password_reset/email/submit_token"
+            "?token=%s&client_secret=%s&sid=%s" %
+            (token, client_secret, sid)
+        )
+
+        template_vars = {
+            "link": link,
+        }
+
+        yield self.send_email(
+            email_address,
+            "[%s] Password Reset Email" % self.hs.config.server_name,
+            template_vars,
+        )
+
     @defer.inlineCallbacks
     def send_notification_mail(self, app_id, user_id, email_address,
                                push_actions, reason):
-        try:
-            from_string = self.hs.config.email_notif_from % {
-                "app": self.app_name
-            }
-        except TypeError:
-            from_string = self.hs.config.email_notif_from
-
-        raw_from = email.utils.parseaddr(from_string)[1]
-        raw_to = email.utils.parseaddr(email_address)[1]
-
-        if raw_to == '':
-            raise RuntimeError("Invalid 'to' address")
-
+        """Send email regarding a user's room notifications"""
         rooms_in_order = deduped_ordered_list(
             [pa['room_id'] for pa in push_actions]
         )
@@ -176,14 +203,36 @@ class Mailer(object):
             "reason": reason,
         }

-        html_text = self.notif_template_html.render(**template_vars)
+        yield self.send_email(
+            email_address,
+            "[%s] %s" % (self.app_name, summary_text),
+            template_vars,
+        )
+
+    @defer.inlineCallbacks
+    def send_email(self, email_address, subject, template_vars):
+        """Send an email with the given information and template text"""
+        try:
+            from_string = self.hs.config.email_notif_from % {
+                "app": self.app_name
+            }
+        except TypeError:
+            from_string = self.hs.config.email_notif_from
+
+        raw_from = email.utils.parseaddr(from_string)[1]
+        raw_to = email.utils.parseaddr(email_address)[1]
+
+        if raw_to == '':
+            raise RuntimeError("Invalid 'to' address")
+
+        html_text = self.template_html.render(**template_vars)
         html_part = MIMEText(html_text, "html", "utf8")

-        plain_text = self.notif_template_text.render(**template_vars)
+        plain_text = self.template_text.render(**template_vars)
         text_part = MIMEText(plain_text, "plain", "utf8")

         multipart_msg = MIMEMultipart('alternative')
-        multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
+        multipart_msg['Subject'] = subject
         multipart_msg['From'] = from_string
         multipart_msg['To'] = email_address
         multipart_msg['Date'] = email.utils.formatdate()
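Illustrative note (not part of the commit): send_email now centralises the MIME assembly that send_notification_mail used to do inline. A standalone sketch of that assembly using only the stdlib pieces the code imports:

    import email.utils
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    def build_message(subject, from_string, to_address, plain_text, html_text):
        # multipart/alternative: clients render the richest part they support
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_string
        msg['To'] = to_address
        msg['Date'] = email.utils.formatdate()
        msg.attach(MIMEText(plain_text, "plain", "utf8"))
        msg.attach(MIMEText(html_text, "html", "utf8"))
        return msg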
@@ -70,8 +70,8 @@ class PusherFactory(object):
             mailer = Mailer(
                 hs=self.hs,
                 app_name=app_name,
-                notif_template_html=self.notif_template_html,
-                notif_template_text=self.notif_template_text,
+                template_html=self.notif_template_html,
+                template_text=self.notif_template_text,
             )
             self.mailers[app_name] = mailer
         return EmailPusher(self.hs, pusherdict, mailer)
@ -16,7 +16,12 @@
|
|||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
from pkg_resources import DistributionNotFound, VersionConflict, get_distribution
|
from pkg_resources import (
|
||||||
|
DistributionNotFound,
|
||||||
|
Requirement,
|
||||||
|
VersionConflict,
|
||||||
|
get_provider,
|
||||||
|
)
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -39,7 +44,10 @@ REQUIREMENTS = [
|
|||||||
"canonicaljson>=1.1.3",
|
"canonicaljson>=1.1.3",
|
||||||
"signedjson>=1.0.0",
|
"signedjson>=1.0.0",
|
||||||
"pynacl>=1.2.1",
|
"pynacl>=1.2.1",
|
||||||
"service_identity>=16.0.0",
|
"idna>=2",
|
||||||
|
|
||||||
|
# validating SSL certs for IP addresses requires service_identity 18.1.
|
||||||
|
"service_identity>=18.1.0",
|
||||||
|
|
||||||
# our logcontext handling relies on the ability to cancel inlineCallbacks
|
# our logcontext handling relies on the ability to cancel inlineCallbacks
|
||||||
# (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
|
# (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
|
||||||
@ -53,7 +61,7 @@ REQUIREMENTS = [
|
|||||||
"pyasn1-modules>=0.0.7",
|
"pyasn1-modules>=0.0.7",
|
||||||
"daemonize>=2.3.1",
|
"daemonize>=2.3.1",
|
||||||
"bcrypt>=3.1.0",
|
"bcrypt>=3.1.0",
|
||||||
"pillow>=3.1.2",
|
"pillow>=4.3.0",
|
||||||
"sortedcontainers>=1.4.4",
|
"sortedcontainers>=1.4.4",
|
||||||
"psutil>=2.0.0",
|
"psutil>=2.0.0",
|
||||||
"pymacaroons>=0.13.0",
|
"pymacaroons>=0.13.0",
|
||||||
@ -69,18 +77,10 @@ REQUIREMENTS = [
|
|||||||
"attrs>=17.4.0",
|
"attrs>=17.4.0",
|
||||||
|
|
||||||
"netaddr>=0.7.18",
|
"netaddr>=0.7.18",
|
||||||
|
|
||||||
# requests is a transitive dep of treq, and urlib3 is a transitive dep
|
|
||||||
# of requests, as well as of sentry-sdk.
|
|
||||||
#
|
|
||||||
# As of requests 2.21, requests does not yet support urllib3 1.25.
|
|
||||||
# (If we do not pin it here, pip will give us the latest urllib3
|
|
||||||
# due to the dep via sentry-sdk.)
|
|
||||||
"urllib3<1.25",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
CONDITIONAL_REQUIREMENTS = {
|
CONDITIONAL_REQUIREMENTS = {
|
||||||
"email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
|
"email": ["Jinja2>=2.9", "bleach>=1.4.3"],
|
||||||
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
|
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
|
||||||
|
|
||||||
# we use execute_batch, which arrived in psycopg 2.7.
|
# we use execute_batch, which arrived in psycopg 2.7.
|
||||||
@ -91,7 +91,13 @@ CONDITIONAL_REQUIREMENTS = {
|
|||||||
|
|
||||||
# ACME support is required to provision TLS certificates from authorities
|
# ACME support is required to provision TLS certificates from authorities
|
||||||
# that use the protocol, such as Let's Encrypt.
|
# that use the protocol, such as Let's Encrypt.
|
||||||
"acme": ["txacme>=0.9.2"],
|
"acme": [
|
||||||
|
"txacme>=0.9.2",
|
||||||
|
|
||||||
|
# txacme depends on eliot. Eliot 1.8.0 is incompatible with
|
||||||
|
# python 3.5.2, as per https://github.com/itamarst/eliot/issues/418
|
||||||
|
'eliot<1.8.0;python_version<"3.5.3"',
|
||||||
|
],
|
||||||
|
|
||||||
"saml2": ["pysaml2>=4.5.0"],
|
"saml2": ["pysaml2>=4.5.0"],
|
||||||
"systemd": ["systemd-python>=231"],
|
"systemd": ["systemd-python>=231"],
|
||||||
@ -125,10 +131,10 @@ class DependencyException(Exception):
|
|||||||
@property
|
@property
|
||||||
def dependencies(self):
|
def dependencies(self):
|
||||||
for i in self.args[0]:
|
for i in self.args[0]:
|
||||||
yield '"' + i + '"'
|
yield "'" + i + "'"
|
||||||
|
|
||||||
|
|
||||||
def check_requirements(for_feature=None, _get_distribution=get_distribution):
|
def check_requirements(for_feature=None):
|
||||||
deps_needed = []
|
deps_needed = []
|
||||||
errors = []
|
errors = []
|
||||||
|
|
||||||
@ -139,7 +145,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
|
|||||||
|
|
||||||
for dependency in reqs:
|
for dependency in reqs:
|
||||||
try:
|
try:
|
||||||
_get_distribution(dependency)
|
_check_requirement(dependency)
|
||||||
except VersionConflict as e:
|
except VersionConflict as e:
|
||||||
deps_needed.append(dependency)
|
deps_needed.append(dependency)
|
||||||
errors.append(
|
errors.append(
|
||||||
@@ -157,7 +163,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):

     for dependency in OPTS:
         try:
-            _get_distribution(dependency)
+            _check_requirement(dependency)
         except VersionConflict as e:
             deps_needed.append(dependency)
             errors.append(
@@ -175,6 +181,23 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
         raise DependencyException(deps_needed)


+def _check_requirement(dependency_string):
+    """Parses a dependency string, and checks if the specified requirement is installed
+
+    Raises:
+        VersionConflict if the requirement is installed, but with the wrong version
+        DistributionNotFound if nothing is found to provide the requirement
+    """
+    req = Requirement.parse(dependency_string)
+
+    # first check if the markers specify that this requirement needs installing
+    if req.marker is not None and not req.marker.evaluate():
+        # not required for this environment
+        return
+
+    get_provider(req)
+
+
 if __name__ == "__main__":
     import sys
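For context, the new helper leans entirely on pkg_resources: `Requirement.parse` understands PEP 508 environment markers, which is what makes the conditional eliot pin above work. A minimal sketch of that behaviour (illustrative, not part of the patch):

```python
# Sketch: how pkg_resources treats the conditional eliot pin above.
from pkg_resources import Requirement, get_provider

req = Requirement.parse('eliot<1.8.0;python_version<"3.5.3"')

# The marker is evaluated against the running interpreter, so on
# Python >= 3.5.3 it is False and the requirement is skipped outright.
if req.marker is None or req.marker.evaluate():
    # Raises DistributionNotFound if eliot is absent, or VersionConflict
    # if an incompatible version is installed.
    get_provider(req)
```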
@@ -23,6 +23,7 @@ from synapse.replication.tcp.streams.events import (
 from synapse.storage.event_federation import EventFederationWorkerStore
 from synapse.storage.event_push_actions import EventPushActionsWorkerStore
 from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.relations import RelationsWorkerStore
 from synapse.storage.roommember import RoomMemberWorkerStore
 from synapse.storage.signatures import SignatureWorkerStore
 from synapse.storage.state import StateGroupWorkerStore
@@ -52,6 +53,7 @@ class SlavedEventStore(EventFederationWorkerStore,
                       EventsWorkerStore,
                       SignatureWorkerStore,
                       UserErasureWorkerStore,
+                      RelationsWorkerStore,
                       BaseSlavedStore):

     def __init__(self, db_conn, hs):
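The slaved store is assembled from read-only worker-store mixins, so adding `RelationsWorkerStore` to the base list is all it takes to expose relation queries on workers. A toy sketch of the composition pattern (names simplified):

```python
# Toy sketch of the mixin composition used by SlavedEventStore: each
# *WorkerStore contributes read-side methods and Python's MRO merges them.
class RelationsWorkerStore(object):
    def get_relations_for_event(self, event_id):
        return "rows relating to %s" % (event_id,)

class BaseSlavedStore(object):
    pass

class SlavedEventStore(RelationsWorkerStore, BaseSlavedStore):
    pass

store = SlavedEventStore()
print(store.get_relations_for_event("$parent:example.com"))
```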
@@ -89,7 +91,7 @@ class SlavedEventStore(EventFederationWorkerStore,
         for row in rows:
             self.invalidate_caches_for_event(
                 -token, row.event_id, row.room_id, row.type, row.state_key,
-                row.redacts,
+                row.redacts, row.relates_to,
                 backfilled=True,
             )
         return super(SlavedEventStore, self).process_replication_rows(
@@ -102,7 +104,7 @@ class SlavedEventStore(EventFederationWorkerStore,
             if row.type == EventsStreamEventRow.TypeId:
                 self.invalidate_caches_for_event(
                     token, data.event_id, data.room_id, data.type, data.state_key,
-                    data.redacts,
+                    data.redacts, data.relates_to,
                     backfilled=False,
                 )
             elif row.type == EventsStreamCurrentStateRow.TypeId:
@@ -114,7 +116,8 @@ class SlavedEventStore(EventFederationWorkerStore,
                 raise Exception("Unknown events stream row type %s" % (row.type, ))

     def invalidate_caches_for_event(self, stream_ordering, event_id, room_id,
-                                    etype, state_key, redacts, backfilled):
+                                    etype, state_key, redacts, relates_to,
+                                    backfilled):
         self._invalidate_get_event_cache(event_id)

         self.get_latest_event_ids_in_room.invalidate((room_id,))
@@ -136,3 +139,8 @@ class SlavedEventStore(EventFederationWorkerStore,
                 state_key, stream_ordering
             )
             self.get_invited_rooms_for_user.invalidate((state_key,))
+
+        if relates_to:
+            self.get_relations_for_event.invalidate_many((relates_to,))
+            self.get_aggregation_groups_for_event.invalidate_many((relates_to,))
+            self.get_applicable_edit.invalidate((relates_to,))
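The extra invalidations matter because workers serve relation queries from memoized caches; once a stream row carries a `relates_to`, any cached aggregate keyed on that parent event is stale. A generic sketch of the idea (not Synapse's actual cache classes):

```python
# Hypothetical keyed cache, to show the shape of the invalidation above:
# a write touching `relates_to` must drop every cached read for that key.
class KeyedCache(object):
    def __init__(self):
        self._entries = {}

    def get(self, key, compute):
        if key not in self._entries:
            self._entries[key] = compute(key)
        return self._entries[key]

    def invalidate(self, key):
        # The next read recomputes from the database instead of reusing
        # a value that predates the new relation.
        self._entries.pop(key, None)
```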
@@ -32,6 +32,7 @@ BackfillStreamRow = namedtuple("BackfillStreamRow", (
     "type",  # str
     "state_key",  # str, optional
     "redacts",  # str, optional
+    "relates_to",  # str, optional
 ))
 PresenceStreamRow = namedtuple("PresenceStreamRow", (
     "user_id",  # str
@@ -85,6 +85,7 @@ class EventsStreamEventRow(BaseEventsStreamRow):
     type = attr.ib()  # str
     state_key = attr.ib()  # str, optional
     redacts = attr.ib()  # str, optional
+    relates_to = attr.ib()  # str, optional


 @attr.s(slots=True, frozen=True)
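An illustrative construction of the widened row (the earlier fields are inferred from how `invalidate_caches_for_event` consumes them; `relates_to` is simply None for events that carry no relation):

```python
# Sketch: an events stream row carrying a relation to its parent event.
row = EventsStreamEventRow(
    event_id="$child:example.com",
    room_id="!room:example.com",
    type="m.room.message",
    state_key=None,
    redacts=None,
    relates_to="$parent:example.com",  # None when the event has no relation
)
```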
synapse/res/templates/password_reset.html (new file, 9 lines)
@@ -0,0 +1,9 @@
+<html>
+<body>
+    <p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
+
+    <a href="{{ link }}">{{ link }}</a>
+
+    <p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
+</body>
+</html>
synapse/res/templates/password_reset.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
+A password reset request has been received for your Matrix account. If this
+was you, please click the link below to confirm resetting your password:
+
+{{ link }}
+
+If this was not you, please disregard this email and contact your server
+administrator. Thank you.
synapse/res/templates/password_reset_failure.html (new file, 6 lines)
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>{{ failure_reason }}. Your password has not been reset.</p>
+</body>
+</html>
synapse/res/templates/password_reset_success.html (new file, 6 lines)
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Your email has now been validated, please return to your client to reset your password. You may now close this window.</p>
+</body>
+</html>
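The four templates above are plain Jinja2 (the `email` extra pulls Jinja2 in). A minimal sketch of rendering them; the loader path and the link value here are illustrative:

```python
# Sketch only: render the new password reset templates with Jinja2.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("synapse/res/templates"))

# "link" would be the server-generated confirmation URL (placeholder here).
html = env.get_template("password_reset.html").render(
    link="https://hs.example.com/placeholder-confirmation-link"
)
text = env.get_template("password_reset.txt").render(
    link="https://hs.example.com/placeholder-confirmation-link"
)
```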
@@ -45,6 +45,7 @@ from synapse.rest.client.v2_alpha import (
     read_marker,
     receipts,
     register,
+    relations,
     report_event,
     room_keys,
     room_upgrade_rest_servlet,
@@ -117,6 +118,7 @@ class ClientRestResource(JsonResource):
         capabilities.register_servlets(hs, client_resource)
         account_validity.register_servlets(hs, client_resource)
         password_policy.register_servlets(hs, client_resource)
+        relations.register_servlets(hs, client_resource)

         # moving to /_synapse/admin
         synapse.rest.admin.register_servlets_for_client_rest_resource(
@@ -822,10 +822,16 @@ class AdminRestResource(JsonResource):

     def __init__(self, hs):
         JsonResource.__init__(self, hs, canonical_json=False)
+        register_servlets(hs, self)

-        register_servlets_for_client_rest_resource(hs, self)
-        SendServerNoticeServlet(hs).register(self)
-        VersionServlet(hs).register(self)
+
+def register_servlets(hs, http_server):
+    """
+    Register all the admin servlets.
+    """
+    register_servlets_for_client_rest_resource(hs, http_server)
+    SendServerNoticeServlet(hs).register(http_server)
+    VersionServlet(hs).register(http_server)


 def register_servlets_for_client_rest_resource(hs, http_server):
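The shape of the refactor above: servlet registration moves out of `AdminRestResource.__init__` into a module-level `register_servlets`, so any resource tree can mount the same admin servlets. A hypothetical reuse:

```python
# Hypothetical: mount the admin servlets on some other JsonResource
# without going through AdminRestResource itself.
resource = JsonResource(hs, canonical_json=False)
register_servlets(hs, resource)
```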
@@ -1,65 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains base REST classes for constructing client v1 servlets.
-"""
-
-import logging
-import re
-
-from synapse.api.urls import CLIENT_API_PREFIX
-from synapse.http.servlet import RestServlet
-from synapse.rest.client.transactions import HttpTransactionCache
-
-logger = logging.getLogger(__name__)
-
-
-def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True):
-    """Creates a regex compiled client path with the correct client path
-    prefix.
-
-    Args:
-        path_regex (str): The regex string to match. This should NOT have a ^
-            as this will be prefixed.
-    Returns:
-        SRE_Pattern
-    """
-    patterns = [re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex)]
-    if include_in_unstable:
-        unstable_prefix = CLIENT_API_PREFIX + "/unstable"
-        patterns.append(re.compile("^" + unstable_prefix + path_regex))
-    for release in releases:
-        new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
-        patterns.append(re.compile("^" + new_prefix + path_regex))
-    return patterns
-
-
-class ClientV1RestServlet(RestServlet):
-    """A base Synapse REST Servlet for the client version 1 API.
-    """
-
-    # This subclass was presumably created to allow the auth for the v1
-    # protocol version to be different, however this behaviour was removed.
-    # it may no longer be necessary
-
-    def __init__(self, hs):
-        """
-        Args:
-            hs (synapse.server.HomeServer):
-        """
-        self.hs = hs
-        self.builder_factory = hs.get_event_builder_factory()
-        self.auth = hs.get_auth()
-        self.txns = HttpTransactionCache(hs)
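For orientation: the deleted `client_path_patterns` anchored servlets under `/_matrix/client/api/v1` (plus `/unstable` and `/r0`). Its replacement, `client_patterns` in `synapse/rest/client/v2_alpha/_base.py`, serves the same purpose with a `v1` flag; a sketch of roughly what it compiles, under the assumption that it mirrors the helper removed above:

```python
# Rough sketch of the successor helper's behaviour (the real client_patterns
# lives in synapse/rest/client/v2_alpha/_base.py and may differ in detail).
import re

CLIENT_API_PREFIX = "/_matrix/client"

def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
    patterns = []
    if unstable:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
    if v1:
        # v1=True keeps the legacy prefix the deleted helper used to add.
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
    for release in releases:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
    return patterns
```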
@@ -19,11 +19,10 @@ import logging
 from twisted.internet import defer

 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import RoomAlias

-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)

@@ -33,13 +32,14 @@ def register_servlets(hs, http_server):
     ClientAppserviceDirectoryListServer(hs).register(http_server)


-class ClientDirectoryServer(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/directory/room/(?P<room_alias>[^/]*)$")
+class ClientDirectoryServer(RestServlet):
+    PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True)

     def __init__(self, hs):
-        super(ClientDirectoryServer, self).__init__(hs)
+        super(ClientDirectoryServer, self).__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_alias):
@@ -120,13 +120,14 @@ class ClientDirectoryServer(ClientV1RestServlet):
         defer.returnValue((200, {}))


-class ClientDirectoryListServer(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/directory/list/room/(?P<room_id>[^/]*)$")
+class ClientDirectoryListServer(RestServlet):
+    PATTERNS = client_patterns("/directory/list/room/(?P<room_id>[^/]*)$", v1=True)

     def __init__(self, hs):
-        super(ClientDirectoryListServer, self).__init__(hs)
+        super(ClientDirectoryListServer, self).__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -162,15 +163,16 @@ class ClientDirectoryListServer(ClientV1RestServlet):
         defer.returnValue((200, {}))


-class ClientAppserviceDirectoryListServer(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/directory/list/appservice/(?P<network_id>[^/]*)/(?P<room_id>[^/]*)$"
+class ClientAppserviceDirectoryListServer(RestServlet):
+    PATTERNS = client_patterns(
+        "/directory/list/appservice/(?P<network_id>[^/]*)/(?P<room_id>[^/]*)$", v1=True
     )

     def __init__(self, hs):
-        super(ClientAppserviceDirectoryListServer, self).__init__(hs)
+        super(ClientAppserviceDirectoryListServer, self).__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()

     def on_PUT(self, request, network_id, room_id):
         content = parse_json_object_from_request(request)
@@ -19,21 +19,22 @@ import logging
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.streams.config import PaginationConfig

-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)


-class EventStreamRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/events$")
+class EventStreamRestServlet(RestServlet):
+    PATTERNS = client_patterns("/events$", v1=True)

     DEFAULT_LONGPOLL_TIME_MS = 30000

     def __init__(self, hs):
-        super(EventStreamRestServlet, self).__init__(hs)
+        super(EventStreamRestServlet, self).__init__()
         self.event_stream_handler = hs.get_event_stream_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -76,11 +77,11 @@ class EventStreamRestServlet(ClientV1RestServlet):


 # TODO: Unit test gets, with and without auth, with different kinds of events.
-class EventRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/events/(?P<event_id>[^/]*)$")
+class EventRestServlet(RestServlet):
+    PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)

     def __init__(self, hs):
-        super(EventRestServlet, self).__init__(hs)
+        super(EventRestServlet, self).__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
@@ -15,19 +15,19 @@

 from twisted.internet import defer

-from synapse.http.servlet import parse_boolean
+from synapse.http.servlet import RestServlet, parse_boolean
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.streams.config import PaginationConfig

-from .base import ClientV1RestServlet, client_path_patterns
-

 # TODO: Needs unit testing
-class InitialSyncRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/initialSync$")
+class InitialSyncRestServlet(RestServlet):
+    PATTERNS = client_patterns("/initialSync$", v1=True)

     def __init__(self, hs):
-        super(InitialSyncRestServlet, self).__init__(hs)
+        super(InitialSyncRestServlet, self).__init__()
         self.initial_sync_handler = hs.get_initial_sync_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request):
|
|||||||
parse_json_object_from_request,
|
parse_json_object_from_request,
|
||||||
parse_string,
|
parse_string,
|
||||||
)
|
)
|
||||||
|
from synapse.rest.client.v2_alpha._base import client_patterns
|
||||||
from synapse.rest.well_known import WellKnownBuilder
|
from synapse.rest.well_known import WellKnownBuilder
|
||||||
from synapse.types import UserID, map_username_to_mxid_localpart
|
from synapse.types import UserID, map_username_to_mxid_localpart
|
||||||
from synapse.util.msisdn import phone_number_to_msisdn
|
from synapse.util.msisdn import phone_number_to_msisdn
|
||||||
|
|
||||||
from .base import ClientV1RestServlet, client_path_patterns
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@@ -81,15 +80,16 @@ def login_id_thirdparty_from_phone(identifier):
     }


-class LoginRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login$")
+class LoginRestServlet(RestServlet):
+    PATTERNS = client_patterns("/login$", v1=True)
     CAS_TYPE = "m.login.cas"
     SSO_TYPE = "m.login.sso"
     TOKEN_TYPE = "m.login.token"
     JWT_TYPE = "m.login.jwt"

     def __init__(self, hs):
-        super(LoginRestServlet, self).__init__(hs)
+        super(LoginRestServlet, self).__init__()
+        self.hs = hs
         self.jwt_enabled = hs.config.jwt_enabled
         self.jwt_secret = hs.config.jwt_secret
         self.jwt_algorithm = hs.config.jwt_algorithm
@@ -371,7 +371,7 @@ class LoginRestServlet(ClientV1RestServlet):


 class CasRedirectServlet(RestServlet):
-    PATTERNS = client_path_patterns("/login/(cas|sso)/redirect")
+    PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True)

     def __init__(self, hs):
         super(CasRedirectServlet, self).__init__()
@@ -386,7 +386,7 @@ class CasRedirectServlet(RestServlet):
             b"redirectUrl": args[b"redirectUrl"][0]
         }).encode('ascii')
         hs_redirect_url = (self.cas_service_url +
-                           b"/_matrix/client/api/v1/login/cas/ticket")
+                           b"/_matrix/client/r0/login/cas/ticket")
         service_param = urllib.parse.urlencode({
             b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param)
         }).encode('ascii')
|
|||||||
finish_request(request)
|
finish_request(request)
|
||||||
|
|
||||||
|
|
||||||
class CasTicketServlet(ClientV1RestServlet):
|
class CasTicketServlet(RestServlet):
|
||||||
PATTERNS = client_path_patterns("/login/cas/ticket", releases=())
|
PATTERNS = client_patterns("/login/cas/ticket", v1=True)
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(CasTicketServlet, self).__init__(hs)
|
super(CasTicketServlet, self).__init__()
|
||||||
self.cas_server_url = hs.config.cas_server_url
|
self.cas_server_url = hs.config.cas_server_url
|
||||||
self.cas_service_url = hs.config.cas_service_url
|
self.cas_service_url = hs.config.cas_service_url
|
||||||
self.cas_required_attributes = hs.config.cas_required_attributes
|
self.cas_required_attributes = hs.config.cas_required_attributes
|
||||||
self._sso_auth_handler = SSOAuthHandler(hs)
|
self._sso_auth_handler = SSOAuthHandler(hs)
|
||||||
|
self._http_client = hs.get_simple_http_client()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_GET(self, request):
|
def on_GET(self, request):
|
||||||
client_redirect_url = parse_string(request, "redirectUrl", required=True)
|
client_redirect_url = parse_string(request, "redirectUrl", required=True)
|
||||||
http_client = self.hs.get_simple_http_client()
|
|
||||||
uri = self.cas_server_url + "/proxyValidate"
|
uri = self.cas_server_url + "/proxyValidate"
|
||||||
args = {
|
args = {
|
||||||
"ticket": parse_string(request, "ticket", required=True),
|
"ticket": parse_string(request, "ticket", required=True),
|
||||||
"service": self.cas_service_url
|
"service": self.cas_service_url
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
body = yield http_client.get_raw(uri, args)
|
body = yield self._http_client.get_raw(uri, args)
|
||||||
except PartialDownloadError as pde:
|
except PartialDownloadError as pde:
|
||||||
# Twisted raises this error if the connection is closed,
|
# Twisted raises this error if the connection is closed,
|
||||||
# even if that's being used old-http style to signal end-of-data
|
# even if that's being used old-http style to signal end-of-data
|
||||||
|
@@ -17,19 +17,18 @@ import logging

 from twisted.internet import defer

-from synapse.api.errors import AuthError
-
-from .base import ClientV1RestServlet, client_path_patterns
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.v2_alpha._base import client_patterns

 logger = logging.getLogger(__name__)


-class LogoutRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/logout$")
+class LogoutRestServlet(RestServlet):
+    PATTERNS = client_patterns("/logout$", v1=True)

     def __init__(self, hs):
-        super(LogoutRestServlet, self).__init__(hs)
-        self._auth = hs.get_auth()
+        super(LogoutRestServlet, self).__init__()
+        self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()

@@ -38,19 +37,12 @@ class LogoutRestServlet(ClientV1RestServlet):

     @defer.inlineCallbacks
     def on_POST(self, request):
-        try:
-            requester = yield self.auth.get_user_by_req(request)
-        except AuthError:
-            # this implies the access token has already been deleted.
-            defer.returnValue((401, {
-                "errcode": "M_UNKNOWN_TOKEN",
-                "error": "Access Token unknown or expired"
-            }))
-        else:
-            if requester.device_id is None:
-                # the acccess token wasn't associated with a device.
-                # Just delete the access token
-                access_token = self._auth.get_access_token_from_request(request)
-                yield self._auth_handler.delete_access_token(access_token)
-            else:
-                yield self._device_handler.delete_device(
+        requester = yield self.auth.get_user_by_req(request)
+
+        if requester.device_id is None:
+            # the acccess token wasn't associated with a device.
+            # Just delete the access token
+            access_token = self.auth.get_access_token_from_request(request)
+            yield self._auth_handler.delete_access_token(access_token)
+        else:
+            yield self._device_handler.delete_device(
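A note on why the try/except could go: `get_user_by_req` raises `AuthError` for a bad token, and an uncaught `AuthError` is already serialised by the generic servlet error handling into a 401 with the appropriate errcode, so the hand-rolled M_UNKNOWN_TOKEN response duplicated framework behaviour. A simplified sketch of what the framework does with it:

```python
# Simplified sketch of the framework-level handling that makes the removed
# try/except redundant (the real logic lives in synapse.http.server).
from twisted.internet import defer

from synapse.api.errors import AuthError
from synapse.http.server import respond_with_json

@defer.inlineCallbacks
def handle(self, request):
    try:
        requester = yield self.auth.get_user_by_req(request)
    except AuthError as e:
        # e.code is 401 and e.errcode identifies the failure,
        # e.g. M_UNKNOWN_TOKEN for an unknown or expired token.
        respond_with_json(request, e.code, {"errcode": e.errcode, "error": e.msg})
        return
    defer.returnValue(requester)
```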
@@ -59,11 +51,11 @@ class LogoutRestServlet(ClientV1RestServlet):
         defer.returnValue((200, {}))


-class LogoutAllRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/logout/all$")
+class LogoutAllRestServlet(RestServlet):
+    PATTERNS = client_patterns("/logout/all$", v1=True)

     def __init__(self, hs):
-        super(LogoutAllRestServlet, self).__init__(hs)
+        super(LogoutAllRestServlet, self).__init__()
         self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
@@ -23,21 +23,22 @@ from twisted.internet import defer

 from synapse.api.errors import AuthError, SynapseError
 from synapse.handlers.presence import format_user_presence_state
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import UserID

-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)


-class PresenceStatusRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
+class PresenceStatusRestServlet(RestServlet):
+    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status", v1=True)

     def __init__(self, hs):
-        super(PresenceStatusRestServlet, self).__init__(hs)
+        super(PresenceStatusRestServlet, self).__init__()
+        self.hs = hs
         self.presence_handler = hs.get_presence_handler()
         self.clock = hs.get_clock()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
@@ -18,21 +18,23 @@ import logging

 from twisted.internet import defer

-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import UserID

-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)


-class ProfileDisplaynameRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/displayname")
+class ProfileDisplaynameRestServlet(RestServlet):
+    PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)

     def __init__(self, hs):
-        super(ProfileDisplaynameRestServlet, self).__init__(hs)
+        super(ProfileDisplaynameRestServlet, self).__init__()
+        self.hs = hs
         self.profile_handler = hs.get_profile_handler()
         self.http_client = hs.get_simple_http_client()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
@@ -95,13 +97,15 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet):
         )


-class ProfileAvatarURLRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/avatar_url")
+class ProfileAvatarURLRestServlet(RestServlet):
+    PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)

     def __init__(self, hs):
-        super(ProfileAvatarURLRestServlet, self).__init__(hs)
+        super(ProfileAvatarURLRestServlet, self).__init__()
+        self.hs = hs
         self.profile_handler = hs.get_profile_handler()
         self.http_client = hs.get_simple_http_client()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class ProfileRestServlet(ClientV1RestServlet):
|
class ProfileRestServlet(RestServlet):
|
||||||
PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)")
|
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(ProfileRestServlet, self).__init__(hs)
|
super(ProfileRestServlet, self).__init__()
|
||||||
|
self.hs = hs
|
||||||
self.profile_handler = hs.get_profile_handler()
|
self.profile_handler = hs.get_profile_handler()
|
||||||
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_GET(self, request, user_id):
|
def on_GET(self, request, user_id):
|
||||||
|
@@ -21,22 +21,22 @@ from synapse.api.errors import (
     SynapseError,
     UnrecognizedRequestError,
 )
-from synapse.http.servlet import parse_json_value_from_request, parse_string
+from synapse.http.servlet import RestServlet, parse_json_value_from_request, parse_string
 from synapse.push.baserules import BASE_RULE_IDS
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.push.rulekinds import PRIORITY_CLASS_MAP
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException

-from .base import ClientV1RestServlet, client_path_patterns
-

-class PushRuleRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/(?P<path>pushrules/.*)$")
+
+class PushRuleRestServlet(RestServlet):
+    PATTERNS = client_patterns("/(?P<path>pushrules/.*)$", v1=True)
     SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = (
         "Unrecognised request: You probably wanted a trailing slash")

     def __init__(self, hs):
-        super(PushRuleRestServlet, self).__init__(hs)
+        super(PushRuleRestServlet, self).__init__()
+        self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.notifier = hs.get_notifier()
         self._is_worker = hs.config.worker_app is not None
@@ -26,17 +26,18 @@ from synapse.http.servlet import (
     parse_string,
 )
 from synapse.push import PusherConfigException
+from synapse.rest.client.v2_alpha._base import client_patterns

-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)


-class PushersRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/pushers$")
+class PushersRestServlet(RestServlet):
+    PATTERNS = client_patterns("/pushers$", v1=True)

     def __init__(self, hs):
-        super(PushersRestServlet, self).__init__(hs)
+        super(PushersRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request):
|
|||||||
return 200, {}
|
return 200, {}
|
||||||
|
|
||||||
|
|
||||||
class PushersSetRestServlet(ClientV1RestServlet):
|
class PushersSetRestServlet(RestServlet):
|
||||||
PATTERNS = client_path_patterns("/pushers/set$")
|
PATTERNS = client_patterns("/pushers/set$", v1=True)
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(PushersSetRestServlet, self).__init__(hs)
|
super(PushersSetRestServlet, self).__init__()
|
||||||
|
self.hs = hs
|
||||||
|
self.auth = hs.get_auth()
|
||||||
self.notifier = hs.get_notifier()
|
self.notifier = hs.get_notifier()
|
||||||
self.pusher_pool = self.hs.get_pusherpool()
|
self.pusher_pool = self.hs.get_pusherpool()
|
||||||
|
|
||||||
@@ -141,7 +144,7 @@ class PushersRemoveRestServlet(RestServlet):
     """
     To allow pusher to be delete by clicking a link (ie. GET request)
     """
-    PATTERNS = client_path_patterns("/pushers/remove$")
+    PATTERNS = client_patterns("/pushers/remove$", v1=True)
     SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>"

     def __init__(self, hs):
@@ -28,37 +28,45 @@ from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.api.filtering import Filter
 from synapse.events.utils import format_event_for_client_v2
 from synapse.http.servlet import (
+    RestServlet,
     assert_params_in_dict,
     parse_integer,
     parse_json_object_from_request,
     parse_string,
 )
+from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.storage.state import StateFilter
 from synapse.streams.config import PaginationConfig
 from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID

-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)


-class RoomCreateRestServlet(ClientV1RestServlet):
+class TransactionRestServlet(RestServlet):
+    def __init__(self, hs):
+        super(TransactionRestServlet, self).__init__()
+        self.txns = HttpTransactionCache(hs)
+
+
+class RoomCreateRestServlet(TransactionRestServlet):
     # No PATTERN; we have custom dispatch rules here

     def __init__(self, hs):
         super(RoomCreateRestServlet, self).__init__(hs)
         self._room_creation_handler = hs.get_room_creation_handler()
+        self.auth = hs.get_auth()

     def register(self, http_server):
         PATTERNS = "/createRoom"
         register_txn_path(self, PATTERNS, http_server)
         # define CORS for all of /rooms in RoomCreateRestServlet for simplicity
         http_server.register_paths("OPTIONS",
-                                   client_path_patterns("/rooms(?:/.*)?$"),
+                                   client_patterns("/rooms(?:/.*)?$", v1=True),
                                    self.on_OPTIONS)
         # define CORS for /createRoom[/txnid]
         http_server.register_paths("OPTIONS",
-                                   client_path_patterns("/createRoom(?:/.*)?$"),
+                                   client_patterns("/createRoom(?:/.*)?$", v1=True),
                                    self.on_OPTIONS)

     def on_PUT(self, request, txn_id):
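The new `TransactionRestServlet` base gives every servlet that accepts `PUT /.../{txnId}` a shared `HttpTransactionCache`. Its `fetch_or_execute_request` method replays the cached response when a client retries the same transaction ID, which is what makes such PUTs idempotent; a sketch of the usual call site:

```python
# Sketch: idempotent PUT dispatch via the shared transaction cache.
def on_PUT(self, request, txn_id):
    # A retry with the same txn_id returns the stored (code, body) pair
    # instead of re-running on_POST and creating a duplicate event.
    return self.txns.fetch_or_execute_request(request, self.on_POST, request)
```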
@@ -85,13 +93,14 @@ class RoomCreateRestServlet(ClientV1RestServlet):


 # TODO: Needs unit testing for generic events
-class RoomStateEventRestServlet(ClientV1RestServlet):
+class RoomStateEventRestServlet(TransactionRestServlet):
     def __init__(self, hs):
         super(RoomStateEventRestServlet, self).__init__(hs)
         self.handlers = hs.get_handlers()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()

     def register(self, http_server):
         # /room/$roomid/state/$eventtype
@@ -102,16 +111,16 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
                         "(?P<event_type>[^/]*)/(?P<state_key>[^/]*)$")

         http_server.register_paths("GET",
-                                   client_path_patterns(state_key),
+                                   client_patterns(state_key, v1=True),
                                    self.on_GET)
         http_server.register_paths("PUT",
-                                   client_path_patterns(state_key),
+                                   client_patterns(state_key, v1=True),
                                    self.on_PUT)
         http_server.register_paths("GET",
-                                   client_path_patterns(no_state_key),
+                                   client_patterns(no_state_key, v1=True),
                                    self.on_GET_no_state_key)
         http_server.register_paths("PUT",
-                                   client_path_patterns(no_state_key),
+                                   client_patterns(no_state_key, v1=True),
                                    self.on_PUT_no_state_key)

     def on_GET_no_state_key(self, request, room_id, event_type):
@@ -185,11 +194,12 @@ class RoomStateEventRestServlet(ClientV1RestServlet):


 # TODO: Needs unit testing for generic events + feedback
-class RoomSendEventRestServlet(ClientV1RestServlet):
+class RoomSendEventRestServlet(TransactionRestServlet):

     def __init__(self, hs):
         super(RoomSendEventRestServlet, self).__init__(hs)
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.auth = hs.get_auth()

     def register(self, http_server):
         # /rooms/$roomid/send/$event_type[/$txn_id]
|
|||||||
|
|
||||||
|
|
||||||
# TODO: Needs unit testing for room ID + alias joins
|
# TODO: Needs unit testing for room ID + alias joins
|
||||||
class JoinRoomAliasServlet(ClientV1RestServlet):
|
class JoinRoomAliasServlet(TransactionRestServlet):
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(JoinRoomAliasServlet, self).__init__(hs)
|
super(JoinRoomAliasServlet, self).__init__(hs)
|
||||||
self.room_member_handler = hs.get_room_member_handler()
|
self.room_member_handler = hs.get_room_member_handler()
|
||||||
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
def register(self, http_server):
|
def register(self, http_server):
|
||||||
# /join/$room_identifier[/$txn_id]
|
# /join/$room_identifier[/$txn_id]
|
||||||
@ -291,8 +302,13 @@ class JoinRoomAliasServlet(ClientV1RestServlet):
|
|||||||
|
|
||||||
|
|
||||||
# TODO: Needs unit testing
|
# TODO: Needs unit testing
|
||||||
class PublicRoomListRestServlet(ClientV1RestServlet):
|
class PublicRoomListRestServlet(TransactionRestServlet):
|
||||||
PATTERNS = client_path_patterns("/publicRooms$")
|
PATTERNS = client_patterns("/publicRooms$", v1=True)
|
||||||
|
|
||||||
|
def __init__(self, hs):
|
||||||
|
super(PublicRoomListRestServlet, self).__init__(hs)
|
||||||
|
self.hs = hs
|
||||||
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_GET(self, request):
|
def on_GET(self, request):
|
||||||
@ -382,12 +398,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
|
|||||||
|
|
||||||
|
|
||||||
# TODO: Needs unit testing
|
# TODO: Needs unit testing
|
||||||
class RoomMemberListRestServlet(ClientV1RestServlet):
|
class RoomMemberListRestServlet(RestServlet):
|
||||||
PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/members$")
|
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True)
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(RoomMemberListRestServlet, self).__init__(hs)
|
super(RoomMemberListRestServlet, self).__init__()
|
||||||
self.message_handler = hs.get_message_handler()
|
self.message_handler = hs.get_message_handler()
|
||||||
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_GET(self, request, room_id):
|
def on_GET(self, request, room_id):
|
||||||
@@ -436,12 +453,13 @@ class RoomMemberListRestServlet(ClientV1RestServlet):

 # deprecated in favour of /members?membership=join?
 # except it does custom AS logic and has a simpler return format
-class JoinedRoomMemberListRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$")
+class JoinedRoomMemberListRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True)

     def __init__(self, hs):
-        super(JoinedRoomMemberListRestServlet, self).__init__(hs)
+        super(JoinedRoomMemberListRestServlet, self).__init__()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -457,12 +475,13 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet):


 # TODO: Needs better unit testing
-class RoomMessageListRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/messages$")
+class RoomMessageListRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True)

     def __init__(self, hs):
-        super(RoomMessageListRestServlet, self).__init__(hs)
+        super(RoomMessageListRestServlet, self).__init__()
         self.pagination_handler = hs.get_pagination_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -475,6 +494,8 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
         if filter_bytes:
             filter_json = urlparse.unquote(filter_bytes.decode("UTF-8"))
             event_filter = Filter(json.loads(filter_json))
+            if event_filter.filter_json.get("event_format", "client") == "federation":
+                as_client_event = False
         else:
             event_filter = None
         msgs = yield self.pagination_handler.get_messages(
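The new branch keys off the `event_format` field of the standard Matrix filter passed to `/messages`; a sketch of the request a client would make to get raw federation-format events (room ID and prefix illustrative):

```python
# Sketch: requesting federation-format events from /messages.
import json
from urllib.parse import quote

filter_json = {"event_format": "federation"}
url = "/_matrix/client/r0/rooms/%s/messages?filter=%s" % (
    "!room:example.com",
    quote(json.dumps(filter_json)),
)
# With event_format == "federation" the servlet sets as_client_event=False,
# so events come back in their raw federation form.
```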
@@ -489,12 +510,13 @@ class RoomMessageListRestServlet(ClientV1RestServlet):


 # TODO: Needs unit testing
-class RoomStateRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/state$")
+class RoomStateRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True)

     def __init__(self, hs):
-        super(RoomStateRestServlet, self).__init__(hs)
+        super(RoomStateRestServlet, self).__init__()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -509,12 +531,13 @@ class RoomStateRestServlet(ClientV1RestServlet):


 # TODO: Needs unit testing
-class RoomInitialSyncRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$")
+class RoomInitialSyncRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True)

     def __init__(self, hs):
-        super(RoomInitialSyncRestServlet, self).__init__(hs)
+        super(RoomInitialSyncRestServlet, self).__init__()
         self.initial_sync_handler = hs.get_initial_sync_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -528,16 +551,17 @@ class RoomInitialSyncRestServlet(ClientV1RestServlet):
         defer.returnValue((200, content))


-class RoomEventServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$"
+class RoomEventServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$", v1=True
     )

     def __init__(self, hs):
-        super(RoomEventServlet, self).__init__(hs)
+        super(RoomEventServlet, self).__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
@@ -552,16 +576,17 @@ class RoomEventServlet(ClientV1RestServlet):
         defer.returnValue((404, "Event not found."))


-class RoomEventContextServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$"
+class RoomEventContextServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$", v1=True
     )

     def __init__(self, hs):
-        super(RoomEventContextServlet, self).__init__(hs)
+        super(RoomEventContextServlet, self).__init__()
         self.clock = hs.get_clock()
         self.room_context_handler = hs.get_room_context_handler()
         self._event_serializer = hs.get_event_client_serializer()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
@@ -607,10 +632,11 @@ class RoomEventContextServlet(ClientV1RestServlet):
         defer.returnValue((200, results))


-class RoomForgetRestServlet(ClientV1RestServlet):
+class RoomForgetRestServlet(TransactionRestServlet):
     def __init__(self, hs):
         super(RoomForgetRestServlet, self).__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()

     def register(self, http_server):
         PATTERNS = ("/rooms/(?P<room_id>[^/]*)/forget")
@@ -637,11 +663,12 @@ class RoomForgetRestServlet(ClientV1RestServlet):


 # TODO: Needs unit testing
-class RoomMembershipRestServlet(ClientV1RestServlet):
+class RoomMembershipRestServlet(TransactionRestServlet):

     def __init__(self, hs):
         super(RoomMembershipRestServlet, self).__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()

     def register(self, http_server):
         # /rooms/$roomid/[invite|join|leave]
||||||
@ -721,11 +748,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class RoomRedactEventRestServlet(ClientV1RestServlet):
|
class RoomRedactEventRestServlet(TransactionRestServlet):
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(RoomRedactEventRestServlet, self).__init__(hs)
|
super(RoomRedactEventRestServlet, self).__init__(hs)
|
||||||
self.handlers = hs.get_handlers()
|
self.handlers = hs.get_handlers()
|
||||||
self.event_creation_handler = hs.get_event_creation_handler()
|
self.event_creation_handler = hs.get_event_creation_handler()
|
||||||
|
self.auth = hs.get_auth()
|
||||||
|
|
||||||
def register(self, http_server):
|
def register(self, http_server):
|
||||||
PATTERNS = ("/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)")
|
PATTERNS = ("/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)")
|
||||||
@@ -756,15 +784,16 @@ class RoomRedactEventRestServlet(ClientV1RestServlet):
         )


-class RoomTypingRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$"
+class RoomTypingRestServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$", v1=True
     )

     def __init__(self, hs):
-        super(RoomTypingRestServlet, self).__init__(hs)
+        super(RoomTypingRestServlet, self).__init__()
         self.presence_handler = hs.get_presence_handler()
         self.typing_handler = hs.get_typing_handler()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_PUT(self, request, room_id, user_id):
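Every servlet in this diff gains a self.auth attribute because the request handlers authenticate callers themselves. A sketch of a typical handler body under that pattern (illustrative, not verbatim from this diff; self.auth.get_user_by_req is the standard Synapse call of this era):

    @defer.inlineCallbacks
    def on_PUT(self, request, room_id, user_id):
        # Resolve the access token to a requester before doing any work;
        # this is why each migrated servlet now sets self.auth itself.
        requester = yield self.auth.get_user_by_req(request)
        # ... act on behalf of `requester`, then respond.
        defer.returnValue((200, {}))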
@@ -797,14 +826,13 @@ class RoomTypingRestServlet(ClientV1RestServlet):
         defer.returnValue((200, {}))


-class SearchRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/search$"
-    )
+class SearchRestServlet(RestServlet):
+    PATTERNS = client_patterns("/search$", v1=True)

     def __init__(self, hs):
-        super(SearchRestServlet, self).__init__(hs)
+        super(SearchRestServlet, self).__init__()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -822,12 +850,13 @@ class SearchRestServlet(ClientV1RestServlet):
         defer.returnValue((200, results))


-class JoinedRoomsRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/joined_rooms$")
+class JoinedRoomsRestServlet(RestServlet):
+    PATTERNS = client_patterns("/joined_rooms$", v1=True)

     def __init__(self, hs):
-        super(JoinedRoomsRestServlet, self).__init__(hs)
+        super(JoinedRoomsRestServlet, self).__init__()
         self.store = hs.get_datastore()
+        self.auth = hs.get_auth()

     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -852,18 +881,18 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False):
     """
     http_server.register_paths(
         "POST",
-        client_path_patterns(regex_string + "$"),
+        client_patterns(regex_string + "$", v1=True),
         servlet.on_POST
     )
     http_server.register_paths(
         "PUT",
-        client_path_patterns(regex_string + "/(?P<txn_id>[^/]*)$"),
+        client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
         servlet.on_PUT
     )
     if with_get:
         http_server.register_paths(
             "GET",
-            client_path_patterns(regex_string + "/(?P<txn_id>[^/]*)$"),
+            client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
             servlet.on_GET
         )

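register_txn_path is what gives the TransactionRestServlet subclasses above their idempotent PUT variant: each endpoint is registered twice, as a plain POST and as a PUT keyed by txn_id (plus an optional GET). A sketch of the call, mirroring the register bodies shown in this diff:

    def register(self, http_server):
        # Registers POST /rooms/{room_id}/forget and
        # PUT  /rooms/{room_id}/forget/{txn_id} under the v1 prefix.
        PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
        register_txn_path(self, PATTERNS, http_server)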
@@ -19,11 +19,17 @@ import hmac

 from twisted.internet import defer

-from .base import ClientV1RestServlet, client_path_patterns
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.v2_alpha._base import client_patterns


-class VoipRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/voip/turnServer$")
+class VoipRestServlet(RestServlet):
+    PATTERNS = client_patterns("/voip/turnServer$", v1=True)

+    def __init__(self, hs):
+        super(VoipRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+
     @defer.inlineCallbacks
     def on_GET(self, request):
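Dropping ClientV1RestServlet does not change how a servlet is attached to the HTTP server. Assuming voip.py keeps Synapse's usual module-level hook (not shown in this hunk), registration would still look like:

    def register_servlets(hs, http_server):
        VoipRestServlet(hs).register(http_server)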
@@ -26,8 +26,7 @@ from synapse.api.urls import CLIENT_API_PREFIX
 logger = logging.getLogger(__name__)


-def client_v2_patterns(path_regex, releases=(0,),
-                       unstable=True):
+def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
     """Creates a regex compiled client path with the correct client path
     prefix.

@@ -41,6 +40,9 @@ def client_v2_patterns(path_regex, releases=(0,),
     if unstable:
         unstable_prefix = CLIENT_API_PREFIX + "/unstable"
         patterns.append(re.compile("^" + unstable_prefix + path_regex))
+    if v1:
+        v1_prefix = CLIENT_API_PREFIX + "/api/v1"
+        patterns.append(re.compile("^" + v1_prefix + path_regex))
     for release in releases:
         new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
         patterns.append(re.compile("^" + new_prefix + path_regex))
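The v1=True branch is the point of the rename from client_v2_patterns to client_patterns: the shared helper can now emit the historical /api/v1 prefix as well, which is what lets the v1-only client_path_patterns helper be retired. With CLIENT_API_PREFIX set to /_matrix/client (its value in synapse/api/urls.py), a call like the one in SearchRestServlet expands roughly as follows:

    # client_patterns("/search$", v1=True) compiles, in order:
    #   ^/_matrix/client/unstable/search$   (unstable=True default)
    #   ^/_matrix/client/api/v1/search$     (the new v1=True branch)
    #   ^/_matrix/client/r0/search$         (releases=(0,) default)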
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user