mirror of
https://git.anonymousland.org/anonymousland/synapse-product.git
synced 2024-12-17 13:44:21 -05:00
Synapse 0.99.1 (2019-02-14)
=========================== Features -------- - Include m.room.encryption on invites by default ([\#3902](https://github.com/matrix-org/synapse/issues/3902)) - Federation OpenID listener resource can now be activated even if federation is disabled ([\#4420](https://github.com/matrix-org/synapse/issues/4420)) - Synapse's ACME support will now correctly reprovision a certificate that approaches its expiry while Synapse is running. ([\#4522](https://github.com/matrix-org/synapse/issues/4522)) - Add ability to update backup versions ([\#4580](https://github.com/matrix-org/synapse/issues/4580)) - Allow the "unavailable" presence status for /sync. This change makes Synapse compliant with r0.4.0 of the Client-Server specification. ([\#4592](https://github.com/matrix-org/synapse/issues/4592)) - There is no longer any need to specify `no_tls`: it is inferred from the absence of TLS listeners ([\#4613](https://github.com/matrix-org/synapse/issues/4613), [\#4615](https://github.com/matrix-org/synapse/issues/4615), [\#4617](https://github.com/matrix-org/synapse/issues/4617), [\#4636](https://github.com/matrix-org/synapse/issues/4636)) - The default configuration no longer requires TLS certificates. ([\#4614](https://github.com/matrix-org/synapse/issues/4614)) Bugfixes -------- - Copy over room federation ability on room upgrade. ([\#4530](https://github.com/matrix-org/synapse/issues/4530)) - Fix noisy "twisted.internet.task.TaskStopped" errors in logs ([\#4546](https://github.com/matrix-org/synapse/issues/4546)) - Synapse is now tolerant of the `tls_fingerprints` option being None or not specified. ([\#4589](https://github.com/matrix-org/synapse/issues/4589)) - Fix 'no unique or exclusion constraint' error ([\#4591](https://github.com/matrix-org/synapse/issues/4591)) - Transfer Server ACLs on room upgrade. ([\#4608](https://github.com/matrix-org/synapse/issues/4608)) - Fix failure to start when not TLS certificate was given even if TLS was disabled. 
([\#4618](https://github.com/matrix-org/synapse/issues/4618)) - Fix self-signed cert notice from generate-config. ([\#4625](https://github.com/matrix-org/synapse/issues/4625)) - Fix performance of `user_ips` table deduplication background update ([\#4626](https://github.com/matrix-org/synapse/issues/4626), [\#4627](https://github.com/matrix-org/synapse/issues/4627)) Internal Changes ---------------- - Change the user directory state query to use a filtered call to the db instead of a generic one. ([\#4462](https://github.com/matrix-org/synapse/issues/4462)) - Reject federation transactions if they include more than 50 PDUs or 100 EDUs. ([\#4513](https://github.com/matrix-org/synapse/issues/4513)) - Reduce duplication of ``synapse.app`` code. ([\#4567](https://github.com/matrix-org/synapse/issues/4567)) - Fix docker upload job to push -py2 images. ([\#4576](https://github.com/matrix-org/synapse/issues/4576)) - Add port configuration information to ACME instructions. ([\#4578](https://github.com/matrix-org/synapse/issues/4578)) - Update MSC1711 FAQ to calrify .well-known usage ([\#4584](https://github.com/matrix-org/synapse/issues/4584)) - Clean up default listener configuration ([\#4586](https://github.com/matrix-org/synapse/issues/4586)) - Clarifications for reverse proxy docs ([\#4607](https://github.com/matrix-org/synapse/issues/4607)) - Move ClientTLSOptionsFactory init out of `refresh_certificates` ([\#4611](https://github.com/matrix-org/synapse/issues/4611)) - Fail cleanly if listener config lacks a 'port' ([\#4616](https://github.com/matrix-org/synapse/issues/4616)) - Remove redundant entries from docker config ([\#4619](https://github.com/matrix-org/synapse/issues/4619)) - README updates ([\#4621](https://github.com/matrix-org/synapse/issues/4621)) -----BEGIN PGP SIGNATURE----- iQFHBAABCgAxFiEEQlNDQm4FMsm53u1sih+T1XW16NUFAlxlemgTHHJpY2hhcmRA bWF0cml4Lm9yZwAKCRCKH5PVdbXo1eKYCACR9TcOvMver/YyD2qP+dY6Lt24f8zG 
zYYzHGAHin+p204q8Pp6o0XLe4UuLDuhAyNVPZyj1wzwHYdubRvdah1uFwPdxmCY tGbJG5p37ykSEfEwcxdXEjYfPqflOwQL5aCeXyCCLWSdVVFKkWCXGgw8F6WPkgrI QwWKTfsM3wCnfa8ryKAXHxcmX2G1JncZ0ouUZTVNz5vokBsA19IaLvfJ5Rv3Kk59 eXsBB/yE+9Dat4A439AGfVDQDKiGYvuhppJmUdYRMqxulzakd8diyZqBDAHZafqt QdjxnDx2e0OtSxI3RSevABnDnNyJ4NsUEtrny1Lh/MV72T9K3yEbHuwH =UCD1 -----END PGP SIGNATURE----- Merge tag 'v0.99.1' Synapse 0.99.1 (2019-02-14) =========================== Features -------- - Include m.room.encryption on invites by default ([\#3902](https://github.com/matrix-org/synapse/issues/3902)) - Federation OpenID listener resource can now be activated even if federation is disabled ([\#4420](https://github.com/matrix-org/synapse/issues/4420)) - Synapse's ACME support will now correctly reprovision a certificate that approaches its expiry while Synapse is running. ([\#4522](https://github.com/matrix-org/synapse/issues/4522)) - Add ability to update backup versions ([\#4580](https://github.com/matrix-org/synapse/issues/4580)) - Allow the "unavailable" presence status for /sync. This change makes Synapse compliant with r0.4.0 of the Client-Server specification. ([\#4592](https://github.com/matrix-org/synapse/issues/4592)) - There is no longer any need to specify `no_tls`: it is inferred from the absence of TLS listeners ([\#4613](https://github.com/matrix-org/synapse/issues/4613), [\#4615](https://github.com/matrix-org/synapse/issues/4615), [\#4617](https://github.com/matrix-org/synapse/issues/4617), [\#4636](https://github.com/matrix-org/synapse/issues/4636)) - The default configuration no longer requires TLS certificates. ([\#4614](https://github.com/matrix-org/synapse/issues/4614)) Bugfixes -------- - Copy over room federation ability on room upgrade. 
([\#4530](https://github.com/matrix-org/synapse/issues/4530)) - Fix noisy "twisted.internet.task.TaskStopped" errors in logs ([\#4546](https://github.com/matrix-org/synapse/issues/4546)) - Synapse is now tolerant of the `tls_fingerprints` option being None or not specified. ([\#4589](https://github.com/matrix-org/synapse/issues/4589)) - Fix 'no unique or exclusion constraint' error ([\#4591](https://github.com/matrix-org/synapse/issues/4591)) - Transfer Server ACLs on room upgrade. ([\#4608](https://github.com/matrix-org/synapse/issues/4608)) - Fix failure to start when not TLS certificate was given even if TLS was disabled. ([\#4618](https://github.com/matrix-org/synapse/issues/4618)) - Fix self-signed cert notice from generate-config. ([\#4625](https://github.com/matrix-org/synapse/issues/4625)) - Fix performance of `user_ips` table deduplication background update ([\#4626](https://github.com/matrix-org/synapse/issues/4626), [\#4627](https://github.com/matrix-org/synapse/issues/4627)) Internal Changes ---------------- - Change the user directory state query to use a filtered call to the db instead of a generic one. ([\#4462](https://github.com/matrix-org/synapse/issues/4462)) - Reject federation transactions if they include more than 50 PDUs or 100 EDUs. ([\#4513](https://github.com/matrix-org/synapse/issues/4513)) - Reduce duplication of ``synapse.app`` code. ([\#4567](https://github.com/matrix-org/synapse/issues/4567)) - Fix docker upload job to push -py2 images. ([\#4576](https://github.com/matrix-org/synapse/issues/4576)) - Add port configuration information to ACME instructions. 
([\#4578](https://github.com/matrix-org/synapse/issues/4578)) - Update MSC1711 FAQ to calrify .well-known usage ([\#4584](https://github.com/matrix-org/synapse/issues/4584)) - Clean up default listener configuration ([\#4586](https://github.com/matrix-org/synapse/issues/4586)) - Clarifications for reverse proxy docs ([\#4607](https://github.com/matrix-org/synapse/issues/4607)) - Move ClientTLSOptionsFactory init out of `refresh_certificates` ([\#4611](https://github.com/matrix-org/synapse/issues/4611)) - Fail cleanly if listener config lacks a 'port' ([\#4616](https://github.com/matrix-org/synapse/issues/4616)) - Remove redundant entries from docker config ([\#4619](https://github.com/matrix-org/synapse/issues/4619)) - README updates ([\#4621](https://github.com/matrix-org/synapse/issues/4621))
This commit is contained in:
commit
00cf679bf2
46
CHANGES.md
46
CHANGES.md
@ -1,3 +1,49 @@
|
|||||||
|
Synapse 0.99.1 (2019-02-14)
|
||||||
|
===========================
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Include m.room.encryption on invites by default ([\#3902](https://github.com/matrix-org/synapse/issues/3902))
|
||||||
|
- Federation OpenID listener resource can now be activated even if federation is disabled ([\#4420](https://github.com/matrix-org/synapse/issues/4420))
|
||||||
|
- Synapse's ACME support will now correctly reprovision a certificate that approaches its expiry while Synapse is running. ([\#4522](https://github.com/matrix-org/synapse/issues/4522))
|
||||||
|
- Add ability to update backup versions ([\#4580](https://github.com/matrix-org/synapse/issues/4580))
|
||||||
|
- Allow the "unavailable" presence status for /sync.
|
||||||
|
This change makes Synapse compliant with r0.4.0 of the Client-Server specification. ([\#4592](https://github.com/matrix-org/synapse/issues/4592))
|
||||||
|
- There is no longer any need to specify `no_tls`: it is inferred from the absence of TLS listeners ([\#4613](https://github.com/matrix-org/synapse/issues/4613), [\#4615](https://github.com/matrix-org/synapse/issues/4615), [\#4617](https://github.com/matrix-org/synapse/issues/4617), [\#4636](https://github.com/matrix-org/synapse/issues/4636))
|
||||||
|
- The default configuration no longer requires TLS certificates. ([\#4614](https://github.com/matrix-org/synapse/issues/4614))
|
||||||
|
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Copy over room federation ability on room upgrade. ([\#4530](https://github.com/matrix-org/synapse/issues/4530))
|
||||||
|
- Fix noisy "twisted.internet.task.TaskStopped" errors in logs ([\#4546](https://github.com/matrix-org/synapse/issues/4546))
|
||||||
|
- Synapse is now tolerant of the `tls_fingerprints` option being None or not specified. ([\#4589](https://github.com/matrix-org/synapse/issues/4589))
|
||||||
|
- Fix 'no unique or exclusion constraint' error ([\#4591](https://github.com/matrix-org/synapse/issues/4591))
|
||||||
|
- Transfer Server ACLs on room upgrade. ([\#4608](https://github.com/matrix-org/synapse/issues/4608))
|
||||||
|
- Fix failure to start when no TLS certificate was given even if TLS was disabled. ([\#4618](https://github.com/matrix-org/synapse/issues/4618))
|
||||||
|
- Fix self-signed cert notice from generate-config. ([\#4625](https://github.com/matrix-org/synapse/issues/4625))
|
||||||
|
- Fix performance of `user_ips` table deduplication background update ([\#4626](https://github.com/matrix-org/synapse/issues/4626), [\#4627](https://github.com/matrix-org/synapse/issues/4627))
|
||||||
|
|
||||||
|
|
||||||
|
Internal Changes
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- Change the user directory state query to use a filtered call to the db instead of a generic one. ([\#4462](https://github.com/matrix-org/synapse/issues/4462))
|
||||||
|
- Reject federation transactions if they include more than 50 PDUs or 100 EDUs. ([\#4513](https://github.com/matrix-org/synapse/issues/4513))
|
||||||
|
- Reduce duplication of ``synapse.app`` code. ([\#4567](https://github.com/matrix-org/synapse/issues/4567))
|
||||||
|
- Fix docker upload job to push -py2 images. ([\#4576](https://github.com/matrix-org/synapse/issues/4576))
|
||||||
|
- Add port configuration information to ACME instructions. ([\#4578](https://github.com/matrix-org/synapse/issues/4578))
|
||||||
|
- Update MSC1711 FAQ to clarify .well-known usage ([\#4584](https://github.com/matrix-org/synapse/issues/4584))
|
||||||
|
- Clean up default listener configuration ([\#4586](https://github.com/matrix-org/synapse/issues/4586))
|
||||||
|
- Clarifications for reverse proxy docs ([\#4607](https://github.com/matrix-org/synapse/issues/4607))
|
||||||
|
- Move ClientTLSOptionsFactory init out of `refresh_certificates` ([\#4611](https://github.com/matrix-org/synapse/issues/4611))
|
||||||
|
- Fail cleanly if listener config lacks a 'port' ([\#4616](https://github.com/matrix-org/synapse/issues/4616))
|
||||||
|
- Remove redundant entries from docker config ([\#4619](https://github.com/matrix-org/synapse/issues/4619))
|
||||||
|
- README updates ([\#4621](https://github.com/matrix-org/synapse/issues/4621))
|
||||||
|
|
||||||
|
|
||||||
Synapse 0.99.0 (2019-02-05)
|
Synapse 0.99.0 (2019-02-05)
|
||||||
===========================
|
===========================
|
||||||
|
|
||||||
|
39
INSTALL.md
39
INSTALL.md
@ -350,17 +350,34 @@ Once you have installed synapse as above, you will need to configure it.
|
|||||||
|
|
||||||
## TLS certificates
|
## TLS certificates
|
||||||
|
|
||||||
The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
|
The default configuration exposes a single HTTP port: http://localhost:8008. It
|
||||||
configured without TLS; it should be behind a reverse proxy for TLS/SSL
|
is suitable for local testing, but for any practical use, you will either need
|
||||||
termination on port 443 which in turn should be used for clients. Port 8448
|
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
|
||||||
is configured to use TLS for Federation with a self-signed or verified
|
|
||||||
certificate, but please be aware that a valid certificate will be required in
|
|
||||||
Synapse v1.0. Instructions for having Synapse automatically provision and renew federation certificates through ACME can be found at [ACME.md](docs/ACME.md).
|
|
||||||
|
|
||||||
If you would like to use your own certificates, you can do so by changing
|
For information on using a reverse proxy, see
|
||||||
`tls_certificate_path` and `tls_private_key_path` in `homeserver.yaml`;
|
[docs/reverse_proxy.rst](docs/reverse_proxy.rst).
|
||||||
alternatively, you can use a reverse-proxy. Apart from port 8448 using TLS,
|
|
||||||
both ports are the same in the default configuration.
|
To configure Synapse to expose an HTTPS port, you will need to edit
|
||||||
|
`homeserver.yaml`.
|
||||||
|
|
||||||
|
First, under the `listeners` section, uncomment the configuration for the
|
||||||
|
TLS-enabled listener. (Remove the hash sign (`#`) and space at the start of
|
||||||
|
each line). The relevant lines are like this:
|
||||||
|
|
||||||
|
```
|
||||||
|
- port: 8448
|
||||||
|
type: http
|
||||||
|
tls: true
|
||||||
|
resources:
|
||||||
|
- names: [client, federation]
|
||||||
|
```
|
||||||
|
|
||||||
|
You will also need to uncomment the `tls_certificate_path` and
|
||||||
|
`tls_private_key_path` lines under the `TLS` section. You can either point
|
||||||
|
these settings at an existing certificate and key, or you can enable Synapse's
|
||||||
|
built-in ACME (Let's Encrypt) support. Instructions for having Synapse
|
||||||
|
automatically provision and renew federation certificates through ACME can be
|
||||||
|
found at [ACME.md](docs/ACME.md).
|
||||||
|
|
||||||
## Registering a user
|
## Registering a user
|
||||||
|
|
||||||
@ -374,7 +391,7 @@ users. This can be done as follows:
|
|||||||
```
|
```
|
||||||
$ source ~/synapse/env/bin/activate
|
$ source ~/synapse/env/bin/activate
|
||||||
$ synctl start # if not already running
|
$ synctl start # if not already running
|
||||||
$ register_new_matrix_user -c homeserver.yaml https://localhost:8448
|
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
|
||||||
New user localpart: erikj
|
New user localpart: erikj
|
||||||
Password:
|
Password:
|
||||||
Confirm password:
|
Confirm password:
|
||||||
|
148
README.rst
148
README.rst
@ -26,7 +26,6 @@ via IRC bridge at irc://irc.freenode.net/matrix.
|
|||||||
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
||||||
is sufficiently stable to be run as an internet-facing service for real usage!
|
is sufficiently stable to be run as an internet-facing service for real usage!
|
||||||
|
|
||||||
|
|
||||||
About Matrix
|
About Matrix
|
||||||
============
|
============
|
||||||
|
|
||||||
@ -88,18 +87,20 @@ Connecting to Synapse from a client
|
|||||||
===================================
|
===================================
|
||||||
|
|
||||||
The easiest way to try out your new Synapse installation is by connecting to it
|
The easiest way to try out your new Synapse installation is by connecting to it
|
||||||
from a web client. The easiest option is probably the one at
|
from a web client.
|
||||||
https://riot.im/app. You will need to specify a "Custom server" when you log on
|
|
||||||
or register: set this to ``https://domain.tld`` if you setup a reverse proxy
|
|
||||||
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
|
|
||||||
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
|
|
||||||
server as the default - see `Identity servers`_.)
|
|
||||||
|
|
||||||
If using port 8448 you will run into errors if you are using a self-signed
|
Unless you are running a test instance of Synapse on your local machine, in
|
||||||
certificate. To overcome this, simply go to ``https://localhost:8448``
|
general, you will need to enable TLS support before you can successfully
|
||||||
directly with your browser and accept the presented certificate. You can then
|
connect from a client: see `<INSTALL.md#tls-certificates>`_.
|
||||||
go back in your web client and proceed further. Valid federation certificates
|
|
||||||
should not have this problem.
|
An easy way to get started is to login or register via Riot at
|
||||||
|
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
|
||||||
|
You will need to change the server you are logging into from ``matrix.org``
|
||||||
|
and instead specify a Homeserver URL of ``https://<server_name>:8448``
|
||||||
|
(or just ``https://<server_name>`` if you are using a reverse proxy).
|
||||||
|
(Leave the identity server as the default - see `Identity servers`_.)
|
||||||
|
If you prefer to use another client, refer to our
|
||||||
|
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
|
||||||
|
|
||||||
If all goes well you should at least be able to log in, create a room, and
|
If all goes well you should at least be able to log in, create a room, and
|
||||||
start sending messages.
|
start sending messages.
|
||||||
@ -174,9 +175,30 @@ Separately, Synapse may leak file handles if inbound HTTP requests get stuck
|
|||||||
during processing - e.g. blocked behind a lock or talking to a remote server etc.
|
during processing - e.g. blocked behind a lock or talking to a remote server etc.
|
||||||
This is best diagnosed by matching up the 'Received request' and 'Processed request'
|
This is best diagnosed by matching up the 'Received request' and 'Processed request'
|
||||||
log lines and looking for any 'Processed request' lines which take more than
|
log lines and looking for any 'Processed request' lines which take more than
|
||||||
a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
|
a few seconds to execute. Please let us know at #synapse:matrix.org if
|
||||||
you see this failure mode so we can help debug it, however.
|
you see this failure mode so we can help debug it, however.
|
||||||
|
|
||||||
|
Help!! Synapse eats all my RAM!
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
Synapse's architecture is quite RAM hungry currently - we deliberately
|
||||||
|
cache a lot of recent room data and metadata in RAM in order to speed up
|
||||||
|
common requests. We'll improve this in future, but for now the easiest
|
||||||
|
way to either reduce the RAM usage (at the risk of slowing things down)
|
||||||
|
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
|
||||||
|
variable. The default is 0.5, which can be decreased to reduce RAM usage
|
||||||
|
in memory-constrained environments, or increased if performance starts to
|
||||||
|
degrade.
|
||||||
|
|
||||||
|
Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
|
||||||
|
improvement in overall amount, and especially in terms of giving back RAM
|
||||||
|
to the OS. To use it, the library must simply be put in the LD_PRELOAD
|
||||||
|
environment variable when launching Synapse. On Debian, this can be done
|
||||||
|
by installing the ``libjemalloc1`` package and adding this line to
|
||||||
|
``/etc/default/matrix-synapse``::
|
||||||
|
|
||||||
|
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
|
||||||
|
|
||||||
|
|
||||||
Upgrading an existing Synapse
|
Upgrading an existing Synapse
|
||||||
=============================
|
=============================
|
||||||
@ -196,12 +218,12 @@ Federation is the process by which users on different servers can participate
|
|||||||
in the same room. For this to work, those other servers must be able to contact
|
in the same room. For this to work, those other servers must be able to contact
|
||||||
yours to send messages.
|
yours to send messages.
|
||||||
|
|
||||||
The ``server_name`` in your
|
The ``server_name`` in your ``homeserver.yaml`` file determines the way that
|
||||||
``homeserver.yaml`` file determines the way that other servers will reach
|
other servers will reach yours. By default, they will treat it as a hostname
|
||||||
yours. By default, they will treat it as a hostname and try to connect to
|
and try to connect to port 8448. This is easy to set up and will work with the
|
||||||
port 8448. This is easy to set up and will work with the default configuration,
|
default configuration, provided you set the ``server_name`` to match your
|
||||||
provided you set the ``server_name`` to match your machine's public DNS
|
machine's public DNS hostname, and give Synapse a TLS certificate which is
|
||||||
hostname.
|
valid for your ``server_name``.
|
||||||
|
|
||||||
For a more flexible configuration, you can set up a DNS SRV record. This allows
|
For a more flexible configuration, you can set up a DNS SRV record. This allows
|
||||||
you to run your server on a machine that might not have the same name as your
|
you to run your server on a machine that might not have the same name as your
|
||||||
@ -243,11 +265,8 @@ largest boxes pause for thought.)
|
|||||||
Troubleshooting
|
Troubleshooting
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
You can use the federation tester to check if your homeserver is all set:
|
You can use the `federation tester <https://matrix.org/federationtester>`_ to
|
||||||
``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
|
check if your homeserver is all set.
|
||||||
If any of the attributes under "checks" is false, federation won't work.
|
|
||||||
There is also a nicer interface available from a community member at
|
|
||||||
`<https://neo.lain.haus/fed-tester>`_.
|
|
||||||
|
|
||||||
The typical failure mode with federation is that when you try to join a room,
|
The typical failure mode with federation is that when you try to join a room,
|
||||||
it is rejected with "401: Unauthorized". Generally this means that other
|
it is rejected with "401: Unauthorized". Generally this means that other
|
||||||
@ -263,6 +282,11 @@ So, things to check are:
|
|||||||
(it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
|
(it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
|
||||||
it specifies are reachable from outside your network.
|
it specifies are reachable from outside your network.
|
||||||
|
|
||||||
|
Another common problem is that people on other servers can't join rooms that
|
||||||
|
you invite them to. This can be caused by an incorrectly-configured reverse
|
||||||
|
proxy: see `<docs/reverse_proxy.rst>`_ for instructions on how to correctly
|
||||||
|
configure a reverse proxy.
|
||||||
|
|
||||||
Running a Demo Federation of Synapses
|
Running a Demo Federation of Synapses
|
||||||
-------------------------------------
|
-------------------------------------
|
||||||
|
|
||||||
@ -290,7 +314,6 @@ The advantages of Postgres include:
|
|||||||
For information on how to install and use PostgreSQL, please see
|
For information on how to install and use PostgreSQL, please see
|
||||||
`docs/postgres.rst <docs/postgres.rst>`_.
|
`docs/postgres.rst <docs/postgres.rst>`_.
|
||||||
|
|
||||||
|
|
||||||
.. _reverse-proxy:
|
.. _reverse-proxy:
|
||||||
|
|
||||||
Using a reverse proxy with Synapse
|
Using a reverse proxy with Synapse
|
||||||
@ -304,54 +327,7 @@ It is recommended to put a reverse proxy such as
|
|||||||
doing so is that it means that you can expose the default https port (443) to
|
doing so is that it means that you can expose the default https port (443) to
|
||||||
Matrix clients without needing to run Synapse with root privileges.
|
Matrix clients without needing to run Synapse with root privileges.
|
||||||
|
|
||||||
The most important thing to know here is that Matrix clients and other Matrix
|
For information on configuring one, see `<docs/reverse_proxy.rst>`_.
|
||||||
servers do not necessarily need to connect to your server via the same
|
|
||||||
port. Indeed, clients will use port 443 by default, whereas servers default to
|
|
||||||
port 8448. Where these are different, we refer to the 'client port' and the
|
|
||||||
'federation port'.
|
|
||||||
|
|
||||||
All Matrix endpoints begin with ``/_matrix``, so an example nginx
|
|
||||||
configuration for forwarding client connections to Synapse might look like::
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 443 ssl;
|
|
||||||
listen [::]:443 ssl;
|
|
||||||
server_name matrix.example.com;
|
|
||||||
|
|
||||||
location /_matrix {
|
|
||||||
proxy_pass http://localhost:8008;
|
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
an example Caddy configuration might look like::
|
|
||||||
|
|
||||||
matrix.example.com {
|
|
||||||
proxy /_matrix http://localhost:8008 {
|
|
||||||
transparent
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
and an example Apache configuration might look like::
|
|
||||||
|
|
||||||
<VirtualHost *:443>
|
|
||||||
SSLEngine on
|
|
||||||
ServerName matrix.example.com;
|
|
||||||
|
|
||||||
<Location /_matrix>
|
|
||||||
ProxyPass http://127.0.0.1:8008/_matrix nocanon
|
|
||||||
ProxyPassReverse http://127.0.0.1:8008/_matrix
|
|
||||||
</Location>
|
|
||||||
</VirtualHost>
|
|
||||||
|
|
||||||
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
|
|
||||||
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
|
|
||||||
recorded correctly.
|
|
||||||
|
|
||||||
Having done so, you can then use ``https://matrix.example.com`` (instead of
|
|
||||||
``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
|
|
||||||
Synapse from a client`_.
|
|
||||||
|
|
||||||
|
|
||||||
Identity Servers
|
Identity Servers
|
||||||
================
|
================
|
||||||
@ -409,7 +385,7 @@ Synapse Development
|
|||||||
|
|
||||||
Before setting up a development environment for synapse, make sure you have the
|
Before setting up a development environment for synapse, make sure you have the
|
||||||
system dependencies (such as the python header files) installed - see
|
system dependencies (such as the python header files) installed - see
|
||||||
`Installing from source`_.
|
`Installing from source <INSTALL.md#installing-from-source>`_.
|
||||||
|
|
||||||
To check out a synapse for development, clone the git repo into a working
|
To check out a synapse for development, clone the git repo into a working
|
||||||
directory of your choice::
|
directory of your choice::
|
||||||
@ -420,7 +396,7 @@ directory of your choice::
|
|||||||
Synapse has a number of external dependencies, that are easiest
|
Synapse has a number of external dependencies, that are easiest
|
||||||
to install using pip and a virtualenv::
|
to install using pip and a virtualenv::
|
||||||
|
|
||||||
virtualenv -p python2.7 env
|
virtualenv -p python3 env
|
||||||
source env/bin/activate
|
source env/bin/activate
|
||||||
python -m pip install -e .[all]
|
python -m pip install -e .[all]
|
||||||
|
|
||||||
@ -462,25 +438,3 @@ sphinxcontrib-napoleon::
|
|||||||
Building internal API documentation::
|
Building internal API documentation::
|
||||||
|
|
||||||
python setup.py build_sphinx
|
python setup.py build_sphinx
|
||||||
|
|
||||||
|
|
||||||
Help!! Synapse eats all my RAM!
|
|
||||||
===============================
|
|
||||||
|
|
||||||
Synapse's architecture is quite RAM hungry currently - we deliberately
|
|
||||||
cache a lot of recent room data and metadata in RAM in order to speed up
|
|
||||||
common requests. We'll improve this in future, but for now the easiest
|
|
||||||
way to either reduce the RAM usage (at the risk of slowing things down)
|
|
||||||
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
|
|
||||||
variable. The default is 0.5, which can be decreased to reduce RAM usage
|
|
||||||
in memory-constrained environments, or increased if performance starts to
|
|
||||||
degrade.
|
|
||||||
|
|
||||||
Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
|
|
||||||
improvement in overall amount, and especially in terms of giving back RAM
|
|
||||||
to the OS. To use it, the library must simply be put in the LD_PRELOAD
|
|
||||||
environment variable when launching Synapse. On Debian, this can be done
|
|
||||||
by installing the ``libjemalloc1`` package and adding this line to
|
|
||||||
``/etc/default/matrix-synapse``::
|
|
||||||
|
|
||||||
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
|
|
||||||
|
@ -1 +0,0 @@
|
|||||||
Fix docker upload job to push -py2 images.
|
|
@ -1 +0,0 @@
|
|||||||
Add port configuration information to ACME instructions.
|
|
@ -1 +0,0 @@
|
|||||||
Update MSC1711 FAQ to clarify .well-known usage
|
|
10
debian/changelog
vendored
10
debian/changelog
vendored
@ -1,3 +1,13 @@
|
|||||||
|
matrix-synapse-py3 (0.99.1) stable; urgency=medium
|
||||||
|
|
||||||
|
[ Damjan Georgievski ]
|
||||||
|
* Added ExecReload= in service unit file to send a HUP signal
|
||||||
|
|
||||||
|
[ Synapse Packaging team ]
|
||||||
|
* New synapse release 0.99.1
|
||||||
|
|
||||||
|
-- Synapse Packaging team <packages@matrix.org> Thu, 14 Feb 2019 14:12:26 +0000
|
||||||
|
|
||||||
matrix-synapse-py3 (0.99.0) stable; urgency=medium
|
matrix-synapse-py3 (0.99.0) stable; urgency=medium
|
||||||
|
|
||||||
* New synapse release 0.99.0
|
* New synapse release 0.99.0
|
||||||
|
1
debian/matrix-synapse.service
vendored
1
debian/matrix-synapse.service
vendored
@ -8,6 +8,7 @@ WorkingDirectory=/var/lib/matrix-synapse
|
|||||||
EnvironmentFile=/etc/default/matrix-synapse
|
EnvironmentFile=/etc/default/matrix-synapse
|
||||||
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
|
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
|
||||||
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
|
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
|
||||||
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=3
|
RestartSec=3
|
||||||
|
|
||||||
|
@ -1,17 +0,0 @@
|
|||||||
-----BEGIN CERTIFICATE-----
|
|
||||||
MIICnTCCAYUCAgPoMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNVBAMMCWxvY2FsaG9z
|
|
||||||
dDAeFw0xOTAxMTUwMDQxNTBaFw0yOTAxMTIwMDQxNTBaMBQxEjAQBgNVBAMMCWxv
|
|
||||||
Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKqm81/8j5d
|
|
||||||
R1s7VZ8ueg12gJrPVCCAOkp0UnuC/ZlXhN0HTvnhQ+B0IlSgB4CcQZyf4jnA6o4M
|
|
||||||
rwSc7VX0MPE9x/idoA0g/0WoC6tsxugOrvbzCw8Tv+fnXglm6uVc7aFPfx69wU3q
|
|
||||||
lUHGD/8jtEoHxmCG177Pt2lHAfiVLBAyMQGtETzxt/yAfkloaybe316qoljgK5WK
|
|
||||||
cokdAt9G84EEqxNeEnx5FG3Vc100bAqJS4GvQlFgtF9KFEqZKEyB1yKBpPMDfPIS
|
|
||||||
V9hIV0gswSmYI8dpyBlGf5lPElY68ZGABmOQgr0RI5qHK/h28OpFPE0q3v4AMHgZ
|
|
||||||
I36wii4NrAUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAfD8kcpZ+dn08xh1qtKtp
|
|
||||||
X+/YNZaOBIeVdlCzfoZKNblSFAFD/jCfObNJYvZMUQ8NX2UtEJp1lTA6m7ltSsdY
|
|
||||||
gpC2k1VD8iN+ooXklJmL0kxc7UUqho8I0l9vn35h+lhLF0ihT6XfZVi/lDHWl+4G
|
|
||||||
rG+v9oxvCSCWrNWLearSlFPtQQ8xPtOE0nLwfXtOI/H/2kOuC38ihaIWM4jjbWXK
|
|
||||||
E/ksgUfuDv0mFiwf1YdBF5/M3/qOowqzU8HgMJ3WoT/9Po5Ya1pWc+3BcxxytUDf
|
|
||||||
XdMu0tWHKX84tZxLcR1nZHzluyvFFM8xNtLi9xV0Z7WbfT76V0C/ulEOybGInYsv
|
|
||||||
nQ==
|
|
||||||
-----END CERTIFICATE-----
|
|
@ -2,13 +2,7 @@
|
|||||||
|
|
||||||
## TLS ##
|
## TLS ##
|
||||||
|
|
||||||
{% if SYNAPSE_NO_TLS %}
|
{% if not SYNAPSE_NO_TLS %}
|
||||||
no_tls: True
|
|
||||||
|
|
||||||
# workaround for https://github.com/matrix-org/synapse/issues/4554
|
|
||||||
tls_certificate_path: "/conf/dummy.tls.crt"
|
|
||||||
|
|
||||||
{% else %}
|
|
||||||
|
|
||||||
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
|
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
|
||||||
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
|
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
|
||||||
|
@ -42,9 +42,9 @@ imminent Matrix 1.0 release, you can also see our
|
|||||||
* It used to work just fine, why are you breaking everything?
|
* It used to work just fine, why are you breaking everything?
|
||||||
* Can I manage my own certificates rather than having Synapse renew
|
* Can I manage my own certificates rather than having Synapse renew
|
||||||
certificates itself?
|
certificates itself?
|
||||||
* Do you still recommend against using a reverse-proxy on the federation port?
|
* Do you still recommend against using a reverse proxy on the federation port?
|
||||||
* Do I still need to give my TLS certificates to Synapse if I am using a
|
* Do I still need to give my TLS certificates to Synapse if I am using a
|
||||||
reverse-proxy?
|
reverse proxy?
|
||||||
* Do I need the same certificate for the client and federation port?
|
* Do I need the same certificate for the client and federation port?
|
||||||
* How do I tell Synapse to reload my keys/certificates after I replace them?
|
* How do I tell Synapse to reload my keys/certificates after I replace them?
|
||||||
|
|
||||||
@ -132,6 +132,9 @@ your domain, you can simply route all traffic through the reverse proxy by
|
|||||||
updating the SRV record appropriately (or removing it, if the proxy listens on
|
updating the SRV record appropriately (or removing it, if the proxy listens on
|
||||||
8448).
|
8448).
|
||||||
|
|
||||||
|
See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
|
||||||
|
reverse proxy.
|
||||||
|
|
||||||
#### Option 3: add a .well-known file to delegate your matrix traffic
|
#### Option 3: add a .well-known file to delegate your matrix traffic
|
||||||
|
|
||||||
This will allow you to keep Synapse on a separate domain, without having to
|
This will allow you to keep Synapse on a separate domain, without having to
|
||||||
@ -297,17 +300,20 @@ attempt to obtain certificates from Let's Encrypt if you configure it to do
|
|||||||
so.The only requirement is that there is a valid TLS cert present for
|
so.The only requirement is that there is a valid TLS cert present for
|
||||||
federation end points.
|
federation end points.
|
||||||
|
|
||||||
### Do you still recommend against using a reverse-proxy on the federation port?
|
### Do you still recommend against using a reverse proxy on the federation port?
|
||||||
|
|
||||||
We no longer actively recommend against using a reverse proxy. Many admins will
|
We no longer actively recommend against using a reverse proxy. Many admins will
|
||||||
find it easier to direct federation traffic to a reverse-proxy and manage their
|
find it easier to direct federation traffic to a reverse proxy and manage their
|
||||||
own TLS certificates, and this is a supported configuration.
|
own TLS certificates, and this is a supported configuration.
|
||||||
|
|
||||||
|
See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
|
||||||
|
reverse proxy.
|
||||||
|
|
||||||
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
|
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
|
||||||
|
|
||||||
Practically speaking, this is no longer necessary.
|
Practically speaking, this is no longer necessary.
|
||||||
|
|
||||||
If you are using a reverse-proxy for all of your TLS traffic, then you can set
|
If you are using a reverse proxy for all of your TLS traffic, then you can set
|
||||||
`no_tls: True`. In that case, the only reason Synapse needs the certificate is
|
`no_tls: True`. In that case, the only reason Synapse needs the certificate is
|
||||||
to populate a legacy 'tls_fingerprints' field in the federation API. This is
|
to populate a legacy 'tls_fingerprints' field in the federation API. This is
|
||||||
ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
|
ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
|
||||||
@ -321,9 +327,9 @@ this, you can give it any TLS certificate at all. This will be fixed soon.
|
|||||||
|
|
||||||
### Do I need the same certificate for the client and federation port?
|
### Do I need the same certificate for the client and federation port?
|
||||||
|
|
||||||
No. There is nothing stopping you doing so, particularly if you are using a
|
No. There is nothing stopping you from using different certificates,
|
||||||
reverse-proxy. However, Synapse will use the same certificate on any ports
|
particularly if you are using a reverse proxy. However, Synapse will use the
|
||||||
where TLS is configured.
|
same certificate on any ports where TLS is configured.
|
||||||
|
|
||||||
### How do I tell Synapse to reload my keys/certificates after I replace them?
|
### How do I tell Synapse to reload my keys/certificates after I replace them?
|
||||||
|
|
||||||
|
94
docs/reverse_proxy.rst
Normal file
94
docs/reverse_proxy.rst
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
Using a reverse proxy with Synapse
|
||||||
|
==================================
|
||||||
|
|
||||||
|
It is recommended to put a reverse proxy such as
|
||||||
|
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||||
|
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
|
||||||
|
`Caddy <https://caddyserver.com/docs/proxy>`_ or
|
||||||
|
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||||
|
doing so is that it means that you can expose the default https port (443) to
|
||||||
|
Matrix clients without needing to run Synapse with root privileges.
|
||||||
|
|
||||||
|
**NOTE**: Your reverse proxy must not 'canonicalise' or 'normalise' the
|
||||||
|
requested URI in any way (for example, by decoding ``%xx`` escapes). Beware
|
||||||
|
that Apache *will* canonicalise URIs unless you specifify ``nocanon``.
|
||||||
|
|
||||||
|
When setting up a reverse proxy, remember that Matrix clients and other Matrix
|
||||||
|
servers do not necessarily need to connect to your server via the same server
|
||||||
|
name or port. Indeed, clients will use port 443 by default, whereas servers
|
||||||
|
default to port 8448. Where these are different, we refer to the 'client port'
|
||||||
|
and the 'federation port'. See `Setting up federation
|
||||||
|
<../README.rst#setting-up-federation>`_ for more details of the algorithm used for
|
||||||
|
federation connections.
|
||||||
|
|
||||||
|
Let's assume that we expect clients to connect to our server at
|
||||||
|
``https://matrix.example.com``, and other servers to connect at
|
||||||
|
``https://example.com:8448``. Here are some example configurations:
|
||||||
|
|
||||||
|
* nginx::
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 443 ssl;
|
||||||
|
listen [::]:443 ssl;
|
||||||
|
server_name matrix.example.com;
|
||||||
|
|
||||||
|
location /_matrix {
|
||||||
|
proxy_pass http://localhost:8008;
|
||||||
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 8448 ssl default_server;
|
||||||
|
listen [::]:8448 ssl default_server;
|
||||||
|
server_name example.com;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_pass http://localhost:8008;
|
||||||
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
* Caddy::
|
||||||
|
|
||||||
|
matrix.example.com {
|
||||||
|
proxy /_matrix http://localhost:8008 {
|
||||||
|
transparent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
example.com:8448 {
|
||||||
|
proxy / http://localhost:8008 {
|
||||||
|
transparent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
* Apache (note the ``nocanon`` options here!)::
|
||||||
|
|
||||||
|
<VirtualHost *:443>
|
||||||
|
SSLEngine on
|
||||||
|
ServerName matrix.example.com;
|
||||||
|
|
||||||
|
<Location /_matrix>
|
||||||
|
ProxyPass http://127.0.0.1:8008/_matrix nocanon
|
||||||
|
ProxyPassReverse http://127.0.0.1:8008/_matrix
|
||||||
|
</Location>
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
<VirtualHost *:8448>
|
||||||
|
SSLEngine on
|
||||||
|
ServerName example.com;
|
||||||
|
|
||||||
|
<Location />
|
||||||
|
ProxyPass http://127.0.0.1:8008/_matrix nocanon
|
||||||
|
ProxyPassReverse http://127.0.0.1:8008/_matrix
|
||||||
|
</Location>
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
|
||||||
|
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
|
||||||
|
recorded correctly.
|
||||||
|
|
||||||
|
Having done so, you can then use ``https://matrix.example.com`` (instead of
|
||||||
|
``https://matrix.example.com:8448``) as the "Custom server" when connecting to
|
||||||
|
Synapse from a client.
|
@ -26,9 +26,8 @@ Configuration
|
|||||||
To make effective use of the workers, you will need to configure an HTTP
|
To make effective use of the workers, you will need to configure an HTTP
|
||||||
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
|
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
|
||||||
the correct worker, or to the main synapse instance. Note that this includes
|
the correct worker, or to the main synapse instance. Note that this includes
|
||||||
requests made to the federation port. The caveats regarding running a
|
requests made to the federation port. See `<reverse_proxy.rst>`_ for
|
||||||
reverse-proxy on the federation port still apply (see
|
information on setting up a reverse proxy.
|
||||||
https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
|
|
||||||
|
|
||||||
To enable workers, you need to add two replication listeners to the master
|
To enable workers, you need to add two replication listeners to the master
|
||||||
synapse, e.g.::
|
synapse, e.g.::
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014-2016 OpenMarket Ltd
|
||||||
# Copyright 2018 New Vector Ltd
|
# Copyright 2018-9 New Vector Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@ -27,4 +27,4 @@ try:
|
|||||||
except ImportError:
|
except ImportError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
__version__ = "0.99.0"
|
__version__ = "0.99.1"
|
||||||
|
@ -73,6 +73,7 @@ class EventTypes(object):
|
|||||||
RoomHistoryVisibility = "m.room.history_visibility"
|
RoomHistoryVisibility = "m.room.history_visibility"
|
||||||
CanonicalAlias = "m.room.canonical_alias"
|
CanonicalAlias = "m.room.canonical_alias"
|
||||||
RoomAvatar = "m.room.avatar"
|
RoomAvatar = "m.room.avatar"
|
||||||
|
RoomEncryption = "m.room.encryption"
|
||||||
GuestAccess = "m.room.guest_access"
|
GuestAccess = "m.room.guest_access"
|
||||||
|
|
||||||
# These are used for validation
|
# These are used for validation
|
||||||
|
@ -15,19 +15,36 @@
|
|||||||
|
|
||||||
import gc
|
import gc
|
||||||
import logging
|
import logging
|
||||||
|
import signal
|
||||||
import sys
|
import sys
|
||||||
|
import traceback
|
||||||
|
|
||||||
import psutil
|
import psutil
|
||||||
from daemonize import Daemonize
|
from daemonize import Daemonize
|
||||||
|
|
||||||
from twisted.internet import error, reactor
|
from twisted.internet import error, reactor
|
||||||
|
from twisted.protocols.tls import TLSMemoryBIOFactory
|
||||||
|
|
||||||
from synapse.app import check_bind_error
|
from synapse.app import check_bind_error
|
||||||
|
from synapse.crypto import context_factory
|
||||||
from synapse.util import PreserveLoggingContext
|
from synapse.util import PreserveLoggingContext
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.util.rlimit import change_resource_limit
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
_sighup_callbacks = []
|
||||||
|
|
||||||
|
|
||||||
|
def register_sighup(func):
|
||||||
|
"""
|
||||||
|
Register a function to be called when a SIGHUP occurs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
func (function): Function to be called when sent a SIGHUP signal.
|
||||||
|
Will be called with a single argument, the homeserver.
|
||||||
|
"""
|
||||||
|
_sighup_callbacks.append(func)
|
||||||
|
|
||||||
|
|
||||||
def start_worker_reactor(appname, config):
|
def start_worker_reactor(appname, config):
|
||||||
""" Run the reactor in the main process
|
""" Run the reactor in the main process
|
||||||
@ -136,9 +153,8 @@ def listen_metrics(bind_addresses, port):
|
|||||||
from prometheus_client import start_http_server
|
from prometheus_client import start_http_server
|
||||||
|
|
||||||
for host in bind_addresses:
|
for host in bind_addresses:
|
||||||
reactor.callInThread(start_http_server, int(port),
|
logger.info("Starting metrics listener on %s:%d", host, port)
|
||||||
addr=host, registry=RegistryProxy)
|
start_http_server(port, addr=host, registry=RegistryProxy)
|
||||||
logger.info("Metrics now reporting on %s:%d", host, port)
|
|
||||||
|
|
||||||
|
|
||||||
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
|
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
|
||||||
@ -146,21 +162,23 @@ def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
|
|||||||
Create a TCP socket for a port and several addresses
|
Create a TCP socket for a port and several addresses
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
list (empty)
|
list[twisted.internet.tcp.Port]: listening for TCP connections
|
||||||
"""
|
"""
|
||||||
|
r = []
|
||||||
for address in bind_addresses:
|
for address in bind_addresses:
|
||||||
try:
|
try:
|
||||||
reactor.listenTCP(
|
r.append(
|
||||||
port,
|
reactor.listenTCP(
|
||||||
factory,
|
port,
|
||||||
backlog,
|
factory,
|
||||||
address
|
backlog,
|
||||||
|
address
|
||||||
|
)
|
||||||
)
|
)
|
||||||
except error.CannotListenError as e:
|
except error.CannotListenError as e:
|
||||||
check_bind_error(e, address, bind_addresses)
|
check_bind_error(e, address, bind_addresses)
|
||||||
|
|
||||||
logger.info("Synapse now listening on TCP port %d", port)
|
return r
|
||||||
return []
|
|
||||||
|
|
||||||
|
|
||||||
def listen_ssl(
|
def listen_ssl(
|
||||||
@ -187,5 +205,74 @@ def listen_ssl(
|
|||||||
except error.CannotListenError as e:
|
except error.CannotListenError as e:
|
||||||
check_bind_error(e, address, bind_addresses)
|
check_bind_error(e, address, bind_addresses)
|
||||||
|
|
||||||
logger.info("Synapse now listening on port %d (TLS)", port)
|
|
||||||
return r
|
return r
|
||||||
|
|
||||||
|
|
||||||
|
def refresh_certificate(hs):
|
||||||
|
"""
|
||||||
|
Refresh the TLS certificates that Synapse is using by re-reading them from
|
||||||
|
disk and updating the TLS context factories to use them.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if not hs.config.has_tls_listener():
|
||||||
|
# attempt to reload the certs for the good of the tls_fingerprints
|
||||||
|
hs.config.read_certificate_from_disk(require_cert_and_key=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
hs.config.read_certificate_from_disk(require_cert_and_key=True)
|
||||||
|
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
|
||||||
|
|
||||||
|
if hs._listening_services:
|
||||||
|
logger.info("Updating context factories...")
|
||||||
|
for i in hs._listening_services:
|
||||||
|
# When you listenSSL, it doesn't make an SSL port but a TCP one with
|
||||||
|
# a TLS wrapping factory around the factory you actually want to get
|
||||||
|
# requests. This factory attribute is public but missing from
|
||||||
|
# Twisted's documentation.
|
||||||
|
if isinstance(i.factory, TLSMemoryBIOFactory):
|
||||||
|
addr = i.getHost()
|
||||||
|
logger.info(
|
||||||
|
"Replacing TLS context factory on [%s]:%i", addr.host, addr.port,
|
||||||
|
)
|
||||||
|
# We want to replace TLS factories with a new one, with the new
|
||||||
|
# TLS configuration. We do this by reaching in and pulling out
|
||||||
|
# the wrappedFactory, and then re-wrapping it.
|
||||||
|
i.factory = TLSMemoryBIOFactory(
|
||||||
|
hs.tls_server_context_factory,
|
||||||
|
False,
|
||||||
|
i.factory.wrappedFactory
|
||||||
|
)
|
||||||
|
logger.info("Context factories updated.")
|
||||||
|
|
||||||
|
|
||||||
|
def start(hs, listeners=None):
|
||||||
|
"""
|
||||||
|
Start a Synapse server or worker.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hs (synapse.server.HomeServer)
|
||||||
|
listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Set up the SIGHUP machinery.
|
||||||
|
if hasattr(signal, "SIGHUP"):
|
||||||
|
def handle_sighup(*args, **kwargs):
|
||||||
|
for i in _sighup_callbacks:
|
||||||
|
i(hs)
|
||||||
|
|
||||||
|
signal.signal(signal.SIGHUP, handle_sighup)
|
||||||
|
|
||||||
|
register_sighup(refresh_certificate)
|
||||||
|
|
||||||
|
# Load the certificate from disk.
|
||||||
|
refresh_certificate(hs)
|
||||||
|
|
||||||
|
# It is now safe to start your Synapse.
|
||||||
|
hs.start_listening(listeners)
|
||||||
|
hs.get_datastore().start_profiling()
|
||||||
|
except Exception:
|
||||||
|
traceback.print_exc(file=sys.stderr)
|
||||||
|
reactor = hs.get_reactor()
|
||||||
|
if reactor.running:
|
||||||
|
reactor.stop()
|
||||||
|
sys.exit(1)
|
||||||
|
@ -168,12 +168,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ps.setup()
|
ps.setup()
|
||||||
ps.start_listening(config.worker_listeners)
|
reactor.callWhenRunning(_base.start, ps, config.worker_listeners)
|
||||||
|
|
||||||
def start():
|
|
||||||
ps.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-appservice", config)
|
_base.start_worker_reactor("synapse-appservice", config)
|
||||||
|
|
||||||
|
@ -25,7 +25,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
@ -173,17 +172,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-client-reader", config)
|
_base.start_worker_reactor("synapse-client-reader", config)
|
||||||
|
|
||||||
|
@ -25,7 +25,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
@ -194,17 +193,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-event-creator", config)
|
_base.start_worker_reactor("synapse-event-creator", config)
|
||||||
|
|
||||||
|
@ -26,7 +26,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.federation.transport.server import TransportLayerServer
|
from synapse.federation.transport.server import TransportLayerServer
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
@ -87,6 +86,16 @@ class FederationReaderServer(HomeServer):
|
|||||||
resources.update({
|
resources.update({
|
||||||
FEDERATION_PREFIX: TransportLayerServer(self),
|
FEDERATION_PREFIX: TransportLayerServer(self),
|
||||||
})
|
})
|
||||||
|
if name == "openid" and "federation" not in res["names"]:
|
||||||
|
# Only load the openid resource separately if federation resource
|
||||||
|
# is not specified since federation resource includes openid
|
||||||
|
# resource.
|
||||||
|
resources.update({
|
||||||
|
FEDERATION_PREFIX: TransportLayerServer(
|
||||||
|
self,
|
||||||
|
servlet_groups=["openid"],
|
||||||
|
),
|
||||||
|
})
|
||||||
|
|
||||||
root_resource = create_resource_tree(resources, NoResource())
|
root_resource = create_resource_tree(resources, NoResource())
|
||||||
|
|
||||||
@ -99,7 +108,8 @@ class FederationReaderServer(HomeServer):
|
|||||||
listener_config,
|
listener_config,
|
||||||
root_resource,
|
root_resource,
|
||||||
self.version_string,
|
self.version_string,
|
||||||
)
|
),
|
||||||
|
reactor=self.get_reactor()
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.info("Synapse federation reader now listening on port %d", port)
|
logger.info("Synapse federation reader now listening on port %d", port)
|
||||||
@ -160,17 +170,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-federation-reader", config)
|
_base.start_worker_reactor("synapse-federation-reader", config)
|
||||||
|
|
||||||
|
@ -25,7 +25,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.federation import send_queue
|
from synapse.federation import send_queue
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
@ -192,17 +191,8 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
|
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
_base.start_worker_reactor("synapse-federation-sender", config)
|
_base.start_worker_reactor("synapse-federation-sender", config)
|
||||||
|
|
||||||
|
|
||||||
|
@ -26,7 +26,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
@ -250,17 +249,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-frontend-proxy", config)
|
_base.start_worker_reactor("synapse-frontend-proxy", config)
|
||||||
|
|
||||||
|
@ -17,7 +17,6 @@
|
|||||||
import gc
|
import gc
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import signal
|
|
||||||
import sys
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
@ -28,7 +27,6 @@ from prometheus_client import Gauge
|
|||||||
|
|
||||||
from twisted.application import service
|
from twisted.application import service
|
||||||
from twisted.internet import defer, reactor
|
from twisted.internet import defer, reactor
|
||||||
from twisted.protocols.tls import TLSMemoryBIOFactory
|
|
||||||
from twisted.web.resource import EncodingResourceWrapper, NoResource
|
from twisted.web.resource import EncodingResourceWrapper, NoResource
|
||||||
from twisted.web.server import GzipEncoderFactory
|
from twisted.web.server import GzipEncoderFactory
|
||||||
from twisted.web.static import File
|
from twisted.web.static import File
|
||||||
@ -49,7 +47,6 @@ from synapse.app import _base
|
|||||||
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
|
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
|
||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.federation.transport.server import TransportLayerServer
|
from synapse.federation.transport.server import TransportLayerServer
|
||||||
from synapse.http.additional_resource import AdditionalResource
|
from synapse.http.additional_resource import AdditionalResource
|
||||||
from synapse.http.server import RootRedirect
|
from synapse.http.server import RootRedirect
|
||||||
@ -86,7 +83,6 @@ def gz_wrap(r):
|
|||||||
|
|
||||||
class SynapseHomeServer(HomeServer):
|
class SynapseHomeServer(HomeServer):
|
||||||
DATASTORE_CLASS = DataStore
|
DATASTORE_CLASS = DataStore
|
||||||
_listening_services = []
|
|
||||||
|
|
||||||
def _listener_http(self, config, listener_config):
|
def _listener_http(self, config, listener_config):
|
||||||
port = listener_config["port"]
|
port = listener_config["port"]
|
||||||
@ -94,14 +90,13 @@ class SynapseHomeServer(HomeServer):
|
|||||||
tls = listener_config.get("tls", False)
|
tls = listener_config.get("tls", False)
|
||||||
site_tag = listener_config.get("tag", port)
|
site_tag = listener_config.get("tag", port)
|
||||||
|
|
||||||
if tls and config.no_tls:
|
|
||||||
raise ConfigError(
|
|
||||||
"Listener on port %i has TLS enabled, but no_tls is set" % (port,),
|
|
||||||
)
|
|
||||||
|
|
||||||
resources = {}
|
resources = {}
|
||||||
for res in listener_config["resources"]:
|
for res in listener_config["resources"]:
|
||||||
for name in res["names"]:
|
for name in res["names"]:
|
||||||
|
if name == "openid" and "federation" in res["names"]:
|
||||||
|
# Skip loading openid resource if federation is defined
|
||||||
|
# since federation resource will include openid
|
||||||
|
continue
|
||||||
resources.update(self._configure_named_resource(
|
resources.update(self._configure_named_resource(
|
||||||
name, res.get("compress", False),
|
name, res.get("compress", False),
|
||||||
))
|
))
|
||||||
@ -126,7 +121,7 @@ class SynapseHomeServer(HomeServer):
|
|||||||
root_resource = create_resource_tree(resources, root_resource)
|
root_resource = create_resource_tree(resources, root_resource)
|
||||||
|
|
||||||
if tls:
|
if tls:
|
||||||
return listen_ssl(
|
ports = listen_ssl(
|
||||||
bind_addresses,
|
bind_addresses,
|
||||||
port,
|
port,
|
||||||
SynapseSite(
|
SynapseSite(
|
||||||
@ -137,10 +132,12 @@ class SynapseHomeServer(HomeServer):
|
|||||||
self.version_string,
|
self.version_string,
|
||||||
),
|
),
|
||||||
self.tls_server_context_factory,
|
self.tls_server_context_factory,
|
||||||
|
reactor=self.get_reactor(),
|
||||||
)
|
)
|
||||||
|
logger.info("Synapse now listening on TCP port %d (TLS)", port)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
return listen_tcp(
|
ports = listen_tcp(
|
||||||
bind_addresses,
|
bind_addresses,
|
||||||
port,
|
port,
|
||||||
SynapseSite(
|
SynapseSite(
|
||||||
@ -149,8 +146,12 @@ class SynapseHomeServer(HomeServer):
|
|||||||
listener_config,
|
listener_config,
|
||||||
root_resource,
|
root_resource,
|
||||||
self.version_string,
|
self.version_string,
|
||||||
)
|
),
|
||||||
|
reactor=self.get_reactor(),
|
||||||
)
|
)
|
||||||
|
logger.info("Synapse now listening on TCP port %d", port)
|
||||||
|
|
||||||
|
return ports
|
||||||
|
|
||||||
def _configure_named_resource(self, name, compress=False):
|
def _configure_named_resource(self, name, compress=False):
|
||||||
"""Build a resource map for a named resource
|
"""Build a resource map for a named resource
|
||||||
@ -196,6 +197,11 @@ class SynapseHomeServer(HomeServer):
|
|||||||
FEDERATION_PREFIX: TransportLayerServer(self),
|
FEDERATION_PREFIX: TransportLayerServer(self),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
if name == "openid":
|
||||||
|
resources.update({
|
||||||
|
FEDERATION_PREFIX: TransportLayerServer(self, servlet_groups=["openid"]),
|
||||||
|
})
|
||||||
|
|
||||||
if name in ["static", "client"]:
|
if name in ["static", "client"]:
|
||||||
resources.update({
|
resources.update({
|
||||||
STATIC_PREFIX: File(
|
STATIC_PREFIX: File(
|
||||||
@ -241,10 +247,10 @@ class SynapseHomeServer(HomeServer):
|
|||||||
|
|
||||||
return resources
|
return resources
|
||||||
|
|
||||||
def start_listening(self):
|
def start_listening(self, listeners):
|
||||||
config = self.get_config()
|
config = self.get_config()
|
||||||
|
|
||||||
for listener in config.listeners:
|
for listener in listeners:
|
||||||
if listener["type"] == "http":
|
if listener["type"] == "http":
|
||||||
self._listening_services.extend(
|
self._listening_services.extend(
|
||||||
self._listener_http(config, listener)
|
self._listener_http(config, listener)
|
||||||
@ -260,14 +266,14 @@ class SynapseHomeServer(HomeServer):
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
elif listener["type"] == "replication":
|
elif listener["type"] == "replication":
|
||||||
bind_addresses = listener["bind_addresses"]
|
services = listen_tcp(
|
||||||
for address in bind_addresses:
|
listener["bind_addresses"],
|
||||||
factory = ReplicationStreamProtocolFactory(self)
|
listener["port"],
|
||||||
server_listener = reactor.listenTCP(
|
ReplicationStreamProtocolFactory(self),
|
||||||
listener["port"], factory, interface=address
|
)
|
||||||
)
|
for s in services:
|
||||||
reactor.addSystemEventTrigger(
|
reactor.addSystemEventTrigger(
|
||||||
"before", "shutdown", server_listener.stopListening,
|
"before", "shutdown", s.stopListening,
|
||||||
)
|
)
|
||||||
elif listener["type"] == "metrics":
|
elif listener["type"] == "metrics":
|
||||||
if not self.get_config().enable_metrics:
|
if not self.get_config().enable_metrics:
|
||||||
@ -328,20 +334,11 @@ def setup(config_options):
|
|||||||
# generating config files and shouldn't try to continue.
|
# generating config files and shouldn't try to continue.
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
sighup_callbacks = []
|
|
||||||
synapse.config.logger.setup_logging(
|
synapse.config.logger.setup_logging(
|
||||||
config,
|
config,
|
||||||
use_worker_options=False,
|
use_worker_options=False
|
||||||
register_sighup=sighup_callbacks.append
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def handle_sighup(*args, **kwargs):
|
|
||||||
for i in sighup_callbacks:
|
|
||||||
i(*args, **kwargs)
|
|
||||||
|
|
||||||
if hasattr(signal, "SIGHUP"):
|
|
||||||
signal.signal(signal.SIGHUP, handle_sighup)
|
|
||||||
|
|
||||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||||
|
|
||||||
database_engine = create_engine(config.database_config)
|
database_engine = create_engine(config.database_config)
|
||||||
@ -377,76 +374,73 @@ def setup(config_options):
|
|||||||
|
|
||||||
hs.setup()
|
hs.setup()
|
||||||
|
|
||||||
def refresh_certificate(*args):
|
@defer.inlineCallbacks
|
||||||
|
def do_acme():
|
||||||
"""
|
"""
|
||||||
Refresh the TLS certificates that Synapse is using by re-reading them
|
Reprovision an ACME certificate, if it's required.
|
||||||
from disk and updating the TLS context factories to use them.
|
|
||||||
|
Returns:
|
||||||
|
Deferred[bool]: Whether the cert has been updated.
|
||||||
"""
|
"""
|
||||||
logging.info("Reloading certificate from disk...")
|
acme = hs.get_acme_handler()
|
||||||
hs.config.read_certificate_from_disk()
|
|
||||||
hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
# Check how long the certificate is active for.
|
||||||
hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
cert_days_remaining = hs.config.is_disk_cert_valid(
|
||||||
config
|
allow_self_signed=False
|
||||||
)
|
)
|
||||||
logging.info("Certificate reloaded.")
|
|
||||||
|
|
||||||
logging.info("Updating context factories...")
|
# We want to reprovision if cert_days_remaining is None (meaning no
|
||||||
for i in hs._listening_services:
|
# certificate exists), or the days remaining number it returns
|
||||||
if isinstance(i.factory, TLSMemoryBIOFactory):
|
# is less than our re-registration threshold.
|
||||||
i.factory = TLSMemoryBIOFactory(
|
provision = False
|
||||||
hs.tls_server_context_factory,
|
|
||||||
False,
|
|
||||||
i.factory.wrappedFactory
|
|
||||||
)
|
|
||||||
logging.info("Context factories updated.")
|
|
||||||
|
|
||||||
sighup_callbacks.append(refresh_certificate)
|
if (cert_days_remaining is None):
|
||||||
|
provision = True
|
||||||
|
|
||||||
|
if cert_days_remaining > hs.config.acme_reprovision_threshold:
|
||||||
|
provision = True
|
||||||
|
|
||||||
|
if provision:
|
||||||
|
yield acme.provision_certificate()
|
||||||
|
|
||||||
|
defer.returnValue(provision)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def reprovision_acme():
|
||||||
|
"""
|
||||||
|
Provision a certificate from ACME, if required, and reload the TLS
|
||||||
|
certificate if it's renewed.
|
||||||
|
"""
|
||||||
|
reprovisioned = yield do_acme()
|
||||||
|
if reprovisioned:
|
||||||
|
_base.refresh_certificate(hs)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def start():
|
def start():
|
||||||
try:
|
try:
|
||||||
# Check if the certificate is still valid.
|
# Run the ACME provisioning code, if it's enabled.
|
||||||
cert_days_remaining = hs.config.is_disk_cert_valid()
|
|
||||||
|
|
||||||
if hs.config.acme_enabled:
|
if hs.config.acme_enabled:
|
||||||
# If ACME is enabled, we might need to provision a certificate
|
|
||||||
# before starting.
|
|
||||||
acme = hs.get_acme_handler()
|
acme = hs.get_acme_handler()
|
||||||
|
|
||||||
# Start up the webservices which we will respond to ACME
|
# Start up the webservices which we will respond to ACME
|
||||||
# challenges with.
|
# challenges with, and then provision.
|
||||||
yield acme.start_listening()
|
yield acme.start_listening()
|
||||||
|
yield do_acme()
|
||||||
|
|
||||||
# We want to reprovision if cert_days_remaining is None (meaning no
|
# Check if it needs to be reprovisioned every day.
|
||||||
# certificate exists), or the days remaining number it returns
|
hs.get_clock().looping_call(
|
||||||
# is less than our re-registration threshold.
|
reprovision_acme,
|
||||||
if (cert_days_remaining is None) or (
|
24 * 60 * 60 * 1000
|
||||||
not cert_days_remaining > hs.config.acme_reprovision_threshold
|
)
|
||||||
):
|
|
||||||
yield acme.provision_certificate()
|
|
||||||
|
|
||||||
# Read the certificate from disk and build the context factories for
|
_base.start(hs, config.listeners)
|
||||||
# TLS.
|
|
||||||
hs.config.read_certificate_from_disk()
|
|
||||||
hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
|
|
||||||
# It is now safe to start your Synapse.
|
|
||||||
hs.start_listening()
|
|
||||||
hs.get_pusherpool().start()
|
hs.get_pusherpool().start()
|
||||||
hs.get_datastore().start_profiling()
|
|
||||||
hs.get_datastore().start_doing_background_updates()
|
hs.get_datastore().start_doing_background_updates()
|
||||||
except Exception as e:
|
except Exception:
|
||||||
# If a DeferredList failed (like in listening on the ACME listener),
|
# Print the exception and bail out.
|
||||||
# we need to print the subfailure explicitly.
|
|
||||||
if isinstance(e, defer.FirstError):
|
|
||||||
e.subFailure.printTraceback(sys.stderr)
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
# Something else went wrong when starting. Print it and bail out.
|
|
||||||
traceback.print_exc(file=sys.stderr)
|
traceback.print_exc(file=sys.stderr)
|
||||||
|
if reactor.running:
|
||||||
|
reactor.stop()
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
reactor.callWhenRunning(start)
|
||||||
@ -455,7 +449,8 @@ def setup(config_options):
|
|||||||
|
|
||||||
|
|
||||||
class SynapseService(service.Service):
|
class SynapseService(service.Service):
|
||||||
"""A twisted Service class that will start synapse. Used to run synapse
|
"""
|
||||||
|
A twisted Service class that will start synapse. Used to run synapse
|
||||||
via twistd and a .tac.
|
via twistd and a .tac.
|
||||||
"""
|
"""
|
||||||
def __init__(self, config):
|
def __init__(self, config):
|
||||||
|
@ -26,7 +26,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
@ -160,17 +159,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-media-repository", config)
|
_base.start_worker_reactor("synapse-media-repository", config)
|
||||||
|
|
||||||
|
@ -224,11 +224,10 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ps.setup()
|
ps.setup()
|
||||||
ps.start_listening(config.worker_listeners)
|
|
||||||
|
|
||||||
def start():
|
def start():
|
||||||
|
_base.start(ps, config.worker_listeners)
|
||||||
ps.get_pusherpool().start()
|
ps.get_pusherpool().start()
|
||||||
ps.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
reactor.callWhenRunning(start)
|
||||||
|
|
||||||
|
@ -445,12 +445,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
ss.start_listening(config.worker_listeners)
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
|
|
||||||
def start():
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-synchrotron", config)
|
_base.start_worker_reactor("synapse-synchrotron", config)
|
||||||
|
|
||||||
|
@ -26,7 +26,6 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
@ -220,17 +219,7 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
|
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||||
def start():
|
|
||||||
ss.config.read_certificate_from_disk()
|
|
||||||
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
|
|
||||||
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
|
||||||
config
|
|
||||||
)
|
|
||||||
ss.start_listening(config.worker_listeners)
|
|
||||||
ss.get_datastore().start_profiling()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-user-dir", config)
|
_base.start_worker_reactor("synapse-user-dir", config)
|
||||||
|
|
||||||
|
@ -257,7 +257,7 @@ class Config(object):
|
|||||||
"--keys-directory",
|
"--keys-directory",
|
||||||
metavar="DIRECTORY",
|
metavar="DIRECTORY",
|
||||||
help="Used with 'generate-*' options to specify where files such as"
|
help="Used with 'generate-*' options to specify where files such as"
|
||||||
" certs and signing keys should be stored in, unless explicitly"
|
" signing keys should be stored, unless explicitly"
|
||||||
" specified in the config.",
|
" specified in the config.",
|
||||||
)
|
)
|
||||||
config_parser.add_argument(
|
config_parser.add_argument(
|
||||||
@ -313,16 +313,11 @@ class Config(object):
|
|||||||
print(
|
print(
|
||||||
(
|
(
|
||||||
"A config file has been generated in %r for server name"
|
"A config file has been generated in %r for server name"
|
||||||
" %r with corresponding SSL keys and self-signed"
|
" %r. Please review this file and customise it"
|
||||||
" certificates. Please review this file and customise it"
|
|
||||||
" to your needs."
|
" to your needs."
|
||||||
)
|
)
|
||||||
% (config_path, server_name)
|
% (config_path, server_name)
|
||||||
)
|
)
|
||||||
print(
|
|
||||||
"If this server name is incorrect, you will need to"
|
|
||||||
" regenerate the SSL certificates"
|
|
||||||
)
|
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
print(
|
print(
|
||||||
|
@ -24,6 +24,7 @@ class ApiConfig(Config):
|
|||||||
EventTypes.JoinRules,
|
EventTypes.JoinRules,
|
||||||
EventTypes.CanonicalAlias,
|
EventTypes.CanonicalAlias,
|
||||||
EventTypes.RoomAvatar,
|
EventTypes.RoomAvatar,
|
||||||
|
EventTypes.RoomEncryption,
|
||||||
EventTypes.Name,
|
EventTypes.Name,
|
||||||
])
|
])
|
||||||
|
|
||||||
@ -36,5 +37,6 @@ class ApiConfig(Config):
|
|||||||
- "{JoinRules}"
|
- "{JoinRules}"
|
||||||
- "{CanonicalAlias}"
|
- "{CanonicalAlias}"
|
||||||
- "{RoomAvatar}"
|
- "{RoomAvatar}"
|
||||||
|
- "{RoomEncryption}"
|
||||||
- "{Name}"
|
- "{Name}"
|
||||||
""".format(**vars(EventTypes))
|
""".format(**vars(EventTypes))
|
||||||
|
@ -42,7 +42,7 @@ from .voip import VoipConfig
|
|||||||
from .workers import WorkerConfig
|
from .workers import WorkerConfig
|
||||||
|
|
||||||
|
|
||||||
class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
|
class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig,
|
||||||
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
|
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
|
||||||
VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
|
VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
|
||||||
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
|
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
|
||||||
|
@ -15,7 +15,6 @@
|
|||||||
import logging
|
import logging
|
||||||
import logging.config
|
import logging.config
|
||||||
import os
|
import os
|
||||||
import signal
|
|
||||||
import sys
|
import sys
|
||||||
from string import Template
|
from string import Template
|
||||||
|
|
||||||
@ -24,6 +23,7 @@ import yaml
|
|||||||
from twisted.logger import STDLibLogObserver, globalLogBeginner
|
from twisted.logger import STDLibLogObserver, globalLogBeginner
|
||||||
|
|
||||||
import synapse
|
import synapse
|
||||||
|
from synapse.app import _base as appbase
|
||||||
from synapse.util.logcontext import LoggingContextFilter
|
from synapse.util.logcontext import LoggingContextFilter
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@ -127,7 +127,7 @@ class LoggingConfig(Config):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def setup_logging(config, use_worker_options=False, register_sighup=None):
|
def setup_logging(config, use_worker_options=False):
|
||||||
""" Set up python logging
|
""" Set up python logging
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@ -140,12 +140,6 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
|
|||||||
register_sighup (func | None): Function to call to register a
|
register_sighup (func | None): Function to call to register a
|
||||||
sighup handler.
|
sighup handler.
|
||||||
"""
|
"""
|
||||||
if not register_sighup:
|
|
||||||
if getattr(signal, "SIGHUP"):
|
|
||||||
register_sighup = lambda x: signal.signal(signal.SIGHUP, x)
|
|
||||||
else:
|
|
||||||
register_sighup = lambda x: None
|
|
||||||
|
|
||||||
log_config = (config.worker_log_config if use_worker_options
|
log_config = (config.worker_log_config if use_worker_options
|
||||||
else config.log_config)
|
else config.log_config)
|
||||||
log_file = (config.worker_log_file if use_worker_options
|
log_file = (config.worker_log_file if use_worker_options
|
||||||
@ -187,7 +181,7 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
|
|||||||
else:
|
else:
|
||||||
handler = logging.StreamHandler()
|
handler = logging.StreamHandler()
|
||||||
|
|
||||||
def sighup(signum, stack):
|
def sighup(*args):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
handler.setFormatter(formatter)
|
handler.setFormatter(formatter)
|
||||||
@ -200,14 +194,14 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
|
|||||||
with open(log_config, 'r') as f:
|
with open(log_config, 'r') as f:
|
||||||
logging.config.dictConfig(yaml.load(f))
|
logging.config.dictConfig(yaml.load(f))
|
||||||
|
|
||||||
def sighup(signum, stack):
|
def sighup(*args):
|
||||||
# it might be better to use a file watcher or something for this.
|
# it might be better to use a file watcher or something for this.
|
||||||
load_log_config()
|
load_log_config()
|
||||||
logging.info("Reloaded log config from %s due to SIGHUP", log_config)
|
logging.info("Reloaded log config from %s due to SIGHUP", log_config)
|
||||||
|
|
||||||
load_log_config()
|
load_log_config()
|
||||||
|
|
||||||
register_sighup(sighup)
|
appbase.register_sighup(sighup)
|
||||||
|
|
||||||
# make sure that the first thing we log is a thing we can grep backwards
|
# make sure that the first thing we log is a thing we can grep backwards
|
||||||
# for
|
# for
|
||||||
|
@ -24,6 +24,14 @@ from ._base import Config, ConfigError
|
|||||||
|
|
||||||
logger = logging.Logger(__name__)
|
logger = logging.Logger(__name__)
|
||||||
|
|
||||||
|
# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
|
||||||
|
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
|
||||||
|
# on IPv6 when '::' is set.
|
||||||
|
#
|
||||||
|
# We later check for errors when binding to 0.0.0.0 and ignore them if :: is also in
|
||||||
|
# in the list.
|
||||||
|
DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']
|
||||||
|
|
||||||
|
|
||||||
class ServerConfig(Config):
|
class ServerConfig(Config):
|
||||||
|
|
||||||
@ -118,16 +126,38 @@ class ServerConfig(Config):
|
|||||||
self.public_baseurl += '/'
|
self.public_baseurl += '/'
|
||||||
self.start_pushers = config.get("start_pushers", True)
|
self.start_pushers = config.get("start_pushers", True)
|
||||||
|
|
||||||
self.listeners = config.get("listeners", [])
|
self.listeners = []
|
||||||
|
for listener in config.get("listeners", []):
|
||||||
|
if not isinstance(listener.get("port", None), int):
|
||||||
|
raise ConfigError(
|
||||||
|
"Listener configuration is lacking a valid 'port' option"
|
||||||
|
)
|
||||||
|
|
||||||
|
if listener.setdefault("tls", False):
|
||||||
|
# no_tls is not really supported any more, but let's grandfather it in
|
||||||
|
# here.
|
||||||
|
if config.get("no_tls", False):
|
||||||
|
logger.info(
|
||||||
|
"Ignoring TLS-enabled listener on port %i due to no_tls"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
for listener in self.listeners:
|
|
||||||
bind_address = listener.pop("bind_address", None)
|
bind_address = listener.pop("bind_address", None)
|
||||||
bind_addresses = listener.setdefault("bind_addresses", [])
|
bind_addresses = listener.setdefault("bind_addresses", [])
|
||||||
|
|
||||||
|
# if bind_address was specified, add it to the list of addresses
|
||||||
if bind_address:
|
if bind_address:
|
||||||
bind_addresses.append(bind_address)
|
bind_addresses.append(bind_address)
|
||||||
elif not bind_addresses:
|
|
||||||
bind_addresses.append('')
|
# if we still have an empty list of addresses, use the default list
|
||||||
|
if not bind_addresses:
|
||||||
|
if listener['type'] == 'metrics':
|
||||||
|
# the metrics listener doesn't support IPv6
|
||||||
|
bind_addresses.append('0.0.0.0')
|
||||||
|
else:
|
||||||
|
bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
|
||||||
|
|
||||||
|
self.listeners.append(listener)
|
||||||
|
|
||||||
if not self.web_client_location:
|
if not self.web_client_location:
|
||||||
_warn_if_webclient_configured(self.listeners)
|
_warn_if_webclient_configured(self.listeners)
|
||||||
@ -136,6 +166,9 @@ class ServerConfig(Config):
|
|||||||
|
|
||||||
bind_port = config.get("bind_port")
|
bind_port = config.get("bind_port")
|
||||||
if bind_port:
|
if bind_port:
|
||||||
|
if config.get("no_tls", False):
|
||||||
|
raise ConfigError("no_tls is incompatible with bind_port")
|
||||||
|
|
||||||
self.listeners = []
|
self.listeners = []
|
||||||
bind_host = config.get("bind_host", "")
|
bind_host = config.get("bind_host", "")
|
||||||
gzip_responses = config.get("gzip_responses", True)
|
gzip_responses = config.get("gzip_responses", True)
|
||||||
@ -182,6 +215,7 @@ class ServerConfig(Config):
|
|||||||
"port": manhole,
|
"port": manhole,
|
||||||
"bind_addresses": ["127.0.0.1"],
|
"bind_addresses": ["127.0.0.1"],
|
||||||
"type": "manhole",
|
"type": "manhole",
|
||||||
|
"tls": False,
|
||||||
})
|
})
|
||||||
|
|
||||||
metrics_port = config.get("metrics_port")
|
metrics_port = config.get("metrics_port")
|
||||||
@ -207,6 +241,9 @@ class ServerConfig(Config):
|
|||||||
|
|
||||||
_check_resource_config(self.listeners)
|
_check_resource_config(self.listeners)
|
||||||
|
|
||||||
|
def has_tls_listener(self):
|
||||||
|
return any(l["tls"] for l in self.listeners)
|
||||||
|
|
||||||
def default_config(self, server_name, data_dir_path, **kwargs):
|
def default_config(self, server_name, data_dir_path, **kwargs):
|
||||||
_, bind_port = parse_and_validate_server_name(server_name)
|
_, bind_port = parse_and_validate_server_name(server_name)
|
||||||
if bind_port is not None:
|
if bind_port is not None:
|
||||||
@ -295,75 +332,106 @@ class ServerConfig(Config):
|
|||||||
|
|
||||||
# List of ports that Synapse should listen on, their purpose and their
|
# List of ports that Synapse should listen on, their purpose and their
|
||||||
# configuration.
|
# configuration.
|
||||||
|
#
|
||||||
|
# Options for each listener include:
|
||||||
|
#
|
||||||
|
# port: the TCP port to bind to
|
||||||
|
#
|
||||||
|
# bind_addresses: a list of local addresses to listen on. The default is
|
||||||
|
# 'all local interfaces'.
|
||||||
|
#
|
||||||
|
# type: the type of listener. Normally 'http', but other valid options are:
|
||||||
|
# 'manhole' (see docs/manhole.md),
|
||||||
|
# 'metrics' (see docs/metrics-howto.rst),
|
||||||
|
# 'replication' (see docs/workers.rst).
|
||||||
|
#
|
||||||
|
# tls: set to true to enable TLS for this listener. Will use the TLS
|
||||||
|
# key/cert specified in tls_private_key_path / tls_certificate_path.
|
||||||
|
#
|
||||||
|
# x_forwarded: Only valid for an 'http' listener. Set to true to use the
|
||||||
|
# X-Forwarded-For header as the client IP. Useful when Synapse is
|
||||||
|
# behind a reverse-proxy.
|
||||||
|
#
|
||||||
|
# resources: Only valid for an 'http' listener. A list of resources to host
|
||||||
|
# on this port. Options for each resource are:
|
||||||
|
#
|
||||||
|
# names: a list of names of HTTP resources. See below for a list of
|
||||||
|
# valid resource names.
|
||||||
|
#
|
||||||
|
# compress: set to true to enable HTTP comression for this resource.
|
||||||
|
#
|
||||||
|
# additional_resources: Only valid for an 'http' listener. A map of
|
||||||
|
# additional endpoints which should be loaded via dynamic modules.
|
||||||
|
#
|
||||||
|
# Valid resource names are:
|
||||||
|
#
|
||||||
|
# client: the client-server API (/_matrix/client). Also implies 'media' and
|
||||||
|
# 'static'.
|
||||||
|
#
|
||||||
|
# consent: user consent forms (/_matrix/consent). See
|
||||||
|
# docs/consent_tracking.md.
|
||||||
|
#
|
||||||
|
# federation: the server-server API (/_matrix/federation). Also implies
|
||||||
|
# 'media', 'keys', 'openid'
|
||||||
|
#
|
||||||
|
# keys: the key discovery API (/_matrix/keys).
|
||||||
|
#
|
||||||
|
# media: the media API (/_matrix/media).
|
||||||
|
#
|
||||||
|
# metrics: the metrics interface. See docs/metrics-howto.rst.
|
||||||
|
#
|
||||||
|
# openid: OpenID authentication.
|
||||||
|
#
|
||||||
|
# replication: the HTTP replication API (/_synapse/replication). See
|
||||||
|
# docs/workers.rst.
|
||||||
|
#
|
||||||
|
# static: static resources under synapse/static (/_matrix/static). (Mostly
|
||||||
|
# useful for 'fallback authentication'.)
|
||||||
|
#
|
||||||
|
# webclient: A web client. Requires web_client_location to be set.
|
||||||
|
#
|
||||||
listeners:
|
listeners:
|
||||||
# Main HTTPS listener
|
# TLS-enabled listener: for when matrix traffic is sent directly to synapse.
|
||||||
# For when matrix traffic is sent directly to synapse.
|
#
|
||||||
-
|
# Disabled by default. To enable it, uncomment the following. (Note that you
|
||||||
# The port to listen for HTTPS requests on.
|
# will also need to give Synapse a TLS key and certificate: see the TLS section
|
||||||
port: %(bind_port)s
|
# below.)
|
||||||
|
#
|
||||||
|
# - port: %(bind_port)s
|
||||||
|
# type: http
|
||||||
|
# tls: true
|
||||||
|
# resources:
|
||||||
|
# - names: [client, federation]
|
||||||
|
|
||||||
# Local addresses to listen on.
|
# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
|
||||||
# On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
|
# that unwraps TLS.
|
||||||
# addresses by default. For most other OSes, this will only listen
|
#
|
||||||
# on IPv6.
|
# If you plan to use a reverse proxy, please see
|
||||||
bind_addresses:
|
# https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
|
||||||
- '::'
|
#
|
||||||
- '0.0.0.0'
|
- port: %(unsecure_port)s
|
||||||
|
tls: false
|
||||||
# This is a 'http' listener, allows us to specify 'resources'.
|
bind_addresses: ['::1', '127.0.0.1']
|
||||||
type: http
|
type: http
|
||||||
|
x_forwarded: true
|
||||||
|
|
||||||
tls: true
|
|
||||||
|
|
||||||
# Use the X-Forwarded-For (XFF) header as the client IP and not the
|
|
||||||
# actual client IP.
|
|
||||||
x_forwarded: false
|
|
||||||
|
|
||||||
# List of HTTP resources to serve on this listener.
|
|
||||||
resources:
|
resources:
|
||||||
-
|
- names: [client, federation]
|
||||||
# List of resources to host on this listener.
|
|
||||||
names:
|
|
||||||
- client # The client-server APIs, both v1 and v2
|
|
||||||
# - webclient # A web client. Requires web_client_location to be set.
|
|
||||||
|
|
||||||
# Should synapse compress HTTP responses to clients that support it?
|
|
||||||
# This should be disabled if running synapse behind a load balancer
|
|
||||||
# that can do automatic compression.
|
|
||||||
compress: true
|
|
||||||
|
|
||||||
- names: [federation] # Federation APIs
|
|
||||||
compress: false
|
compress: false
|
||||||
|
|
||||||
# optional list of additional endpoints which can be loaded via
|
# example additonal_resources:
|
||||||
# dynamic modules
|
#
|
||||||
# additional_resources:
|
# additional_resources:
|
||||||
# "/_matrix/my/custom/endpoint":
|
# "/_matrix/my/custom/endpoint":
|
||||||
# module: my_module.CustomRequestHandler
|
# module: my_module.CustomRequestHandler
|
||||||
# config: {}
|
# config: {}
|
||||||
|
|
||||||
# Unsecure HTTP listener,
|
|
||||||
# For when matrix traffic passes through loadbalancer that unwraps TLS.
|
|
||||||
- port: %(unsecure_port)s
|
|
||||||
tls: false
|
|
||||||
bind_addresses: ['::', '0.0.0.0']
|
|
||||||
type: http
|
|
||||||
|
|
||||||
x_forwarded: false
|
|
||||||
|
|
||||||
resources:
|
|
||||||
- names: [client]
|
|
||||||
compress: true
|
|
||||||
- names: [federation]
|
|
||||||
compress: false
|
|
||||||
|
|
||||||
# Turn on the twisted ssh manhole service on localhost on the given
|
# Turn on the twisted ssh manhole service on localhost on the given
|
||||||
# port.
|
# port.
|
||||||
# - port: 9000
|
# - port: 9000
|
||||||
# bind_addresses: ['::1', '127.0.0.1']
|
# bind_addresses: ['::1', '127.0.0.1']
|
||||||
# type: manhole
|
# type: manhole
|
||||||
|
|
||||||
|
|
||||||
# Homeserver blocking
|
# Homeserver blocking
|
||||||
#
|
#
|
||||||
# How to reach the server admin, used in ResourceLimitError
|
# How to reach the server admin, used in ResourceLimitError
|
||||||
@ -480,6 +548,7 @@ KNOWN_RESOURCES = (
|
|||||||
'keys',
|
'keys',
|
||||||
'media',
|
'media',
|
||||||
'metrics',
|
'metrics',
|
||||||
|
'openid',
|
||||||
'replication',
|
'replication',
|
||||||
'static',
|
'static',
|
||||||
'webclient',
|
'webclient',
|
||||||
|
@ -23,9 +23,9 @@ from unpaddedbase64 import encode_base64
|
|||||||
|
|
||||||
from OpenSSL import crypto
|
from OpenSSL import crypto
|
||||||
|
|
||||||
from synapse.config._base import Config
|
from synapse.config._base import Config, ConfigError
|
||||||
|
|
||||||
logger = logging.getLogger()
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class TlsConfig(Config):
|
class TlsConfig(Config):
|
||||||
@ -45,9 +45,25 @@ class TlsConfig(Config):
|
|||||||
|
|
||||||
self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
|
self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
|
||||||
self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
|
self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
|
||||||
self._original_tls_fingerprints = config["tls_fingerprints"]
|
|
||||||
|
if self.has_tls_listener():
|
||||||
|
if not self.tls_certificate_file:
|
||||||
|
raise ConfigError(
|
||||||
|
"tls_certificate_path must be specified if TLS-enabled listeners are "
|
||||||
|
"configured."
|
||||||
|
)
|
||||||
|
if not self.tls_private_key_file:
|
||||||
|
raise ConfigError(
|
||||||
|
"tls_certificate_path must be specified if TLS-enabled listeners are "
|
||||||
|
"configured."
|
||||||
|
)
|
||||||
|
|
||||||
|
self._original_tls_fingerprints = config.get("tls_fingerprints", [])
|
||||||
|
|
||||||
|
if self._original_tls_fingerprints is None:
|
||||||
|
self._original_tls_fingerprints = []
|
||||||
|
|
||||||
self.tls_fingerprints = list(self._original_tls_fingerprints)
|
self.tls_fingerprints = list(self._original_tls_fingerprints)
|
||||||
self.no_tls = config.get("no_tls", False)
|
|
||||||
|
|
||||||
# This config option applies to non-federation HTTP clients
|
# This config option applies to non-federation HTTP clients
|
||||||
# (e.g. for talking to recaptcha, identity servers, and such)
|
# (e.g. for talking to recaptcha, identity servers, and such)
|
||||||
@ -60,10 +76,14 @@ class TlsConfig(Config):
|
|||||||
self.tls_certificate = None
|
self.tls_certificate = None
|
||||||
self.tls_private_key = None
|
self.tls_private_key = None
|
||||||
|
|
||||||
def is_disk_cert_valid(self):
|
def is_disk_cert_valid(self, allow_self_signed=True):
|
||||||
"""
|
"""
|
||||||
Is the certificate we have on disk valid, and if so, for how long?
|
Is the certificate we have on disk valid, and if so, for how long?
|
||||||
|
|
||||||
|
Args:
|
||||||
|
allow_self_signed (bool): Should we allow the certificate we
|
||||||
|
read to be self signed?
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
int: Days remaining of certificate validity.
|
int: Days remaining of certificate validity.
|
||||||
None: No certificate exists.
|
None: No certificate exists.
|
||||||
@ -84,6 +104,12 @@ class TlsConfig(Config):
|
|||||||
logger.exception("Failed to parse existing certificate off disk!")
|
logger.exception("Failed to parse existing certificate off disk!")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
if not allow_self_signed:
|
||||||
|
if tls_certificate.get_subject() == tls_certificate.get_issuer():
|
||||||
|
raise ValueError(
|
||||||
|
"TLS Certificate is self signed, and this is not permitted"
|
||||||
|
)
|
||||||
|
|
||||||
# YYYYMMDDhhmmssZ -- in UTC
|
# YYYYMMDDhhmmssZ -- in UTC
|
||||||
expires_on = datetime.strptime(
|
expires_on = datetime.strptime(
|
||||||
tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
|
tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
|
||||||
@ -92,36 +118,40 @@ class TlsConfig(Config):
|
|||||||
days_remaining = (expires_on - now).days
|
days_remaining = (expires_on - now).days
|
||||||
return days_remaining
|
return days_remaining
|
||||||
|
|
||||||
def read_certificate_from_disk(self):
|
def read_certificate_from_disk(self, require_cert_and_key):
|
||||||
"""
|
"""
|
||||||
Read the certificates from disk.
|
Read the certificates and private key from disk.
|
||||||
"""
|
|
||||||
self.tls_certificate = self.read_tls_certificate(self.tls_certificate_file)
|
|
||||||
|
|
||||||
# Check if it is self-signed, and issue a warning if so.
|
Args:
|
||||||
if self.tls_certificate.get_issuer() == self.tls_certificate.get_subject():
|
require_cert_and_key (bool): set to True to throw an error if the certificate
|
||||||
warnings.warn(
|
and key file are not given
|
||||||
(
|
"""
|
||||||
"Self-signed TLS certificates will not be accepted by Synapse 1.0. "
|
if require_cert_and_key:
|
||||||
"Please either provide a valid certificate, or use Synapse's ACME "
|
self.tls_private_key = self.read_tls_private_key()
|
||||||
"support to provision one."
|
self.tls_certificate = self.read_tls_certificate()
|
||||||
|
elif self.tls_certificate_file:
|
||||||
|
# we only need the certificate for the tls_fingerprints. Reload it if we
|
||||||
|
# can, but it's not a fatal error if we can't.
|
||||||
|
try:
|
||||||
|
self.tls_certificate = self.read_tls_certificate()
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(
|
||||||
|
"Unable to read TLS certificate (%s). Ignoring as no "
|
||||||
|
"tls listeners enabled.", e,
|
||||||
)
|
)
|
||||||
)
|
|
||||||
|
|
||||||
if not self.no_tls:
|
|
||||||
self.tls_private_key = self.read_tls_private_key(self.tls_private_key_file)
|
|
||||||
|
|
||||||
self.tls_fingerprints = list(self._original_tls_fingerprints)
|
self.tls_fingerprints = list(self._original_tls_fingerprints)
|
||||||
|
|
||||||
# Check that our own certificate is included in the list of fingerprints
|
if self.tls_certificate:
|
||||||
# and include it if it is not.
|
# Check that our own certificate is included in the list of fingerprints
|
||||||
x509_certificate_bytes = crypto.dump_certificate(
|
# and include it if it is not.
|
||||||
crypto.FILETYPE_ASN1, self.tls_certificate
|
x509_certificate_bytes = crypto.dump_certificate(
|
||||||
)
|
crypto.FILETYPE_ASN1, self.tls_certificate
|
||||||
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
|
)
|
||||||
sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
|
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
|
||||||
if sha256_fingerprint not in sha256_fingerprints:
|
sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
|
||||||
self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
|
if sha256_fingerprint not in sha256_fingerprints:
|
||||||
|
self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
|
||||||
|
|
||||||
def default_config(self, config_dir_path, server_name, **kwargs):
|
def default_config(self, config_dir_path, server_name, **kwargs):
|
||||||
base_key_name = os.path.join(config_dir_path, server_name)
|
base_key_name = os.path.join(config_dir_path, server_name)
|
||||||
@ -137,6 +167,8 @@ class TlsConfig(Config):
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
"""\
|
"""\
|
||||||
|
## TLS ##
|
||||||
|
|
||||||
# PEM-encoded X509 certificate for TLS.
|
# PEM-encoded X509 certificate for TLS.
|
||||||
# This certificate, as of Synapse 1.0, will need to be a valid and verifiable
|
# This certificate, as of Synapse 1.0, will need to be a valid and verifiable
|
||||||
# certificate, signed by a recognised Certificate Authority.
|
# certificate, signed by a recognised Certificate Authority.
|
||||||
@ -144,10 +176,10 @@ class TlsConfig(Config):
|
|||||||
# See 'ACME support' below to enable auto-provisioning this certificate via
|
# See 'ACME support' below to enable auto-provisioning this certificate via
|
||||||
# Let's Encrypt.
|
# Let's Encrypt.
|
||||||
#
|
#
|
||||||
tls_certificate_path: "%(tls_certificate_path)s"
|
# tls_certificate_path: "%(tls_certificate_path)s"
|
||||||
|
|
||||||
# PEM-encoded private key for TLS
|
# PEM-encoded private key for TLS
|
||||||
tls_private_key_path: "%(tls_private_key_path)s"
|
# tls_private_key_path: "%(tls_private_key_path)s"
|
||||||
|
|
||||||
# ACME support: This will configure Synapse to request a valid TLS certificate
|
# ACME support: This will configure Synapse to request a valid TLS certificate
|
||||||
# for your configured `server_name` via Let's Encrypt.
|
# for your configured `server_name` via Let's Encrypt.
|
||||||
@ -172,7 +204,7 @@ class TlsConfig(Config):
|
|||||||
#
|
#
|
||||||
acme:
|
acme:
|
||||||
# ACME support is disabled by default. Uncomment the following line
|
# ACME support is disabled by default. Uncomment the following line
|
||||||
# to enable it.
|
# (and tls_certificate_path and tls_private_key_path above) to enable it.
|
||||||
#
|
#
|
||||||
# enabled: true
|
# enabled: true
|
||||||
|
|
||||||
@ -197,13 +229,6 @@ class TlsConfig(Config):
|
|||||||
#
|
#
|
||||||
# reprovision_threshold: 30
|
# reprovision_threshold: 30
|
||||||
|
|
||||||
# If your server runs behind a reverse-proxy which terminates TLS connections
|
|
||||||
# (for both client and federation connections), it may be useful to disable
|
|
||||||
# All TLS support for incoming connections. Setting no_tls to True will
|
|
||||||
# do so (and avoid the need to give synapse a TLS private key).
|
|
||||||
#
|
|
||||||
# no_tls: True
|
|
||||||
|
|
||||||
# List of allowed TLS fingerprints for this server to publish along
|
# List of allowed TLS fingerprints for this server to publish along
|
||||||
# with the signing keys for this server. Other matrix servers that
|
# with the signing keys for this server. Other matrix servers that
|
||||||
# make HTTPS requests to this server will check that the TLS
|
# make HTTPS requests to this server will check that the TLS
|
||||||
@ -236,10 +261,38 @@ class TlsConfig(Config):
|
|||||||
% locals()
|
% locals()
|
||||||
)
|
)
|
||||||
|
|
||||||
def read_tls_certificate(self, cert_path):
|
def read_tls_certificate(self):
|
||||||
cert_pem = self.read_file(cert_path, "tls_certificate")
|
"""Reads the TLS certificate from the configured file, and returns it
|
||||||
return crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
|
|
||||||
|
|
||||||
def read_tls_private_key(self, private_key_path):
|
Also checks if it is self-signed, and warns if so
|
||||||
private_key_pem = self.read_file(private_key_path, "tls_private_key")
|
|
||||||
|
Returns:
|
||||||
|
OpenSSL.crypto.X509: the certificate
|
||||||
|
"""
|
||||||
|
cert_path = self.tls_certificate_file
|
||||||
|
logger.info("Loading TLS certificate from %s", cert_path)
|
||||||
|
cert_pem = self.read_file(cert_path, "tls_certificate_path")
|
||||||
|
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
|
||||||
|
|
||||||
|
# Check if it is self-signed, and issue a warning if so.
|
||||||
|
if cert.get_issuer() == cert.get_subject():
|
||||||
|
warnings.warn(
|
||||||
|
(
|
||||||
|
"Self-signed TLS certificates will not be accepted by Synapse 1.0. "
|
||||||
|
"Please either provide a valid certificate, or use Synapse's ACME "
|
||||||
|
"support to provision one."
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return cert
|
||||||
|
|
||||||
|
def read_tls_private_key(self):
|
||||||
|
"""Reads the TLS private key from the configured file, and returns it
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OpenSSL.crypto.PKey: the private key
|
||||||
|
"""
|
||||||
|
private_key_path = self.tls_private_key_file
|
||||||
|
logger.info("Loading TLS key from %s", private_key_path)
|
||||||
|
private_key_pem = self.read_file(private_key_path, "tls_private_key_path")
|
||||||
return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
|
return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
|
||||||
|
@ -43,9 +43,7 @@ class ServerContextFactory(ContextFactory):
|
|||||||
logger.exception("Failed to enable elliptic curve for TLS")
|
logger.exception("Failed to enable elliptic curve for TLS")
|
||||||
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
|
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
|
||||||
context.use_certificate_chain_file(config.tls_certificate_file)
|
context.use_certificate_chain_file(config.tls_certificate_file)
|
||||||
|
context.use_privatekey(config.tls_private_key)
|
||||||
if not config.no_tls:
|
|
||||||
context.use_privatekey(config.tls_private_key)
|
|
||||||
|
|
||||||
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
|
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
|
||||||
context.set_cipher_list(
|
context.set_cipher_list(
|
||||||
|
@ -148,6 +148,22 @@ class FederationServer(FederationBase):
|
|||||||
|
|
||||||
logger.debug("[%s] Transaction is new", transaction.transaction_id)
|
logger.debug("[%s] Transaction is new", transaction.transaction_id)
|
||||||
|
|
||||||
|
# Reject if PDU count > 50 and EDU count > 100
|
||||||
|
if (len(transaction.pdus) > 50
|
||||||
|
or (hasattr(transaction, "edus") and len(transaction.edus) > 100)):
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Transaction PDU or EDU count too large. Returning 400",
|
||||||
|
)
|
||||||
|
|
||||||
|
response = {}
|
||||||
|
yield self.transaction_actions.set_response(
|
||||||
|
origin,
|
||||||
|
transaction,
|
||||||
|
400, response
|
||||||
|
)
|
||||||
|
defer.returnValue((400, response))
|
||||||
|
|
||||||
received_pdus_counter.inc(len(transaction.pdus))
|
received_pdus_counter.inc(len(transaction.pdus))
|
||||||
|
|
||||||
origin_host, _ = parse_server_name(origin)
|
origin_host, _ = parse_server_name(origin)
|
||||||
|
@ -43,9 +43,20 @@ logger = logging.getLogger(__name__)
|
|||||||
class TransportLayerServer(JsonResource):
|
class TransportLayerServer(JsonResource):
|
||||||
"""Handles incoming federation HTTP requests"""
|
"""Handles incoming federation HTTP requests"""
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs, servlet_groups=None):
|
||||||
|
"""Initialize the TransportLayerServer
|
||||||
|
|
||||||
|
Will by default register all servlets. For custom behaviour, pass in
|
||||||
|
a list of servlet_groups to register.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hs (synapse.server.HomeServer): homeserver
|
||||||
|
servlet_groups (list[str], optional): List of servlet groups to register.
|
||||||
|
Defaults to ``DEFAULT_SERVLET_GROUPS``.
|
||||||
|
"""
|
||||||
self.hs = hs
|
self.hs = hs
|
||||||
self.clock = hs.get_clock()
|
self.clock = hs.get_clock()
|
||||||
|
self.servlet_groups = servlet_groups
|
||||||
|
|
||||||
super(TransportLayerServer, self).__init__(hs, canonical_json=False)
|
super(TransportLayerServer, self).__init__(hs, canonical_json=False)
|
||||||
|
|
||||||
@ -67,6 +78,7 @@ class TransportLayerServer(JsonResource):
|
|||||||
resource=self,
|
resource=self,
|
||||||
ratelimiter=self.ratelimiter,
|
ratelimiter=self.ratelimiter,
|
||||||
authenticator=self.authenticator,
|
authenticator=self.authenticator,
|
||||||
|
servlet_groups=self.servlet_groups,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -1308,10 +1320,12 @@ FEDERATION_SERVLET_CLASSES = (
|
|||||||
FederationClientKeysClaimServlet,
|
FederationClientKeysClaimServlet,
|
||||||
FederationThirdPartyInviteExchangeServlet,
|
FederationThirdPartyInviteExchangeServlet,
|
||||||
On3pidBindServlet,
|
On3pidBindServlet,
|
||||||
OpenIdUserInfo,
|
|
||||||
FederationVersionServlet,
|
FederationVersionServlet,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
OPENID_SERVLET_CLASSES = (
|
||||||
|
OpenIdUserInfo,
|
||||||
|
)
|
||||||
|
|
||||||
ROOM_LIST_CLASSES = (
|
ROOM_LIST_CLASSES = (
|
||||||
PublicRoomList,
|
PublicRoomList,
|
||||||
@ -1350,44 +1364,83 @@ GROUP_ATTESTATION_SERVLET_CLASSES = (
|
|||||||
FederationGroupsRenewAttestaionServlet,
|
FederationGroupsRenewAttestaionServlet,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
DEFAULT_SERVLET_GROUPS = (
|
||||||
|
"federation",
|
||||||
|
"room_list",
|
||||||
|
"group_server",
|
||||||
|
"group_local",
|
||||||
|
"group_attestation",
|
||||||
|
"openid",
|
||||||
|
)
|
||||||
|
|
||||||
def register_servlets(hs, resource, authenticator, ratelimiter):
|
|
||||||
for servletclass in FEDERATION_SERVLET_CLASSES:
|
|
||||||
servletclass(
|
|
||||||
handler=hs.get_federation_server(),
|
|
||||||
authenticator=authenticator,
|
|
||||||
ratelimiter=ratelimiter,
|
|
||||||
server_name=hs.hostname,
|
|
||||||
).register(resource)
|
|
||||||
|
|
||||||
for servletclass in ROOM_LIST_CLASSES:
|
def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
|
||||||
servletclass(
|
"""Initialize and register servlet classes.
|
||||||
handler=hs.get_room_list_handler(),
|
|
||||||
authenticator=authenticator,
|
|
||||||
ratelimiter=ratelimiter,
|
|
||||||
server_name=hs.hostname,
|
|
||||||
).register(resource)
|
|
||||||
|
|
||||||
for servletclass in GROUP_SERVER_SERVLET_CLASSES:
|
Will by default register all servlets. For custom behaviour, pass in
|
||||||
servletclass(
|
a list of servlet_groups to register.
|
||||||
handler=hs.get_groups_server_handler(),
|
|
||||||
authenticator=authenticator,
|
|
||||||
ratelimiter=ratelimiter,
|
|
||||||
server_name=hs.hostname,
|
|
||||||
).register(resource)
|
|
||||||
|
|
||||||
for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
|
Args:
|
||||||
servletclass(
|
hs (synapse.server.HomeServer): homeserver
|
||||||
handler=hs.get_groups_local_handler(),
|
resource (TransportLayerServer): resource class to register to
|
||||||
authenticator=authenticator,
|
authenticator (Authenticator): authenticator to use
|
||||||
ratelimiter=ratelimiter,
|
ratelimiter (util.ratelimitutils.FederationRateLimiter): ratelimiter to use
|
||||||
server_name=hs.hostname,
|
servlet_groups (list[str], optional): List of servlet groups to register.
|
||||||
).register(resource)
|
Defaults to ``DEFAULT_SERVLET_GROUPS``.
|
||||||
|
"""
|
||||||
|
if not servlet_groups:
|
||||||
|
servlet_groups = DEFAULT_SERVLET_GROUPS
|
||||||
|
|
||||||
for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
|
if "federation" in servlet_groups:
|
||||||
servletclass(
|
for servletclass in FEDERATION_SERVLET_CLASSES:
|
||||||
handler=hs.get_groups_attestation_renewer(),
|
servletclass(
|
||||||
authenticator=authenticator,
|
handler=hs.get_federation_server(),
|
||||||
ratelimiter=ratelimiter,
|
authenticator=authenticator,
|
||||||
server_name=hs.hostname,
|
ratelimiter=ratelimiter,
|
||||||
).register(resource)
|
server_name=hs.hostname,
|
||||||
|
).register(resource)
|
||||||
|
|
||||||
|
if "openid" in servlet_groups:
|
||||||
|
for servletclass in OPENID_SERVLET_CLASSES:
|
||||||
|
servletclass(
|
||||||
|
handler=hs.get_federation_server(),
|
||||||
|
authenticator=authenticator,
|
||||||
|
ratelimiter=ratelimiter,
|
||||||
|
server_name=hs.hostname,
|
||||||
|
).register(resource)
|
||||||
|
|
||||||
|
if "room_list" in servlet_groups:
|
||||||
|
for servletclass in ROOM_LIST_CLASSES:
|
||||||
|
servletclass(
|
||||||
|
handler=hs.get_room_list_handler(),
|
||||||
|
authenticator=authenticator,
|
||||||
|
ratelimiter=ratelimiter,
|
||||||
|
server_name=hs.hostname,
|
||||||
|
).register(resource)
|
||||||
|
|
||||||
|
if "group_server" in servlet_groups:
|
||||||
|
for servletclass in GROUP_SERVER_SERVLET_CLASSES:
|
||||||
|
servletclass(
|
||||||
|
handler=hs.get_groups_server_handler(),
|
||||||
|
authenticator=authenticator,
|
||||||
|
ratelimiter=ratelimiter,
|
||||||
|
server_name=hs.hostname,
|
||||||
|
).register(resource)
|
||||||
|
|
||||||
|
if "group_local" in servlet_groups:
|
||||||
|
for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
|
||||||
|
servletclass(
|
||||||
|
handler=hs.get_groups_local_handler(),
|
||||||
|
authenticator=authenticator,
|
||||||
|
ratelimiter=ratelimiter,
|
||||||
|
server_name=hs.hostname,
|
||||||
|
).register(resource)
|
||||||
|
|
||||||
|
if "group_attestation" in servlet_groups:
|
||||||
|
for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
|
||||||
|
servletclass(
|
||||||
|
handler=hs.get_groups_attestation_renewer(),
|
||||||
|
authenticator=authenticator,
|
||||||
|
ratelimiter=ratelimiter,
|
||||||
|
server_name=hs.hostname,
|
||||||
|
).register(resource)
|
||||||
|
@ -19,7 +19,13 @@ from six import iteritems
|
|||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError
|
from synapse.api.errors import (
|
||||||
|
Codes,
|
||||||
|
NotFoundError,
|
||||||
|
RoomKeysVersionError,
|
||||||
|
StoreError,
|
||||||
|
SynapseError,
|
||||||
|
)
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@ -267,7 +273,7 @@ class E2eRoomKeysHandler(object):
|
|||||||
version(str): Optional; if None gives the most recent version
|
version(str): Optional; if None gives the most recent version
|
||||||
otherwise a historical one.
|
otherwise a historical one.
|
||||||
Raises:
|
Raises:
|
||||||
StoreError: code 404 if the requested backup version doesn't exist
|
NotFoundError: if the requested backup version doesn't exist
|
||||||
Returns:
|
Returns:
|
||||||
A deferred of a info dict that gives the info about the new version.
|
A deferred of a info dict that gives the info about the new version.
|
||||||
|
|
||||||
@ -279,7 +285,13 @@ class E2eRoomKeysHandler(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
with (yield self._upload_linearizer.queue(user_id)):
|
with (yield self._upload_linearizer.queue(user_id)):
|
||||||
res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
|
try:
|
||||||
|
res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
|
||||||
|
except StoreError as e:
|
||||||
|
if e.code == 404:
|
||||||
|
raise NotFoundError("Unknown backup version")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
defer.returnValue(res)
|
defer.returnValue(res)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@ -290,8 +302,60 @@ class E2eRoomKeysHandler(object):
|
|||||||
user_id(str): the user whose current backup version we're deleting
|
user_id(str): the user whose current backup version we're deleting
|
||||||
version(str): the version id of the backup being deleted
|
version(str): the version id of the backup being deleted
|
||||||
Raises:
|
Raises:
|
||||||
StoreError: code 404 if this backup version doesn't exist
|
NotFoundError: if this backup version doesn't exist
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with (yield self._upload_linearizer.queue(user_id)):
|
with (yield self._upload_linearizer.queue(user_id)):
|
||||||
yield self.store.delete_e2e_room_keys_version(user_id, version)
|
try:
|
||||||
|
yield self.store.delete_e2e_room_keys_version(user_id, version)
|
||||||
|
except StoreError as e:
|
||||||
|
if e.code == 404:
|
||||||
|
raise NotFoundError("Unknown backup version")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def update_version(self, user_id, version, version_info):
|
||||||
|
"""Update the info about a given version of the user's backup
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user_id(str): the user whose current backup version we're updating
|
||||||
|
version(str): the backup version we're updating
|
||||||
|
version_info(dict): the new information about the backup
|
||||||
|
Raises:
|
||||||
|
NotFoundError: if the requested backup version doesn't exist
|
||||||
|
Returns:
|
||||||
|
A deferred of an empty dict.
|
||||||
|
"""
|
||||||
|
if "version" not in version_info:
|
||||||
|
raise SynapseError(
|
||||||
|
400,
|
||||||
|
"Missing version in body",
|
||||||
|
Codes.MISSING_PARAM
|
||||||
|
)
|
||||||
|
if version_info["version"] != version:
|
||||||
|
raise SynapseError(
|
||||||
|
400,
|
||||||
|
"Version in body does not match",
|
||||||
|
Codes.INVALID_PARAM
|
||||||
|
)
|
||||||
|
with (yield self._upload_linearizer.queue(user_id)):
|
||||||
|
try:
|
||||||
|
old_info = yield self.store.get_e2e_room_keys_version_info(
|
||||||
|
user_id, version
|
||||||
|
)
|
||||||
|
except StoreError as e:
|
||||||
|
if e.code == 404:
|
||||||
|
raise NotFoundError("Unknown backup version")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
if old_info["algorithm"] != version_info["algorithm"]:
|
||||||
|
raise SynapseError(
|
||||||
|
400,
|
||||||
|
"Algorithm does not match",
|
||||||
|
Codes.INVALID_PARAM
|
||||||
|
)
|
||||||
|
|
||||||
|
yield self.store.update_e2e_room_keys_version(user_id, version, version_info)
|
||||||
|
|
||||||
|
defer.returnValue({})
|
||||||
|
@ -263,6 +263,16 @@ class RoomCreationHandler(BaseHandler):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Check if old room was non-federatable
|
||||||
|
|
||||||
|
# Get old room's create event
|
||||||
|
old_room_create_event = yield self.store.get_create_event_for_room(old_room_id)
|
||||||
|
|
||||||
|
# Check if the create event specified a non-federatable room
|
||||||
|
if not old_room_create_event.content.get("m.federate", True):
|
||||||
|
# If so, mark the new room as non-federatable as well
|
||||||
|
creation_content["m.federate"] = False
|
||||||
|
|
||||||
initial_state = dict()
|
initial_state = dict()
|
||||||
|
|
||||||
# Replicate relevant room events
|
# Replicate relevant room events
|
||||||
@ -274,6 +284,7 @@ class RoomCreationHandler(BaseHandler):
|
|||||||
(EventTypes.GuestAccess, ""),
|
(EventTypes.GuestAccess, ""),
|
||||||
(EventTypes.RoomAvatar, ""),
|
(EventTypes.RoomAvatar, ""),
|
||||||
(EventTypes.Encryption, ""),
|
(EventTypes.Encryption, ""),
|
||||||
|
(EventTypes.ServerACL, ""),
|
||||||
)
|
)
|
||||||
|
|
||||||
old_room_state_ids = yield self.store.get_filtered_current_state_ids(
|
old_room_state_ids = yield self.store.get_filtered_current_state_ids(
|
||||||
|
@ -28,7 +28,7 @@ from canonicaljson import encode_canonical_json
|
|||||||
from prometheus_client import Counter
|
from prometheus_client import Counter
|
||||||
from signedjson.sign import sign_json
|
from signedjson.sign import sign_json
|
||||||
|
|
||||||
from twisted.internet import defer, protocol
|
from twisted.internet import defer, protocol, task
|
||||||
from twisted.internet.error import DNSLookupError
|
from twisted.internet.error import DNSLookupError
|
||||||
from twisted.internet.task import _EPSILON, Cooperator
|
from twisted.internet.task import _EPSILON, Cooperator
|
||||||
from twisted.web._newclient import ResponseDone
|
from twisted.web._newclient import ResponseDone
|
||||||
@ -168,7 +168,7 @@ class MatrixFederationHttpClient(object):
|
|||||||
requests.
|
requests.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs, tls_client_options_factory):
|
||||||
self.hs = hs
|
self.hs = hs
|
||||||
self.signing_key = hs.config.signing_key[0]
|
self.signing_key = hs.config.signing_key[0]
|
||||||
self.server_name = hs.hostname
|
self.server_name = hs.hostname
|
||||||
@ -176,7 +176,7 @@ class MatrixFederationHttpClient(object):
|
|||||||
|
|
||||||
self.agent = MatrixFederationAgent(
|
self.agent = MatrixFederationAgent(
|
||||||
hs.get_reactor(),
|
hs.get_reactor(),
|
||||||
hs.tls_client_options_factory,
|
tls_client_options_factory,
|
||||||
)
|
)
|
||||||
self.clock = hs.get_clock()
|
self.clock = hs.get_clock()
|
||||||
self._store = hs.get_datastore()
|
self._store = hs.get_datastore()
|
||||||
@ -286,7 +286,7 @@ class MatrixFederationHttpClient(object):
|
|||||||
json,
|
json,
|
||||||
)
|
)
|
||||||
data = encode_canonical_json(json)
|
data = encode_canonical_json(json)
|
||||||
producer = FileBodyProducer(
|
producer = QuieterFileBodyProducer(
|
||||||
BytesIO(data),
|
BytesIO(data),
|
||||||
cooperator=self._cooperator,
|
cooperator=self._cooperator,
|
||||||
)
|
)
|
||||||
@ -839,3 +839,16 @@ def encode_query_args(args):
|
|||||||
query_bytes = urllib.parse.urlencode(encoded_args, True)
|
query_bytes = urllib.parse.urlencode(encoded_args, True)
|
||||||
|
|
||||||
return query_bytes.encode('utf8')
|
return query_bytes.encode('utf8')
|
||||||
|
|
||||||
|
|
||||||
|
class QuieterFileBodyProducer(FileBodyProducer):
|
||||||
|
"""Wrapper for FileBodyProducer that avoids CRITICAL errors when the connection drops.
|
||||||
|
|
||||||
|
Workaround for https://github.com/matrix-org/synapse/issues/4003 /
|
||||||
|
https://twistedmatrix.com/trac/ticket/6528
|
||||||
|
"""
|
||||||
|
def stopProducing(self):
|
||||||
|
try:
|
||||||
|
FileBodyProducer.stopProducing(self)
|
||||||
|
except task.TaskStopped:
|
||||||
|
pass
|
||||||
|
@ -85,7 +85,7 @@ CONDITIONAL_REQUIREMENTS = {
|
|||||||
|
|
||||||
"saml2": ["pysaml2>=4.5.0"],
|
"saml2": ["pysaml2>=4.5.0"],
|
||||||
"url_preview": ["lxml>=3.5.0"],
|
"url_preview": ["lxml>=3.5.0"],
|
||||||
"test": ["mock>=2.0"],
|
"test": ["mock>=2.0", "parameterized"],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -380,6 +380,40 @@ class RoomKeysVersionServlet(RestServlet):
|
|||||||
)
|
)
|
||||||
defer.returnValue((200, {}))
|
defer.returnValue((200, {}))
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def on_PUT(self, request, version):
|
||||||
|
"""
|
||||||
|
Update the information about a given version of the user's room_keys backup.
|
||||||
|
|
||||||
|
POST /room_keys/version/12345 HTTP/1.1
|
||||||
|
Content-Type: application/json
|
||||||
|
{
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": {
|
||||||
|
"public_key": "abcdefg",
|
||||||
|
"signatures": {
|
||||||
|
"ed25519:something": "hijklmnop"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"version": "42"
|
||||||
|
}
|
||||||
|
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
Content-Type: application/json
|
||||||
|
{}
|
||||||
|
"""
|
||||||
|
requester = yield self.auth.get_user_by_req(request, allow_guest=False)
|
||||||
|
user_id = requester.user.to_string()
|
||||||
|
info = parse_json_object_from_request(request)
|
||||||
|
|
||||||
|
if version is None:
|
||||||
|
raise SynapseError(400, "No version specified to update", Codes.MISSING_PARAM)
|
||||||
|
|
||||||
|
yield self.e2e_room_keys_handler.update_version(
|
||||||
|
user_id, version, info
|
||||||
|
)
|
||||||
|
defer.returnValue((200, {}))
|
||||||
|
|
||||||
|
|
||||||
def register_servlets(hs, http_server):
|
def register_servlets(hs, http_server):
|
||||||
RoomKeysServlet(hs).register(http_server)
|
RoomKeysServlet(hs).register(http_server)
|
||||||
|
@ -75,7 +75,7 @@ class SyncRestServlet(RestServlet):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
PATTERNS = client_v2_patterns("/sync$")
|
PATTERNS = client_v2_patterns("/sync$")
|
||||||
ALLOWED_PRESENCE = set(["online", "offline"])
|
ALLOWED_PRESENCE = set(["online", "offline", "unavailable"])
|
||||||
|
|
||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(SyncRestServlet, self).__init__()
|
super(SyncRestServlet, self).__init__()
|
||||||
|
@ -38,6 +38,7 @@ class VersionsRestServlet(RestServlet):
|
|||||||
"r0.1.0",
|
"r0.1.0",
|
||||||
"r0.2.0",
|
"r0.2.0",
|
||||||
"r0.3.0",
|
"r0.3.0",
|
||||||
|
"r0.4.0",
|
||||||
],
|
],
|
||||||
# as per MSC1497:
|
# as per MSC1497:
|
||||||
"unstable_features": {
|
"unstable_features": {
|
||||||
|
@ -31,6 +31,7 @@ from synapse.api.filtering import Filtering
|
|||||||
from synapse.api.ratelimiting import Ratelimiter
|
from synapse.api.ratelimiting import Ratelimiter
|
||||||
from synapse.appservice.api import ApplicationServiceApi
|
from synapse.appservice.api import ApplicationServiceApi
|
||||||
from synapse.appservice.scheduler import ApplicationServiceScheduler
|
from synapse.appservice.scheduler import ApplicationServiceScheduler
|
||||||
|
from synapse.crypto import context_factory
|
||||||
from synapse.crypto.keyring import Keyring
|
from synapse.crypto.keyring import Keyring
|
||||||
from synapse.events.builder import EventBuilderFactory
|
from synapse.events.builder import EventBuilderFactory
|
||||||
from synapse.events.spamcheck import SpamChecker
|
from synapse.events.spamcheck import SpamChecker
|
||||||
@ -112,6 +113,8 @@ class HomeServer(object):
|
|||||||
|
|
||||||
Attributes:
|
Attributes:
|
||||||
config (synapse.config.homeserver.HomeserverConfig):
|
config (synapse.config.homeserver.HomeserverConfig):
|
||||||
|
_listening_services (list[twisted.internet.tcp.Port]): TCP ports that
|
||||||
|
we are listening on to provide HTTP services.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__metaclass__ = abc.ABCMeta
|
__metaclass__ = abc.ABCMeta
|
||||||
@ -196,6 +199,7 @@ class HomeServer(object):
|
|||||||
self._reactor = reactor
|
self._reactor = reactor
|
||||||
self.hostname = hostname
|
self.hostname = hostname
|
||||||
self._building = {}
|
self._building = {}
|
||||||
|
self._listening_services = []
|
||||||
|
|
||||||
self.clock = Clock(reactor)
|
self.clock = Clock(reactor)
|
||||||
self.distributor = Distributor()
|
self.distributor = Distributor()
|
||||||
@ -364,7 +368,10 @@ class HomeServer(object):
|
|||||||
return PusherPool(self)
|
return PusherPool(self)
|
||||||
|
|
||||||
def build_http_client(self):
|
def build_http_client(self):
|
||||||
return MatrixFederationHttpClient(self)
|
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
|
||||||
|
self.config
|
||||||
|
)
|
||||||
|
return MatrixFederationHttpClient(self, tls_client_options_factory)
|
||||||
|
|
||||||
def build_db_pool(self):
|
def build_db_pool(self):
|
||||||
name = self.db_config["name"]
|
name = self.db_config["name"]
|
||||||
|
@ -50,6 +50,21 @@ sql_query_timer = Histogram("synapse_storage_query_time", "sec", ["verb"])
|
|||||||
sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"])
|
sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"])
|
||||||
|
|
||||||
|
|
||||||
|
# Unique indexes which have been added in background updates. Maps from table name
|
||||||
|
# to the name of the background update which added the unique index to that table.
|
||||||
|
#
|
||||||
|
# This is used by the upsert logic to figure out which tables are safe to do a proper
|
||||||
|
# UPSERT on: until the relevant background update has completed, we
|
||||||
|
# have to emulate an upsert by locking the table.
|
||||||
|
#
|
||||||
|
UNIQUE_INDEX_BACKGROUND_UPDATES = {
|
||||||
|
"user_ips": "user_ips_device_unique_index",
|
||||||
|
"device_lists_remote_extremeties": "device_lists_remote_extremeties_unique_idx",
|
||||||
|
"device_lists_remote_cache": "device_lists_remote_cache_unique_idx",
|
||||||
|
"event_search": "event_search_event_id_idx",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class LoggingTransaction(object):
|
class LoggingTransaction(object):
|
||||||
"""An object that almost-transparently proxies for the 'txn' object
|
"""An object that almost-transparently proxies for the 'txn' object
|
||||||
passed to the constructor. Adds logging and metrics to the .execute()
|
passed to the constructor. Adds logging and metrics to the .execute()
|
||||||
@ -194,7 +209,7 @@ class SQLBaseStore(object):
|
|||||||
self.database_engine = hs.database_engine
|
self.database_engine = hs.database_engine
|
||||||
|
|
||||||
# A set of tables that are not safe to use native upserts in.
|
# A set of tables that are not safe to use native upserts in.
|
||||||
self._unsafe_to_upsert_tables = {"user_ips"}
|
self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys())
|
||||||
|
|
||||||
# We add the user_directory_search table to the blacklist on SQLite
|
# We add the user_directory_search table to the blacklist on SQLite
|
||||||
# because the existing search table does not have an index, making it
|
# because the existing search table does not have an index, making it
|
||||||
@ -230,12 +245,12 @@ class SQLBaseStore(object):
|
|||||||
)
|
)
|
||||||
updates = [x["update_name"] for x in updates]
|
updates = [x["update_name"] for x in updates]
|
||||||
|
|
||||||
# The User IPs table in schema #53 was missing a unique index, which we
|
for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items():
|
||||||
# run as a background update.
|
if update_name not in updates:
|
||||||
if "user_ips_device_unique_index" not in updates:
|
logger.debug("Now safe to upsert in %s", table)
|
||||||
self._unsafe_to_upsert_tables.discard("user_ips")
|
self._unsafe_to_upsert_tables.discard(table)
|
||||||
|
|
||||||
# If there's any tables left to check, reschedule to run.
|
# If there's any updates still running, reschedule to run.
|
||||||
if updates:
|
if updates:
|
||||||
self._clock.call_later(
|
self._clock.call_later(
|
||||||
15.0,
|
15.0,
|
||||||
|
@ -65,6 +65,11 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
|
|||||||
columns=["last_seen"],
|
columns=["last_seen"],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
self.register_background_update_handler(
|
||||||
|
"user_ips_analyze",
|
||||||
|
self._analyze_user_ip,
|
||||||
|
)
|
||||||
|
|
||||||
self.register_background_update_handler(
|
self.register_background_update_handler(
|
||||||
"user_ips_remove_dupes",
|
"user_ips_remove_dupes",
|
||||||
self._remove_user_ip_dupes,
|
self._remove_user_ip_dupes,
|
||||||
@ -108,6 +113,25 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
|
|||||||
yield self._end_background_update("user_ips_drop_nonunique_index")
|
yield self._end_background_update("user_ips_drop_nonunique_index")
|
||||||
defer.returnValue(1)
|
defer.returnValue(1)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _analyze_user_ip(self, progress, batch_size):
|
||||||
|
# Background update to analyze user_ips table before we run the
|
||||||
|
# deduplication background update. The table may not have been analyzed
|
||||||
|
# for ages due to the table locks.
|
||||||
|
#
|
||||||
|
# This will lock out the naive upserts to user_ips while it happens, but
|
||||||
|
# the analyze should be quick (28GB table takes ~10s)
|
||||||
|
def user_ips_analyze(txn):
|
||||||
|
txn.execute("ANALYZE user_ips")
|
||||||
|
|
||||||
|
yield self.runInteraction(
|
||||||
|
"user_ips_analyze", user_ips_analyze
|
||||||
|
)
|
||||||
|
|
||||||
|
yield self._end_background_update("user_ips_analyze")
|
||||||
|
|
||||||
|
defer.returnValue(1)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _remove_user_ip_dupes(self, progress, batch_size):
|
def _remove_user_ip_dupes(self, progress, batch_size):
|
||||||
# This works function works by scanning the user_ips table in batches
|
# This works function works by scanning the user_ips table in batches
|
||||||
@ -167,12 +191,16 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
|
|||||||
clause = "? <= last_seen AND last_seen < ?"
|
clause = "? <= last_seen AND last_seen < ?"
|
||||||
args = (begin_last_seen, end_last_seen)
|
args = (begin_last_seen, end_last_seen)
|
||||||
|
|
||||||
|
# (Note: The DISTINCT in the inner query is important to ensure that
|
||||||
|
# the COUNT(*) is accurate, otherwise double counting may happen due
|
||||||
|
# to the join effectively being a cross product)
|
||||||
txn.execute(
|
txn.execute(
|
||||||
"""
|
"""
|
||||||
SELECT user_id, access_token, ip,
|
SELECT user_id, access_token, ip,
|
||||||
MAX(device_id), MAX(user_agent), MAX(last_seen)
|
MAX(device_id), MAX(user_agent), MAX(last_seen),
|
||||||
|
COUNT(*)
|
||||||
FROM (
|
FROM (
|
||||||
SELECT user_id, access_token, ip
|
SELECT DISTINCT user_id, access_token, ip
|
||||||
FROM user_ips
|
FROM user_ips
|
||||||
WHERE {}
|
WHERE {}
|
||||||
) c
|
) c
|
||||||
@ -186,7 +214,60 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
|
|||||||
|
|
||||||
# We've got some duplicates
|
# We've got some duplicates
|
||||||
for i in res:
|
for i in res:
|
||||||
user_id, access_token, ip, device_id, user_agent, last_seen = i
|
user_id, access_token, ip, device_id, user_agent, last_seen, count = i
|
||||||
|
|
||||||
|
# We want to delete the duplicates so we end up with only a
|
||||||
|
# single row.
|
||||||
|
#
|
||||||
|
# The naive way of doing this would be just to delete all rows
|
||||||
|
# and reinsert a constructed row. However, if there are a lot of
|
||||||
|
# duplicate rows this can cause the table to grow a lot, which
|
||||||
|
# can be problematic in two ways:
|
||||||
|
# 1. If user_ips is already large then this can cause the
|
||||||
|
# table to rapidly grow, potentially filling the disk.
|
||||||
|
# 2. Reinserting a lot of rows can confuse the table
|
||||||
|
# statistics for postgres, causing it to not use the
|
||||||
|
# correct indices for the query above, resulting in a full
|
||||||
|
# table scan. This is incredibly slow for large tables and
|
||||||
|
# can kill database performance. (This seems to mainly
|
||||||
|
# happen for the last query where the clause is simply `? <
|
||||||
|
# last_seen`)
|
||||||
|
#
|
||||||
|
# So instead we want to delete all but *one* of the duplicate
|
||||||
|
# rows. That is hard to do reliably, so we cheat and do a two
|
||||||
|
# step process:
|
||||||
|
# 1. Delete all rows with a last_seen strictly less than the
|
||||||
|
# max last_seen. This hopefully results in deleting all but
|
||||||
|
# one row the majority of the time, but there may be
|
||||||
|
# duplicate last_seen
|
||||||
|
# 2. If multiple rows remain, we fall back to the naive method
|
||||||
|
# and simply delete all rows and reinsert.
|
||||||
|
#
|
||||||
|
# Note that this relies on no new duplicate rows being inserted,
|
||||||
|
# but if that is happening then this entire process is futile
|
||||||
|
# anyway.
|
||||||
|
|
||||||
|
# Do step 1:
|
||||||
|
|
||||||
|
txn.execute(
|
||||||
|
"""
|
||||||
|
DELETE FROM user_ips
|
||||||
|
WHERE user_id = ? AND access_token = ? AND ip = ? AND last_seen < ?
|
||||||
|
""",
|
||||||
|
(user_id, access_token, ip, last_seen)
|
||||||
|
)
|
||||||
|
if txn.rowcount == count - 1:
|
||||||
|
# We deleted all but one of the duplicate rows, i.e. there
|
||||||
|
# is exactly one remaining and so there is nothing left to
|
||||||
|
# do.
|
||||||
|
continue
|
||||||
|
elif txn.rowcount >= count:
|
||||||
|
raise Exception(
|
||||||
|
"We deleted more duplicate rows from 'user_ips' than expected",
|
||||||
|
)
|
||||||
|
|
||||||
|
# The previous step didn't delete enough rows, so we fallback to
|
||||||
|
# step 2:
|
||||||
|
|
||||||
# Drop all the duplicates
|
# Drop all the duplicates
|
||||||
txn.execute(
|
txn.execute(
|
||||||
|
@ -298,6 +298,27 @@ class EndToEndRoomKeyStore(SQLBaseStore):
|
|||||||
"create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
|
"create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def update_e2e_room_keys_version(self, user_id, version, info):
|
||||||
|
"""Update a given backup version
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user_id(str): the user whose backup version we're updating
|
||||||
|
version(str): the version ID of the backup version we're updating
|
||||||
|
info(dict): the new backup version info to store
|
||||||
|
"""
|
||||||
|
|
||||||
|
return self._simple_update(
|
||||||
|
table="e2e_room_keys_versions",
|
||||||
|
keyvalues={
|
||||||
|
"user_id": user_id,
|
||||||
|
"version": version,
|
||||||
|
},
|
||||||
|
updatevalues={
|
||||||
|
"auth_data": json.dumps(info["auth_data"]),
|
||||||
|
},
|
||||||
|
desc="update_e2e_room_keys_version"
|
||||||
|
)
|
||||||
|
|
||||||
def delete_e2e_room_keys_version(self, user_id, version=None):
|
def delete_e2e_room_keys_version(self, user_id, version=None):
|
||||||
"""Delete a given backup version of the user's room keys.
|
"""Delete a given backup version of the user's room keys.
|
||||||
Doesn't delete their actual key data.
|
Doesn't delete their actual key data.
|
||||||
|
@ -13,9 +13,13 @@
|
|||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
-- delete duplicates
|
-- analyze user_ips, to help ensure the correct indices are used
|
||||||
INSERT INTO background_updates (update_name, progress_json) VALUES
|
INSERT INTO background_updates (update_name, progress_json) VALUES
|
||||||
('user_ips_remove_dupes', '{}');
|
('user_ips_analyze', '{}');
|
||||||
|
|
||||||
|
-- delete duplicates
|
||||||
|
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
|
||||||
|
('user_ips_remove_dupes', '{}', 'user_ips_analyze');
|
||||||
|
|
||||||
-- add a new unique index to user_ips table
|
-- add a new unique index to user_ips table
|
||||||
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
|
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
|
||||||
|
@ -428,13 +428,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
|
|||||||
"""
|
"""
|
||||||
# for now we do this by looking at the create event. We may want to cache this
|
# for now we do this by looking at the create event. We may want to cache this
|
||||||
# more intelligently in future.
|
# more intelligently in future.
|
||||||
state_ids = yield self.get_current_state_ids(room_id)
|
|
||||||
create_id = state_ids.get((EventTypes.Create, ""))
|
|
||||||
|
|
||||||
if not create_id:
|
# Retrieve the room's create event
|
||||||
raise NotFoundError("Unknown room %s" % (room_id))
|
create_event = yield self.get_create_event_for_room(room_id)
|
||||||
|
|
||||||
create_event = yield self.get_event(create_id)
|
|
||||||
defer.returnValue(create_event.content.get("room_version", "1"))
|
defer.returnValue(create_event.content.get("room_version", "1"))
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@ -447,19 +443,39 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
|
|||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Deferred[unicode|None]: predecessor room id
|
Deferred[unicode|None]: predecessor room id
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
NotFoundError if the room is unknown
|
||||||
|
"""
|
||||||
|
# Retrieve the room's create event
|
||||||
|
create_event = yield self.get_create_event_for_room(room_id)
|
||||||
|
|
||||||
|
# Return predecessor if present
|
||||||
|
defer.returnValue(create_event.content.get("predecessor", None))
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def get_create_event_for_room(self, room_id):
|
||||||
|
"""Get the create state event for a room.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
room_id (str)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred[EventBase]: The room creation event.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
NotFoundError if the room is unknown
|
||||||
"""
|
"""
|
||||||
state_ids = yield self.get_current_state_ids(room_id)
|
state_ids = yield self.get_current_state_ids(room_id)
|
||||||
create_id = state_ids.get((EventTypes.Create, ""))
|
create_id = state_ids.get((EventTypes.Create, ""))
|
||||||
|
|
||||||
# If we can't find the create event, assume we've hit a dead end
|
# If we can't find the create event, assume we've hit a dead end
|
||||||
if not create_id:
|
if not create_id:
|
||||||
defer.returnValue(None)
|
raise NotFoundError("Unknown room %s" % (room_id))
|
||||||
|
|
||||||
# Retrieve the room's create event
|
# Retrieve the room's create event and return
|
||||||
create_event = yield self.get_event(create_id)
|
create_event = yield self.get_event(create_id)
|
||||||
|
defer.returnValue(create_event)
|
||||||
# Return predecessor if present
|
|
||||||
defer.returnValue(create_event.content.get("predecessor", None))
|
|
||||||
|
|
||||||
@cached(max_entries=100000, iterable=True)
|
@cached(max_entries=100000, iterable=True)
|
||||||
def get_current_state_ids(self, room_id):
|
def get_current_state_ids(self, room_id):
|
||||||
|
@ -22,6 +22,7 @@ from twisted.internet import defer
|
|||||||
|
|
||||||
from synapse.api.constants import EventTypes, JoinRules
|
from synapse.api.constants import EventTypes, JoinRules
|
||||||
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
|
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
|
||||||
|
from synapse.storage.state import StateFilter
|
||||||
from synapse.types import get_domain_from_id, get_localpart_from_id
|
from synapse.types import get_domain_from_id, get_localpart_from_id
|
||||||
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
|
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
|
||||||
|
|
||||||
@ -31,12 +32,19 @@ logger = logging.getLogger(__name__)
|
|||||||
|
|
||||||
|
|
||||||
class UserDirectoryStore(SQLBaseStore):
|
class UserDirectoryStore(SQLBaseStore):
|
||||||
@cachedInlineCallbacks(cache_context=True)
|
@defer.inlineCallbacks
|
||||||
def is_room_world_readable_or_publicly_joinable(self, room_id, cache_context):
|
def is_room_world_readable_or_publicly_joinable(self, room_id):
|
||||||
"""Check if the room is either world_readable or publically joinable
|
"""Check if the room is either world_readable or publically joinable
|
||||||
"""
|
"""
|
||||||
current_state_ids = yield self.get_current_state_ids(
|
|
||||||
room_id, on_invalidate=cache_context.invalidate
|
# Create a state filter that only queries join and history state event
|
||||||
|
types_to_filter = (
|
||||||
|
(EventTypes.JoinRules, ""),
|
||||||
|
(EventTypes.RoomHistoryVisibility, ""),
|
||||||
|
)
|
||||||
|
|
||||||
|
current_state_ids = yield self.get_filtered_current_state_ids(
|
||||||
|
room_id, StateFilter.from_types(types_to_filter),
|
||||||
)
|
)
|
||||||
|
|
||||||
join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
|
join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
|
||||||
|
@ -59,7 +59,7 @@ class FrontendProxyTests(HomeserverTestCase):
|
|||||||
|
|
||||||
def test_listen_http_with_presence_disabled(self):
|
def test_listen_http_with_presence_disabled(self):
|
||||||
"""
|
"""
|
||||||
When presence is on, the stub servlet will register.
|
When presence is off, the stub servlet will register.
|
||||||
"""
|
"""
|
||||||
# Presence is off
|
# Presence is off
|
||||||
self.hs.config.use_presence = False
|
self.hs.config.use_presence = False
|
||||||
|
119
tests/app/test_openid_listener.py
Normal file
119
tests/app/test_openid_listener.py
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2019 New Vector Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
from mock import Mock, patch
|
||||||
|
|
||||||
|
from parameterized import parameterized
|
||||||
|
|
||||||
|
from synapse.app.federation_reader import FederationReaderServer
|
||||||
|
from synapse.app.homeserver import SynapseHomeServer
|
||||||
|
|
||||||
|
from tests.unittest import HomeserverTestCase
|
||||||
|
|
||||||
|
|
||||||
|
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
|
||||||
|
def make_homeserver(self, reactor, clock):
|
||||||
|
hs = self.setup_test_homeserver(
|
||||||
|
http_client=None, homeserverToUse=FederationReaderServer,
|
||||||
|
)
|
||||||
|
return hs
|
||||||
|
|
||||||
|
@parameterized.expand([
|
||||||
|
(["federation"], "auth_fail"),
|
||||||
|
([], "no_resource"),
|
||||||
|
(["openid", "federation"], "auth_fail"),
|
||||||
|
(["openid"], "auth_fail"),
|
||||||
|
])
|
||||||
|
def test_openid_listener(self, names, expectation):
|
||||||
|
"""
|
||||||
|
Test different openid listener configurations.
|
||||||
|
|
||||||
|
401 is success here since it means we hit the handler and auth failed.
|
||||||
|
"""
|
||||||
|
config = {
|
||||||
|
"port": 8080,
|
||||||
|
"bind_addresses": ["0.0.0.0"],
|
||||||
|
"resources": [{"names": names}],
|
||||||
|
}
|
||||||
|
|
||||||
|
# Listen with the config
|
||||||
|
self.hs._listen_http(config)
|
||||||
|
|
||||||
|
# Grab the resource from the site that was told to listen
|
||||||
|
site = self.reactor.tcpServers[0][1]
|
||||||
|
try:
|
||||||
|
self.resource = (
|
||||||
|
site.resource.children[b"_matrix"].children[b"federation"]
|
||||||
|
)
|
||||||
|
except KeyError:
|
||||||
|
if expectation == "no_resource":
|
||||||
|
return
|
||||||
|
raise
|
||||||
|
|
||||||
|
request, channel = self.make_request(
|
||||||
|
"GET",
|
||||||
|
"/_matrix/federation/v1/openid/userinfo",
|
||||||
|
)
|
||||||
|
self.render(request)
|
||||||
|
|
||||||
|
self.assertEqual(channel.code, 401)
|
||||||
|
|
||||||
|
|
||||||
|
@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
|
||||||
|
class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase):
|
||||||
|
def make_homeserver(self, reactor, clock):
|
||||||
|
hs = self.setup_test_homeserver(
|
||||||
|
http_client=None, homeserverToUse=SynapseHomeServer,
|
||||||
|
)
|
||||||
|
return hs
|
||||||
|
|
||||||
|
@parameterized.expand([
|
||||||
|
(["federation"], "auth_fail"),
|
||||||
|
([], "no_resource"),
|
||||||
|
(["openid", "federation"], "auth_fail"),
|
||||||
|
(["openid"], "auth_fail"),
|
||||||
|
])
|
||||||
|
def test_openid_listener(self, names, expectation):
|
||||||
|
"""
|
||||||
|
Test different openid listener configurations.
|
||||||
|
|
||||||
|
401 is success here since it means we hit the handler and auth failed.
|
||||||
|
"""
|
||||||
|
config = {
|
||||||
|
"port": 8080,
|
||||||
|
"bind_addresses": ["0.0.0.0"],
|
||||||
|
"resources": [{"names": names}],
|
||||||
|
}
|
||||||
|
|
||||||
|
# Listen with the config
|
||||||
|
self.hs._listener_http(config, config)
|
||||||
|
|
||||||
|
# Grab the resource from the site that was told to listen
|
||||||
|
site = self.reactor.tcpServers[0][1]
|
||||||
|
try:
|
||||||
|
self.resource = (
|
||||||
|
site.resource.children[b"_matrix"].children[b"federation"]
|
||||||
|
)
|
||||||
|
except KeyError:
|
||||||
|
if expectation == "no_resource":
|
||||||
|
return
|
||||||
|
raise
|
||||||
|
|
||||||
|
request, channel = self.make_request(
|
||||||
|
"GET",
|
||||||
|
"/_matrix/federation/v1/openid/userinfo",
|
||||||
|
)
|
||||||
|
self.render(request)
|
||||||
|
|
||||||
|
self.assertEqual(channel.code, 401)
|
@ -20,6 +20,11 @@ from synapse.config.tls import TlsConfig
|
|||||||
from tests.unittest import TestCase
|
from tests.unittest import TestCase
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfig(TlsConfig):
|
||||||
|
def has_tls_listener(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
class TLSConfigTests(TestCase):
|
class TLSConfigTests(TestCase):
|
||||||
|
|
||||||
def test_warn_self_signed(self):
|
def test_warn_self_signed(self):
|
||||||
@ -55,13 +60,12 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
|
|||||||
|
|
||||||
config = {
|
config = {
|
||||||
"tls_certificate_path": os.path.join(config_dir, "cert.pem"),
|
"tls_certificate_path": os.path.join(config_dir, "cert.pem"),
|
||||||
"no_tls": True,
|
|
||||||
"tls_fingerprints": []
|
"tls_fingerprints": []
|
||||||
}
|
}
|
||||||
|
|
||||||
t = TlsConfig()
|
t = TestConfig()
|
||||||
t.read_config(config)
|
t.read_config(config)
|
||||||
t.read_certificate_from_disk()
|
t.read_certificate_from_disk(require_cert_and_key=False)
|
||||||
|
|
||||||
warnings = self.flushWarnings()
|
warnings = self.flushWarnings()
|
||||||
self.assertEqual(len(warnings), 1)
|
self.assertEqual(len(warnings), 1)
|
||||||
|
@ -125,6 +125,78 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
|
|||||||
"auth_data": "second_version_auth_data",
|
"auth_data": "second_version_auth_data",
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def test_update_version(self):
|
||||||
|
"""Check that we can update versions.
|
||||||
|
"""
|
||||||
|
version = yield self.handler.create_version(self.local_user, {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "first_version_auth_data",
|
||||||
|
})
|
||||||
|
self.assertEqual(version, "1")
|
||||||
|
|
||||||
|
res = yield self.handler.update_version(self.local_user, version, {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "revised_first_version_auth_data",
|
||||||
|
"version": version
|
||||||
|
})
|
||||||
|
self.assertDictEqual(res, {})
|
||||||
|
|
||||||
|
# check we can retrieve it as the current version
|
||||||
|
res = yield self.handler.get_version_info(self.local_user)
|
||||||
|
self.assertDictEqual(res, {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "revised_first_version_auth_data",
|
||||||
|
"version": version
|
||||||
|
})
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def test_update_missing_version(self):
|
||||||
|
"""Check that we get a 404 on updating nonexistent versions
|
||||||
|
"""
|
||||||
|
res = None
|
||||||
|
try:
|
||||||
|
yield self.handler.update_version(self.local_user, "1", {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "revised_first_version_auth_data",
|
||||||
|
"version": "1"
|
||||||
|
})
|
||||||
|
except errors.SynapseError as e:
|
||||||
|
res = e.code
|
||||||
|
self.assertEqual(res, 404)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def test_update_bad_version(self):
|
||||||
|
"""Check that we get a 400 if the version in the body is missing or
|
||||||
|
doesn't match
|
||||||
|
"""
|
||||||
|
version = yield self.handler.create_version(self.local_user, {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "first_version_auth_data",
|
||||||
|
})
|
||||||
|
self.assertEqual(version, "1")
|
||||||
|
|
||||||
|
res = None
|
||||||
|
try:
|
||||||
|
yield self.handler.update_version(self.local_user, version, {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "revised_first_version_auth_data"
|
||||||
|
})
|
||||||
|
except errors.SynapseError as e:
|
||||||
|
res = e.code
|
||||||
|
self.assertEqual(res, 400)
|
||||||
|
|
||||||
|
res = None
|
||||||
|
try:
|
||||||
|
yield self.handler.update_version(self.local_user, version, {
|
||||||
|
"algorithm": "m.megolm_backup.v1",
|
||||||
|
"auth_data": "revised_first_version_auth_data",
|
||||||
|
"version": "incorrect"
|
||||||
|
})
|
||||||
|
except errors.SynapseError as e:
|
||||||
|
res = e.code
|
||||||
|
self.assertEqual(res, 400)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def test_delete_missing_version(self):
|
def test_delete_missing_version(self):
|
||||||
"""Check that we get a 404 on deleting nonexistent versions
|
"""Check that we get a 404 on deleting nonexistent versions
|
||||||
|
@ -43,13 +43,11 @@ def check_logcontext(context):
|
|||||||
|
|
||||||
class FederationClientTests(HomeserverTestCase):
|
class FederationClientTests(HomeserverTestCase):
|
||||||
def make_homeserver(self, reactor, clock):
|
def make_homeserver(self, reactor, clock):
|
||||||
|
|
||||||
hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
|
hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
|
||||||
hs.tls_client_options_factory = None
|
|
||||||
return hs
|
return hs
|
||||||
|
|
||||||
def prepare(self, reactor, clock, homeserver):
|
def prepare(self, reactor, clock, homeserver):
|
||||||
self.cl = MatrixFederationHttpClient(self.hs)
|
self.cl = MatrixFederationHttpClient(self.hs, None)
|
||||||
self.reactor.lookups["testserv"] = "1.2.3.4"
|
self.reactor.lookups["testserv"] = "1.2.3.4"
|
||||||
|
|
||||||
def test_client_get(self):
|
def test_client_get(self):
|
||||||
|
Loading…
Reference in New Issue
Block a user