From 4c131b2c78bae793509bea776107a8183274a709 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 11 Nov 2019 15:47:47 +0000
Subject: [PATCH 01/60] Implement v2 API for send_join

---
 synapse/federation/federation_client.py | 46 +++++++++++++++++++++----
 synapse/federation/transport/client.py  | 13 ++++++-
 2 files changed, 51 insertions(+), 8 deletions(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 545d71965..50ae40504 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -664,13 +664,7 @@ class FederationClient(FederationBase):
 
         @defer.inlineCallbacks
         def send_request(destination):
-            time_now = self._clock.time_msec()
-            _, content = yield self.transport_layer.send_join(
-                destination=destination,
-                room_id=pdu.room_id,
-                event_id=pdu.event_id,
-                content=pdu.get_pdu_json(time_now),
-            )
+            content = self._do_send_join(destination, pdu)
 
             logger.debug("Got content: %s", content)
 
@@ -737,6 +731,44 @@ class FederationClient(FederationBase):
 
         return self._try_destination_list("send_join", destinations, send_request)
 
+    @defer.inlineCallbacks
+    def _do_send_join(self, destination, pdu):
+        time_now = self._clock.time_msec()
+
+        try:
+            content = yield self.transport_layer.send_join_v2(
+                destination=destination,
+                room_id=pdu.room_id,
+                event_id=pdu.event_id,
+                content=pdu.get_pdu_json(time_now),
+            )
+
+            return content
+        except HttpResponseException as e:
+            if e.code in [400, 404]:
+                err = e.to_synapse_error()
+
+                # If we receive an error response that isn't a generic error, or an
+                # unrecognised endpoint error, we assume that the remote understands
+                # the v2 send_join API and this is a legitimate error.
+                if not err.errcode in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
+                    raise err
+            else:
+                raise e.to_synapse_error()
+
+        logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
+
+        resp = yield self.transport_layer.send_join_v1(
+            destination=destination,
+            room_id=pdu.room_id,
+            event_id=pdu.event_id,
+            content=pdu.get_pdu_json(time_now),
+        )
+
+        # We expect the v1 API to respond with [200, content], so we only return the
+        # content.
+ return resp[1] + @defer.inlineCallbacks def send_invite(self, destination, room_id, event_id, pdu): room_version = yield self.store.get_room_version(room_id) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 920fa8685..ba68f7c0b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -267,7 +267,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function - def send_join(self, destination, room_id, event_id, content): + def send_join_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/send_join/%s/%s", room_id, event_id) response = yield self.client.put_json( @@ -276,6 +276,17 @@ class TransportLayerClient(object): return response + @defer.inlineCallbacks + @log_function + def send_join_v2(self, destination, room_id, event_id, content): + path = _create_v2_path("/send_join/%s/%s", room_id, event_id) + + response = yield self.client.put_json( + destination=destination, path=path, data=content + ) + + return response + @defer.inlineCallbacks @log_function def send_leave(self, destination, room_id, event_id, content): From 92527d7b2186a06c204c3c4bff47207252c5dea2 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 11 Nov 2019 16:20:53 +0000 Subject: [PATCH 02/60] Add missing yield --- synapse/federation/federation_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 50ae40504..4a8e65c29 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -664,7 +664,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_request(destination): - content = self._do_send_join(destination, pdu) + content = yield self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) From 1e202a90f15a8f518e4350075d40d0423b64318d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 11 Nov 2019 16:26:53 +0000 Subject: [PATCH 03/60] Implement v2 API for send_leave --- synapse/federation/federation_client.py | 41 ++++++++++++++++++++++--- synapse/federation/transport/client.py | 20 +++++++++++- 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 4a8e65c29..289017b2e 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -879,17 +879,50 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_request(destination): time_now = self._clock.time_msec() - _, content = yield self.transport_layer.send_leave( + content = yield self._do_send_leave(destination, pdu) + + logger.debug("Got content: %s", content) + return None + + return self._try_destination_list("send_leave", destinations, send_request) + + @defer.inlineCallbacks + def _do_send_leave(self, destination, pdu): + time_now = self._clock.time_msec() + + try: + content = yield self.transport_layer.send_leave_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), ) - logger.debug("Got content: %s", content) - return None + return content + except HttpResponseException as e: + if e.code in [400, 404]: + err = e.to_synapse_error() - return self._try_destination_list("send_leave", destinations, send_request) + # If we receive an error response that isn't a generic error, or an + # unrecognised endpoint error, we assume that 
the remote understands
+                # the v2 send_leave API and this is a legitimate error.
+                if not err.errcode in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
+                    raise err
+            else:
+                raise e.to_synapse_error()
+
+        logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
+
+        resp = yield self.transport_layer.send_leave_v1(
+            destination=destination,
+            room_id=pdu.room_id,
+            event_id=pdu.event_id,
+            content=pdu.get_pdu_json(time_now),
+        )
+
+        # We expect the v1 API to respond with [200, content], so we only return the
+        # content.
+        return resp[1]
 
     def get_public_rooms(
         self,
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index ba68f7c0b..df2b5dc91 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -289,7 +289,7 @@ class TransportLayerClient(object):
 
     @defer.inlineCallbacks
     @log_function
-    def send_leave(self, destination, room_id, event_id, content):
+    def send_leave_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
@@ -305,6 +305,24 @@ class TransportLayerClient(object):
 
         return response
 
+    @defer.inlineCallbacks
+    @log_function
+    def send_leave_v2(self, destination, room_id, event_id, content):
+        path = _create_v2_path("/send_leave/%s/%s", room_id, event_id)
+
+        response = yield self.client.put_json(
+            destination=destination,
+            path=path,
+            data=content,
+            # we want to do our best to send this through. The problem is
+            # that if it fails, we won't retry it later, so if the remote
+            # server was just having a momentary blip, the room will be out of
+            # sync.
+            ignore_backoff=True,
+        )
+
+        return response
+
     @defer.inlineCallbacks
     @log_function
     def send_invite_v1(self, destination, room_id, event_id, content):

From 74897de01fc271ee04ce0638654d991f5fb8e2fa Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 11 Nov 2019 16:40:45 +0000
Subject: [PATCH 04/60] Add server-side support for the v2 APIs

---
 synapse/federation/federation_server.py | 17 ++++++--------
 synapse/federation/transport/server.py  | 30 +++++++++++++++++++++----
 2 files changed, 33 insertions(+), 14 deletions(-)

diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d942d77a7..0ce83cd88 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -376,15 +376,12 @@ class FederationServer(FederationBase):
         res_pdus = await self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
-        return (
-            200,
-            {
-                "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
-                "auth_chain": [
-                    p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
-                ],
-            },
-        )
+        return {
+            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
+            "auth_chain": [
+                p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
+            ],
+        }
 
     async def on_make_leave_request(self, origin, room_id, user_id):
         origin_host, _ = parse_server_name(origin)
@@ -411,7 +408,7 @@ class FederationServer(FederationBase):
         pdu = await self._check_sigs_and_hash(room_version, pdu)
 
         await self.handler.on_send_leave_request(origin, pdu)
-        return 200, {}
+        return {}
 
     async def on_event_auth(self, origin, room_id, event_id):
         with (await self._server_linearizer.queue((origin, room_id))):
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index d6c23f22b..5263e292c 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -506,7 +506,15 @@ class FederationMakeLeaveServlet(BaseFederationServlet):
         return 200, content
 
 
-class FederationSendLeaveServlet(BaseFederationServlet):
+class FederationV1SendLeaveServlet(BaseFederationServlet):
+    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+    async def on_PUT(self, origin, content, query, room_id, event_id):
+        content = await self.handler.on_send_leave_request(origin, content, room_id)
+        return 200, (200, content)
+
+
+class FederationV2SendLeaveServlet(BaseFederationServlet):
     PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
     async def on_PUT(self, origin, content, query, room_id, event_id):
@@ -521,9 +529,21 @@ class FederationEventAuthServlet(BaseFederationServlet):
         return await self.handler.on_event_auth(origin, context, event_id)
 
 
-class FederationSendJoinServlet(BaseFederationServlet):
+class FederationV1SendJoinServlet(BaseFederationServlet):
     PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
 
+    async def on_PUT(self, origin, content, query, context, event_id):
+        # TODO(paul): assert that context/event_id parsed from path actually
+        # match those given in content
+        content = await self.handler.on_send_join_request(origin, content, context)
+        return 200, (200, content)
+
+
+class FederationV2SendJoinServlet(BaseFederationServlet):
+    PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+
+    PREFIX = FEDERATION_V2_PREFIX
+
     async def on_PUT(self, origin, content, query, context, event_id):
         # TODO(paul): assert that context/event_id parsed from path actually
         # match those given in content
@@ -1367,8 +1387,10 @@ FEDERATION_SERVLET_CLASSES = (
     FederationMakeJoinServlet,
     FederationMakeLeaveServlet,
     FederationEventServlet,
-    FederationSendJoinServlet,
-    FederationSendLeaveServlet,
+    FederationV1SendJoinServlet,
+    FederationV2SendJoinServlet,
+    FederationV1SendLeaveServlet,
+    FederationV2SendLeaveServlet,
     FederationV1InviteServlet,
     FederationV2InviteServlet,
     FederationQueryAuthServlet,

From 5e18dc7955180086870013b55bf5ae326fa6c695 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 11 Nov 2019 16:46:09 +0000
Subject: [PATCH 05/60] Fix prefix for v2/send_leave

---
 synapse/federation/transport/server.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 5263e292c..551a162eb 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -517,6 +517,8 @@ class FederationV1SendLeaveServlet(BaseFederationServlet):
 class FederationV2SendLeaveServlet(BaseFederationServlet):
     PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
+    PREFIX = FEDERATION_V2_PREFIX
+
     async def on_PUT(self, origin, content, query, room_id, event_id):
         content = await self.handler.on_send_leave_request(origin, content, room_id)
         return 200, content

From edc4c7d4c55b1843c85d452ff92bda17dfff47bc Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 11 Nov 2019 16:51:54 +0000
Subject: [PATCH 06/60] Lint

---
 synapse/federation/federation_server.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 0ce83cd88..08a913e08 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -378,9 +378,7 @@ class FederationServer(FederationBase):
         time_now = self._clock.time_msec()
         return {
             "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
-            "auth_chain": [
-                p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
-            ],
+            "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
         }
 
     async def on_make_leave_request(self, origin, room_id, user_id):

From 21056ad12a9dbfadad085802c7a0096d3b071681 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 11 Nov 2019 16:53:29 +0000
Subject: [PATCH 07/60] Changelog

---
 changelog.d/6349.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6349.feature

diff --git a/changelog.d/6349.feature b/changelog.d/6349.feature
new file mode 100644
index 000000000..56c4fbf78
--- /dev/null
+++ b/changelog.d/6349.feature
@@ -0,0 +1 @@
+Implement v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)).

From 94cdd6fffed90cceaf0396a66174ddd5c990c8eb Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 11 Nov 2019 16:56:55 +0000
Subject: [PATCH 08/60] Lint

---
 synapse/federation/federation_client.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 289017b2e..23c08104b 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -751,7 +751,7 @@ class FederationClient(FederationBase):
                 # If we receive an error response that isn't a generic error, or an
                 # unrecognised endpoint error, we assume that the remote understands
                 # the v2 send_join API and this is a legitimate error.
-                if not err.errcode in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
+                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                     raise err
             else:
                 raise e.to_synapse_error()
@@ -878,7 +878,6 @@ class FederationClient(FederationBase):
 
         @defer.inlineCallbacks
         def send_request(destination):
-            time_now = self._clock.time_msec()
             content = yield self._do_send_leave(destination, pdu)
 
             logger.debug("Got content: %s", content)
@@ -906,7 +905,7 @@ class FederationClient(FederationBase):
                 # If we receive an error response that isn't a generic error, or an
                 # unrecognised endpoint error, we assume that the remote understands
                 # the v2 send_leave API and this is a legitimate error.
-                if not err.errcode in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
+                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                     raise err
             else:
                 raise e.to_synapse_error()

From 9a4fb457cf5918c85068ea249cd2d58b3e2e3cfc Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 6 Dec 2019 13:08:40 +0000
Subject: [PATCH 09/60] Change DataStores to accept 'database' param.
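
Every data store now takes the Database object as its first constructor
argument, alongside the raw db_conn (still used for startup-time reads
such as stream ID trackers) and the HomeServer, and threads it up the
MRO chain to SQLBaseStore. A rough sketch of the pattern this commit
applies throughout (ExampleWorkerStore/ExampleStore are illustrative
names, not classes from this series):

    from synapse.storage._base import SQLBaseStore
    from synapse.storage.database import Database

    class ExampleWorkerStore(SQLBaseStore):
        def __init__(self, database: Database, db_conn, hs):
            # Hand the Database up the MRO chain; its query helpers are
            # then reachable from every store as self.db.
            super(ExampleWorkerStore, self).__init__(database, db_conn, hs)

    class ExampleStore(ExampleWorkerStore):
        def __init__(self, database: Database, db_conn, hs):
            # Setup that must read the database at startup (e.g. stream
            # ID trackers) keeps using the raw db_conn; everything else
            # goes through the Database object.
            super(ExampleStore, self).__init__(database, db_conn, hs)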
--- synapse/app/federation_sender.py | 5 +++-- synapse/app/user_dir.py | 5 +++-- synapse/replication/slave/storage/_base.py | 5 +++-- synapse/replication/slave/storage/account_data.py | 5 +++-- synapse/replication/slave/storage/client_ips.py | 5 +++-- synapse/replication/slave/storage/deviceinbox.py | 5 +++-- synapse/replication/slave/storage/devices.py | 5 +++-- synapse/replication/slave/storage/events.py | 5 +++-- synapse/replication/slave/storage/filtering.py | 5 +++-- synapse/replication/slave/storage/groups.py | 5 +++-- synapse/replication/slave/storage/presence.py | 5 +++-- synapse/replication/slave/storage/push_rule.py | 5 +++-- synapse/replication/slave/storage/pushers.py | 5 +++-- synapse/replication/slave/storage/receipts.py | 5 +++-- synapse/replication/slave/storage/room.py | 5 +++-- synapse/storage/_base.py | 2 +- synapse/storage/data_stores/main/__init__.py | 5 +++-- synapse/storage/data_stores/main/account_data.py | 9 +++++---- synapse/storage/data_stores/main/appservice.py | 5 +++-- synapse/storage/data_stores/main/client_ips.py | 9 +++++---- synapse/storage/data_stores/main/deviceinbox.py | 9 +++++---- synapse/storage/data_stores/main/devices.py | 9 +++++---- .../storage/data_stores/main/event_federation.py | 5 +++-- .../storage/data_stores/main/event_push_actions.py | 9 +++++---- synapse/storage/data_stores/main/events.py | 5 +++-- .../storage/data_stores/main/events_bg_updates.py | 5 +++-- synapse/storage/data_stores/main/events_worker.py | 5 +++-- .../storage/data_stores/main/media_repository.py | 11 +++++++---- .../data_stores/main/monthly_active_users.py | 7 ++++--- synapse/storage/data_stores/main/push_rule.py | 5 +++-- synapse/storage/data_stores/main/receipts.py | 9 +++++---- synapse/storage/data_stores/main/registration.py | 13 +++++++------ synapse/storage/data_stores/main/room.py | 9 +++++---- synapse/storage/data_stores/main/roommember.py | 13 +++++++------ synapse/storage/data_stores/main/search.py | 9 +++++---- synapse/storage/data_stores/main/state.py | 13 +++++++------ synapse/storage/data_stores/main/stats.py | 5 +++-- synapse/storage/data_stores/main/stream.py | 5 +++-- synapse/storage/data_stores/main/transactions.py | 5 +++-- synapse/storage/data_stores/main/user_directory.py | 9 +++++---- tests/storage/test_appservice.py | 5 +++-- 41 files changed, 156 insertions(+), 114 deletions(-) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 448e45e00..f24920a7d 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -40,6 +40,7 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor from synapse.replication.tcp.client import ReplicationClientHandler from synapse.replication.tcp.streams._base import ReceiptsStream from synapse.server import HomeServer +from synapse.storage.database import Database from synapse.storage.engines import create_engine from synapse.types import ReadReceipt from synapse.util.async_helpers import Linearizer @@ -59,8 +60,8 @@ class FederationSenderSlaveStore( SlavedDeviceStore, SlavedPresenceStore, ): - def __init__(self, db_conn, hs): - super(FederationSenderSlaveStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs) # We pull out the current federation stream position now so that we # always have a known value for the federation position in memory so diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 
b6d448172..c01fb34a9 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -43,6 +43,7 @@ from synapse.replication.tcp.streams.events import ( from synapse.rest.client.v2_alpha import user_directory from synapse.server import HomeServer from synapse.storage.data_stores.main.user_directory import UserDirectoryStore +from synapse.storage.database import Database from synapse.storage.engines import create_engine from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.httpresourcetree import create_resource_tree @@ -60,8 +61,8 @@ class UserDirectorySlaveStore( UserDirectoryStore, BaseSlavedStore, ): - def __init__(self, db_conn, hs): - super(UserDirectorySlaveStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs) events_max = self._stream_id_gen.get_current_token() curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict( diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 6ece1d674..b91a52824 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -20,6 +20,7 @@ import six from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.cache import CURRENT_STATE_CACHE_NAME +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine from ._slaved_id_tracker import SlavedIdTracker @@ -35,8 +36,8 @@ def __func__(inp): class BaseSlavedStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(BaseSlavedStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(BaseSlavedStore, self).__init__(database, db_conn, hs) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = SlavedIdTracker( db_conn, "cache_invalidation_stream", "stream_id" diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index bc2f6a12a..ebe94909c 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -18,15 +18,16 @@ from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage.data_stores.main.account_data import AccountDataWorkerStore from synapse.storage.data_stores.main.tags import TagsWorkerStore +from synapse.storage.database import Database class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlavedStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self._account_data_id_gen = SlavedIdTracker( db_conn, "account_data_max_stream_id", "stream_id" ) - super(SlavedAccountDataStore, self).__init__(db_conn, hs) + super(SlavedAccountDataStore, self).__init__(database, db_conn, hs) def get_max_account_data_stream_id(self): return self._account_data_id_gen.get_current_token() diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index b4f58cea1..fbf996e33 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -14,6 +14,7 @@ # limitations under the License. 
from synapse.storage.data_stores.main.client_ips import LAST_SEEN_GRANULARITY +from synapse.storage.database import Database from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.caches.descriptors import Cache @@ -21,8 +22,8 @@ from ._base import BaseSlavedStore class SlavedClientIpStore(BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedClientIpStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedClientIpStore, self).__init__(database, db_conn, hs) self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, max_entries=50000 * CACHE_SIZE_FACTOR diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 9fb6c5c6f..0c237c6e0 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -16,13 +16,14 @@ from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage.data_stores.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.database import Database from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.stream_change_cache import StreamChangeCache class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedDeviceInboxStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedDeviceInboxStore, self).__init__(database, db_conn, hs) self._device_inbox_id_gen = SlavedIdTracker( db_conn, "device_max_stream_id", "stream_id" ) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index de50748c3..dc625e0d7 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -18,12 +18,13 @@ from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage.data_stores.main.devices import DeviceWorkerStore from synapse.storage.data_stores.main.end_to_end_keys import EndToEndKeyWorkerStore +from synapse.storage.database import Database from synapse.util.caches.stream_change_cache import StreamChangeCache class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedDeviceStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedDeviceStore, self).__init__(database, db_conn, hs) self.hs = hs diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index d0a0eaf75..29f35b991 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -31,6 +31,7 @@ from synapse.storage.data_stores.main.signatures import SignatureWorkerStore from synapse.storage.data_stores.main.state import StateGroupWorkerStore from synapse.storage.data_stores.main.stream import StreamWorkerStore from synapse.storage.data_stores.main.user_erasure_store import UserErasureWorkerStore +from synapse.storage.database import Database from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker @@ -59,13 +60,13 @@ class SlavedEventStore( RelationsWorkerStore, BaseSlavedStore, ): - def __init__(self, db_conn, hs): + def 
__init__(self, database: Database, db_conn, hs): self._stream_id_gen = SlavedIdTracker(db_conn, "events", "stream_ordering") self._backfill_id_gen = SlavedIdTracker( db_conn, "events", "stream_ordering", step=-1 ) - super(SlavedEventStore, self).__init__(db_conn, hs) + super(SlavedEventStore, self).__init__(database, db_conn, hs) # Cached functions can't be accessed through a class instance so we need # to reach inside the __dict__ to extract them. diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 5c84ebd12..bcb068895 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -14,13 +14,14 @@ # limitations under the License. from synapse.storage.data_stores.main.filtering import FilteringStore +from synapse.storage.database import Database from ._base import BaseSlavedStore class SlavedFilteringStore(BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedFilteringStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedFilteringStore, self).__init__(database, db_conn, hs) # Filters are immutable so this cache doesn't need to be expired get_user_filter = FilteringStore.__dict__["get_user_filter"] diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 28a46edd2..69a4ae42f 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -14,6 +14,7 @@ # limitations under the License. from synapse.storage import DataStore +from synapse.storage.database import Database from synapse.util.caches.stream_change_cache import StreamChangeCache from ._base import BaseSlavedStore, __func__ @@ -21,8 +22,8 @@ from ._slaved_id_tracker import SlavedIdTracker class SlavedGroupServerStore(BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedGroupServerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedGroupServerStore, self).__init__(database, db_conn, hs) self.hs = hs diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 747ced0c8..f552e7c97 100644 --- a/synapse/replication/slave/storage/presence.py +++ b/synapse/replication/slave/storage/presence.py @@ -15,6 +15,7 @@ from synapse.storage import DataStore from synapse.storage.data_stores.main.presence import PresenceStore +from synapse.storage.database import Database from synapse.util.caches.stream_change_cache import StreamChangeCache from ._base import BaseSlavedStore, __func__ @@ -22,8 +23,8 @@ from ._slaved_id_tracker import SlavedIdTracker class SlavedPresenceStore(BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedPresenceStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedPresenceStore, self).__init__(database, db_conn, hs) self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id") self._presence_on_startup = self._get_active_presence(db_conn) diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index 3655f05e5..eebd5a1fb 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -15,17 +15,18 @@ # limitations under the License. 
from synapse.storage.data_stores.main.push_rule import PushRulesWorkerStore +from synapse.storage.database import Database from ._slaved_id_tracker import SlavedIdTracker from .events import SlavedEventStore class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self._push_rules_stream_id_gen = SlavedIdTracker( db_conn, "push_rules_stream", "stream_id" ) - super(SlavedPushRuleStore, self).__init__(db_conn, hs) + super(SlavedPushRuleStore, self).__init__(database, db_conn, hs) def get_push_rules_stream_token(self): return ( diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index b4331d079..f22c2d44a 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -15,14 +15,15 @@ # limitations under the License. from synapse.storage.data_stores.main.pusher import PusherWorkerStore +from synapse.storage.database import Database from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): - def __init__(self, db_conn, hs): - super(SlavedPusherStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SlavedPusherStore, self).__init__(database, db_conn, hs) self._pushers_id_gen = SlavedIdTracker( db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")] ) diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py index 43d823c60..d40dc6e1f 100644 --- a/synapse/replication/slave/storage/receipts.py +++ b/synapse/replication/slave/storage/receipts.py @@ -15,6 +15,7 @@ # limitations under the License. from synapse.storage.data_stores.main.receipts import ReceiptsWorkerStore +from synapse.storage.database import Database from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker @@ -29,14 +30,14 @@ from ._slaved_id_tracker import SlavedIdTracker class SlavedReceiptsStore(ReceiptsWorkerStore, BaseSlavedStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): # We instantiate this first as the ReceiptsWorkerStore constructor # needs to be able to call get_max_receipt_stream_id self._receipts_id_gen = SlavedIdTracker( db_conn, "receipts_linearized", "stream_id" ) - super(SlavedReceiptsStore, self).__init__(db_conn, hs) + super(SlavedReceiptsStore, self).__init__(database, db_conn, hs) def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_current_token() diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py index d9ad386b2..3a20f4531 100644 --- a/synapse/replication/slave/storage/room.py +++ b/synapse/replication/slave/storage/room.py @@ -14,14 +14,15 @@ # limitations under the License. 
from synapse.storage.data_stores.main.room import RoomWorkerStore +from synapse.storage.database import Database from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker class RoomStore(RoomWorkerStore, BaseSlavedStore): - def __init__(self, db_conn, hs): - super(RoomStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RoomStore, self).__init__(database, db_conn, hs) self._public_room_id_gen = SlavedIdTracker( db_conn, "public_room_list_stream", "stream_id" ) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b7e27d4e9..f9e7f9a71 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -37,7 +37,7 @@ class SQLBaseStore(object): per data store (and not one per physical database). """ - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self.hs = hs self._clock = hs.get_clock() self.database_engine = hs.database_engine diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py index 6adb8adb0..7f5fd81bc 100644 --- a/synapse/storage/data_stores/main/__init__.py +++ b/synapse/storage/data_stores/main/__init__.py @@ -20,6 +20,7 @@ import logging import time from synapse.api.constants import PresenceState +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( ChainedIdGenerator, @@ -111,7 +112,7 @@ class DataStore( RelationsStore, CacheInvalidationStore, ): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self.hs = hs self._clock = hs.get_clock() self.database_engine = hs.database_engine @@ -169,7 +170,7 @@ class DataStore( else: self._cache_id_gen = None - super(DataStore, self).__init__(db_conn, hs) + super(DataStore, self).__init__(database, db_conn, hs) self._presence_on_startup = self._get_active_presence(db_conn) diff --git a/synapse/storage/data_stores/main/account_data.py b/synapse/storage/data_stores/main/account_data.py index a96fe9485..44d20c19b 100644 --- a/synapse/storage/data_stores/main/account_data.py +++ b/synapse/storage/data_stores/main/account_data.py @@ -22,6 +22,7 @@ from canonicaljson import json from twisted.internet import defer from synapse.storage._base import SQLBaseStore +from synapse.storage.database import Database from synapse.storage.util.id_generators import StreamIdGenerator from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -38,13 +39,13 @@ class AccountDataWorkerStore(SQLBaseStore): # the abstract methods being implemented. 
__metaclass__ = abc.ABCMeta - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): account_max = self.get_max_account_data_stream_id() self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", account_max ) - super(AccountDataWorkerStore, self).__init__(db_conn, hs) + super(AccountDataWorkerStore, self).__init__(database, db_conn, hs) @abc.abstractmethod def get_max_account_data_stream_id(self): @@ -270,12 +271,12 @@ class AccountDataWorkerStore(SQLBaseStore): class AccountDataStore(AccountDataWorkerStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self._account_data_id_gen = StreamIdGenerator( db_conn, "account_data_max_stream_id", "stream_id" ) - super(AccountDataStore, self).__init__(db_conn, hs) + super(AccountDataStore, self).__init__(database, db_conn, hs) def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/data_stores/main/appservice.py index 6b2e12719..b2f39649f 100644 --- a/synapse/storage/data_stores/main/appservice.py +++ b/synapse/storage/data_stores/main/appservice.py @@ -24,6 +24,7 @@ from synapse.appservice import AppServiceTransaction from synapse.config.appservice import load_appservices from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.events_worker import EventsWorkerStore +from synapse.storage.database import Database logger = logging.getLogger(__name__) @@ -48,13 +49,13 @@ def _make_exclusive_regex(services_cache): class ApplicationServiceWorkerStore(SQLBaseStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self.services_cache = load_appservices( hs.hostname, hs.config.app_service_config_files ) self.exclusive_user_regex = _make_exclusive_regex(self.services_cache) - super(ApplicationServiceWorkerStore, self).__init__(db_conn, hs) + super(ApplicationServiceWorkerStore, self).__init__(database, db_conn, hs) def get_app_services(self): return self.services_cache diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py index 7b470a58f..320c5b0f0 100644 --- a/synapse/storage/data_stores/main/client_ips.py +++ b/synapse/storage/data_stores/main/client_ips.py @@ -21,6 +21,7 @@ from twisted.internet import defer from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore +from synapse.storage.database import Database from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.caches.descriptors import Cache @@ -33,8 +34,8 @@ LAST_SEEN_GRANULARITY = 120 * 1000 class ClientIpBackgroundUpdateStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(ClientIpBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(ClientIpBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_index_update( "user_ips_device_index", @@ -363,13 +364,13 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): class ClientIpStore(ClientIpBackgroundUpdateStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, max_entries=50000 * CACHE_SIZE_FACTOR ) - super(ClientIpStore, self).__init__(db_conn, hs) + super(ClientIpStore, 
self).__init__(database, db_conn, hs) self.user_ips_max_age = hs.config.user_ips_max_age diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py index 3c9f09301..85cfa1685 100644 --- a/synapse/storage/data_stores/main/deviceinbox.py +++ b/synapse/storage/data_stores/main/deviceinbox.py @@ -21,6 +21,7 @@ from twisted.internet import defer from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.database import Database from synapse.util.caches.expiringcache import ExpiringCache logger = logging.getLogger(__name__) @@ -210,8 +211,8 @@ class DeviceInboxWorkerStore(SQLBaseStore): class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" - def __init__(self, db_conn, hs): - super(DeviceInboxBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(DeviceInboxBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_index_update( "device_inbox_stream_index", @@ -241,8 +242,8 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" - def __init__(self, db_conn, hs): - super(DeviceInboxStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(DeviceInboxStore, self).__init__(database, db_conn, hs) # Map of (user_id, device_id) to the last stream_id that has been # deleted up to. This is so that we can no op deletions. diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 91ddaf137..9a828231c 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -31,6 +31,7 @@ from synapse.logging.opentracing import ( ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause +from synapse.storage.database import Database from synapse.types import get_verify_key_from_cross_signing_key from synapse.util import batch_iter from synapse.util.caches.descriptors import ( @@ -642,8 +643,8 @@ class DeviceWorkerStore(SQLBaseStore): class DeviceBackgroundUpdateStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(DeviceBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(DeviceBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_index_update( "device_lists_stream_idx", @@ -692,8 +693,8 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): - def __init__(self, db_conn, hs): - super(DeviceStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(DeviceStore, self).__init__(database, db_conn, hs) # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. 
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py index 31d2e8eb2..1f517e8fa 100644 --- a/synapse/storage/data_stores/main/event_federation.py +++ b/synapse/storage/data_stores/main/event_federation.py @@ -28,6 +28,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause from synapse.storage.data_stores.main.events_worker import EventsWorkerStore from synapse.storage.data_stores.main.signatures import SignatureWorkerStore +from synapse.storage.database import Database from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) @@ -491,8 +492,8 @@ class EventFederationStore(EventFederationWorkerStore): EVENT_AUTH_STATE_ONLY = "event_auth_state_only" - def __init__(self, db_conn, hs): - super(EventFederationStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(EventFederationStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_update_handler( self.EVENT_AUTH_STATE_ONLY, self._background_delete_non_state_event_auth diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/data_stores/main/event_push_actions.py index eec054cd4..9988a6d3f 100644 --- a/synapse/storage/data_stores/main/event_push_actions.py +++ b/synapse/storage/data_stores/main/event_push_actions.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import LoggingTransaction, SQLBaseStore +from synapse.storage.database import Database from synapse.util.caches.descriptors import cachedInlineCallbacks logger = logging.getLogger(__name__) @@ -68,8 +69,8 @@ def _deserialize_action(actions, is_highlight): class EventPushActionsWorkerStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(EventPushActionsWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(EventPushActionsWorkerStore, self).__init__(database, db_conn, hs) # These get correctly set by _find_stream_orderings_for_times_txn self.stream_ordering_month_ago = None @@ -611,8 +612,8 @@ class EventPushActionsWorkerStore(SQLBaseStore): class EventPushActionsStore(EventPushActionsWorkerStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" - def __init__(self, db_conn, hs): - super(EventPushActionsStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(EventPushActionsStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_index_update( self.EPA_HIGHLIGHT_INDEX, diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index d644c8278..da1529f6e 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -41,6 +41,7 @@ from synapse.storage._base import make_in_list_sql_clause from synapse.storage.data_stores.main.event_federation import EventFederationStore from synapse.storage.data_stores.main.events_worker import EventsWorkerStore from synapse.storage.data_stores.main.state import StateGroupWorkerStore +from synapse.storage.database import Database from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util import batch_iter from synapse.util.caches.descriptors import cached, cachedInlineCallbacks @@ -95,8 +96,8 @@ def 
_retry_on_integrity_error(func): class EventsStore( StateGroupWorkerStore, EventFederationStore, EventsWorkerStore, ): - def __init__(self, db_conn, hs): - super(EventsStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(EventsStore, self).__init__(database, db_conn, hs) # Collect metrics on the number of forward extremities that exist. # Counter of number of extremities to count diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index cb1fc30c3..efee17b92 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -23,6 +23,7 @@ from twisted.internet import defer from synapse.api.constants import EventContentFields from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.database import Database logger = logging.getLogger(__name__) @@ -33,8 +34,8 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" - def __init__(self, db_conn, hs): - super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(EventsBackgroundUpdatesStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_update_handler( self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index e041fc5ea..9ee117ce0 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -33,6 +33,7 @@ from synapse.events.utils import prune_event from synapse.logging.context import LoggingContext, PreserveLoggingContext from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.database import Database from synapse.types import get_domain_from_id from synapse.util import batch_iter from synapse.util.caches.descriptors import Cache @@ -55,8 +56,8 @@ _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) class EventsWorkerStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(EventsWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(EventsWorkerStore, self).__init__(database, db_conn, hs) self._get_event_cache = Cache( "*getEvent*", keylen=3, max_entries=hs.config.event_cache_size diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py index 03c9c6f8a..80ca36ded 100644 --- a/synapse/storage/data_stores/main/media_repository.py +++ b/synapse/storage/data_stores/main/media_repository.py @@ -13,11 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from synapse.storage._base import SQLBaseStore +from synapse.storage.database import Database class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(MediaRepositoryBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(MediaRepositoryBackgroundUpdateStore, self).__init__( + database, db_conn, hs + ) self.db.updates.register_background_index_update( update_name="local_media_repository_url_idx", @@ -31,8 +34,8 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): """Persistence for attachments and avatars""" - def __init__(self, db_conn, hs): - super(MediaRepositoryStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(MediaRepositoryStore, self).__init__(database, db_conn, hs) def get_local_media(self, media_id): """Get the metadata for a local piece of media diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py index 34bf3a188..27158534c 100644 --- a/synapse/storage/data_stores/main/monthly_active_users.py +++ b/synapse/storage/data_stores/main/monthly_active_users.py @@ -17,6 +17,7 @@ import logging from twisted.internet import defer from synapse.storage._base import SQLBaseStore +from synapse.storage.database import Database from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) @@ -27,13 +28,13 @@ LAST_SEEN_GRANULARITY = 60 * 60 * 1000 class MonthlyActiveUsersStore(SQLBaseStore): - def __init__(self, dbconn, hs): - super(MonthlyActiveUsersStore, self).__init__(None, hs) + def __init__(self, database: Database, db_conn, hs): + super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) self._clock = hs.get_clock() self.hs = hs # Do not add more reserved users than the total allowable number self.db.new_transaction( - dbconn, + db_conn, "initialise_mau_threepids", [], [], diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py index de682cc63..5ba13aa97 100644 --- a/synapse/storage/data_stores/main/push_rule.py +++ b/synapse/storage/data_stores/main/push_rule.py @@ -27,6 +27,7 @@ from synapse.storage.data_stores.main.appservice import ApplicationServiceWorker from synapse.storage.data_stores.main.pusher import PusherWorkerStore from synapse.storage.data_stores.main.receipts import ReceiptsWorkerStore from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore +from synapse.storage.database import Database from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -72,8 +73,8 @@ class PushRulesWorkerStore( # the abstract methods being implemented. 
__metaclass__ = abc.ABCMeta - def __init__(self, db_conn, hs): - super(PushRulesWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(PushRulesWorkerStore, self).__init__(database, db_conn, hs) push_rules_prefill, push_rules_id = self.db.get_cache_dict( db_conn, diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/data_stores/main/receipts.py index ac2d45bd5..96e54d145 100644 --- a/synapse/storage/data_stores/main/receipts.py +++ b/synapse/storage/data_stores/main/receipts.py @@ -22,6 +22,7 @@ from canonicaljson import json from twisted.internet import defer from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.database import Database from synapse.storage.util.id_generators import StreamIdGenerator from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -38,8 +39,8 @@ class ReceiptsWorkerStore(SQLBaseStore): # the abstract methods being implemented. __metaclass__ = abc.ABCMeta - def __init__(self, db_conn, hs): - super(ReceiptsWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(ReceiptsWorkerStore, self).__init__(database, db_conn, hs) self._receipts_stream_cache = StreamChangeCache( "ReceiptsRoomChangeCache", self.get_max_receipt_stream_id() @@ -315,14 +316,14 @@ class ReceiptsWorkerStore(SQLBaseStore): class ReceiptsStore(ReceiptsWorkerStore): - def __init__(self, db_conn, hs): + def __init__(self, database: Database, db_conn, hs): # We instantiate this first as the ReceiptsWorkerStore constructor # needs to be able to call get_max_receipt_stream_id self._receipts_id_gen = StreamIdGenerator( db_conn, "receipts_linearized", "stream_id" ) - super(ReceiptsStore, self).__init__(db_conn, hs) + super(ReceiptsStore, self).__init__(database, db_conn, hs) def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_current_token() diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index 1ef143c6d..5e8ecac0e 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -27,6 +27,7 @@ from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore +from synapse.storage.database import Database from synapse.types import UserID from synapse.util.caches.descriptors import cached, cachedInlineCallbacks @@ -36,8 +37,8 @@ logger = logging.getLogger(__name__) class RegistrationWorkerStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(RegistrationWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RegistrationWorkerStore, self).__init__(database, db_conn, hs) self.config = hs.config self.clock = hs.get_clock() @@ -794,8 +795,8 @@ class RegistrationWorkerStore(SQLBaseStore): class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): - def __init__(self, db_conn, hs): - super(RegistrationBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RegistrationBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.clock = hs.get_clock() self.config = hs.config @@ -920,8 +921,8 @@ class 
RegistrationBackgroundUpdateStore(RegistrationWorkerStore): class RegistrationStore(RegistrationBackgroundUpdateStore): - def __init__(self, db_conn, hs): - super(RegistrationStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RegistrationStore, self).__init__(database, db_conn, hs) self._account_validity = hs.config.account_validity diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index da42dae24..0148be20d 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -29,6 +29,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.search import SearchStore +from synapse.storage.database import Database from synapse.types import ThirdPartyInstanceID from synapse.util.caches.descriptors import cached, cachedInlineCallbacks @@ -361,8 +362,8 @@ class RoomWorkerStore(SQLBaseStore): class RoomBackgroundUpdateStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(RoomBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.config = hs.config @@ -440,8 +441,8 @@ class RoomBackgroundUpdateStore(SQLBaseStore): class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): - def __init__(self, db_conn, hs): - super(RoomStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RoomStore, self).__init__(database, db_conn, hs) self.config = hs.config diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py index 929f6b0d3..92e3b9c51 100644 --- a/synapse/storage/data_stores/main/roommember.py +++ b/synapse/storage/data_stores/main/roommember.py @@ -32,6 +32,7 @@ from synapse.storage._base import ( make_in_list_sql_clause, ) from synapse.storage.data_stores.main.events_worker import EventsWorkerStore +from synapse.storage.database import Database from synapse.storage.engines import Sqlite3Engine from synapse.storage.roommember import ( GetRoomsForUserWithStreamOrdering, @@ -54,8 +55,8 @@ _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership" class RoomMemberWorkerStore(EventsWorkerStore): - def __init__(self, db_conn, hs): - super(RoomMemberWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RoomMemberWorkerStore, self).__init__(database, db_conn, hs) # Is the current_state_events.membership up to date? Or is the # background update still running? 
@@ -835,8 +836,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): class RoomMemberBackgroundUpdateStore(SQLBaseStore): - def __init__(self, db_conn, hs): - super(RoomMemberBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RoomMemberBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_update_handler( _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile ) @@ -991,8 +992,8 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore): - def __init__(self, db_conn, hs): - super(RoomMemberStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(RoomMemberStore, self).__init__(database, db_conn, hs) def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database. diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py index ffa1817e6..4eec2fae5 100644 --- a/synapse/storage/data_stores/main/search.py +++ b/synapse/storage/data_stores/main/search.py @@ -25,6 +25,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine, Sqlite3Engine logger = logging.getLogger(__name__) @@ -42,8 +43,8 @@ class SearchBackgroundUpdateStore(SQLBaseStore): EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" - def __init__(self, db_conn, hs): - super(SearchBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SearchBackgroundUpdateStore, self).__init__(database, db_conn, hs) if not hs.config.enable_search: return @@ -342,8 +343,8 @@ class SearchBackgroundUpdateStore(SQLBaseStore): class SearchStore(SearchBackgroundUpdateStore): - def __init__(self, db_conn, hs): - super(SearchStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(SearchStore, self).__init__(database, db_conn, hs) def store_event_search_txn(self, txn, event, key, value): """Add event to the search table diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 7d5a9f812..9ef7b48c7 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -28,6 +28,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.events_worker import EventsWorkerStore +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine from synapse.storage.state import StateFilter from synapse.util.caches import get_cache_factor_for, intern_string @@ -213,8 +214,8 @@ class StateGroupWorkerStore( STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" - def __init__(self, db_conn, hs): - super(StateGroupWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(StateGroupWorkerStore, self).__init__(database, db_conn, hs) # Originally the state store used a single DictionaryCache to cache the # event IDs for the state types in a given state 
group to avoid hammering @@ -1029,8 +1030,8 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" - def __init__(self, db_conn, hs): - super(StateBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.db.updates.register_background_update_handler( self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, self._background_deduplicate_state, @@ -1245,8 +1246,8 @@ class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore): * `state_groups_state`: Maps state group to state events. """ - def __init__(self, db_conn, hs): - super(StateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(StateStore, self).__init__(database, db_conn, hs) def _store_event_state_mappings_txn( self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]] diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py index 40579bf96..7bc186e9a 100644 --- a/synapse/storage/data_stores/main/stats.py +++ b/synapse/storage/data_stores/main/stats.py @@ -22,6 +22,7 @@ from twisted.internet.defer import DeferredLock from synapse.api.constants import EventTypes, Membership from synapse.storage.data_stores.main.state_deltas import StateDeltasStore +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine from synapse.util.caches.descriptors import cached @@ -58,8 +59,8 @@ TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")} class StatsStore(StateDeltasStore): - def __init__(self, db_conn, hs): - super(StatsStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(StatsStore, self).__init__(database, db_conn, hs) self.server_name = hs.hostname self.clock = self.hs.get_clock() diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 2ff8c5710..140da8dad 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -47,6 +47,7 @@ from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.events_worker import EventsWorkerStore +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -251,8 +252,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): __metaclass__ = abc.ABCMeta - def __init__(self, db_conn, hs): - super(StreamWorkerStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(StreamWorkerStore, self).__init__(database, db_conn, hs) events_max = self.get_room_max_stream_ordering() event_cache_prefill, min_event_val = self.db.get_cache_dict( diff --git a/synapse/storage/data_stores/main/transactions.py b/synapse/storage/data_stores/main/transactions.py index c0d155a43..5b07c2fbc 100644 --- a/synapse/storage/data_stores/main/transactions.py +++ b/synapse/storage/data_stores/main/transactions.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process 
from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage.database import Database from synapse.util.caches.expiringcache import ExpiringCache # py2 sqlite has buffer hardcoded as only binary type, so we must use it, @@ -52,8 +53,8 @@ class TransactionStore(SQLBaseStore): """A collection of queries for handling PDUs. """ - def __init__(self, db_conn, hs): - super(TransactionStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(TransactionStore, self).__init__(database, db_conn, hs) self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/data_stores/main/user_directory.py index 62ffb34b2..90c180ec6 100644 --- a/synapse/storage/data_stores/main/user_directory.py +++ b/synapse/storage/data_stores/main/user_directory.py @@ -21,6 +21,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules from synapse.storage.data_stores.main.state import StateFilter from synapse.storage.data_stores.main.state_deltas import StateDeltasStore +from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import get_domain_from_id, get_localpart_from_id from synapse.util.caches.descriptors import cached @@ -37,8 +38,8 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # add_users_who_share_private_rooms? SHARE_PRIVATE_WORKING_SET = 500 - def __init__(self, db_conn, hs): - super(UserDirectoryBackgroundUpdateStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(UserDirectoryBackgroundUpdateStore, self).__init__(database, db_conn, hs) self.server_name = hs.hostname @@ -549,8 +550,8 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # add_users_who_share_private_rooms? SHARE_PRIVATE_WORKING_SET = 500 - def __init__(self, db_conn, hs): - super(UserDirectoryStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(UserDirectoryStore, self).__init__(database, db_conn, hs) def remove_from_user_dir(self, user_id): def _remove_from_user_dir_txn(txn): diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index dfeea2459..1679112d8 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -28,6 +28,7 @@ from synapse.storage.data_stores.main.appservice import ( ApplicationServiceStore, ApplicationServiceTransactionStore, ) +from synapse.storage.database import Database from tests import unittest from tests.utils import setup_test_homeserver @@ -382,8 +383,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): # required for ApplicationServiceTransactionStoreTestCase tests class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): - def __init__(self, db_conn, hs): - super(TestTransactionStore, self).__init__(db_conn, hs) + def __init__(self, database: Database, db_conn, hs): + super(TestTransactionStore, self).__init__(database, db_conn, hs) class ApplicationServiceStoreConfigTestCase(unittest.TestCase): From d64bb32a73761ad55f53152756b8e0c10e1de9b0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 13:09:40 +0000 Subject: [PATCH 10/60] Move are_all_users_on_domain checks to main data store. 
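The check moves out of `synapse/app/homeserver.py` and `synapse/storage/__init__.py` and into the `DataStore` constructor, so it runs whenever the main store is built rather than only in the homeserver's startup checks. For reference, a standalone sketch of the relocated check (logic copied from the diff below; `txn` is assumed to be a DB-API cursor and `database_engine` any engine exposing `convert_param_style`):

    def are_all_users_on_domain(txn, database_engine, domain):
        # Count users whose Matrix ID does not end in ":<domain>".
        sql = database_engine.convert_param_style(
            "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?"
        )
        txn.execute(sql, ("%:" + domain,))
        num_not_matching = txn.fetchall()[0][0]
        # True only if every stored user is native to this server_name.
        return num_not_matching == 0
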
--- synapse/app/homeserver.py | 12 +--------- synapse/storage/__init__.py | 12 ---------- synapse/storage/data_stores/main/__init__.py | 24 +++++++++++++++++++- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 9f81a857a..6d6c1f8e0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -68,7 +68,7 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.rest.well_known import WellKnownResource from synapse.server import HomeServer -from synapse.storage import DataStore, are_all_users_on_domain +from synapse.storage import DataStore from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database from synapse.util.caches import CACHE_SIZE_FACTOR @@ -295,16 +295,6 @@ class SynapseHomeServer(HomeServer): logger.warning("Unrecognized listener type: %s", listener["type"]) def run_startup_checks(self, db_conn, database_engine): - all_users_native = are_all_users_on_domain( - db_conn.cursor(), database_engine, self.hostname - ) - if not all_users_native: - quit_with_error( - "Found users in database not native to %s!\n" - "You cannot changed a synapse server_name after it's been configured" - % (self.hostname,) - ) - try: database_engine.check_database(db_conn.cursor()) except IncorrectDatabaseSetup as e: diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 8fb18203d..ec89f645d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -49,15 +49,3 @@ class Storage(object): self.persistence = EventsPersistenceStorage(hs, stores) self.purge_events = PurgeEventsStorage(hs, stores) self.state = StateGroupStorage(hs, stores) - - -def are_all_users_on_domain(txn, database_engine, domain): - sql = database_engine.convert_param_style( - "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?" - ) - pat = "%:" + domain - txn.execute(sql, (pat,)) - num_not_matching = txn.fetchall()[0][0] - if num_not_matching == 0: - return True - return False diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py index 7f5fd81bc..66f8a9f3a 100644 --- a/synapse/storage/data_stores/main/__init__.py +++ b/synapse/storage/data_stores/main/__init__.py @@ -115,7 +115,17 @@ class DataStore( def __init__(self, database: Database, db_conn, hs): self.hs = hs self._clock = hs.get_clock() - self.database_engine = hs.database_engine + self.database_engine = database.engine + + all_users_native = are_all_users_on_domain( + db_conn.cursor(), database.engine, hs.hostname + ) + if not all_users_native: + raise Exception( + "Found users in database not native to %s!\n" + "You cannot changed a synapse server_name after it's been configured" + % (self.hostname,) + ) self._stream_id_gen = StreamIdGenerator( db_conn, @@ -555,3 +565,15 @@ class DataStore( retcols=["name", "password_hash", "is_guest", "admin", "user_type"], desc="search_users", ) + + +def are_all_users_on_domain(txn, database_engine, domain): + sql = database_engine.convert_param_style( + "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?" 
+ ) + pat = "%:" + domain + txn.execute(sql, (pat,)) + num_not_matching = txn.fetchall()[0][0] + if num_not_matching == 0: + return True + return False From d537be1ebd0e7ce4c84118efa400932cc6432aa9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 13:40:02 +0000 Subject: [PATCH 11/60] Pass Database into the data store --- synapse/server.py | 3 +- synapse/storage/_base.py | 2 +- synapse/storage/background_updates.py | 2 +- synapse/storage/data_stores/__init__.py | 7 +++-- synapse/storage/database.py | 38 +++++++++++-------------- 5 files changed, 24 insertions(+), 28 deletions(-) diff --git a/synapse/server.py b/synapse/server.py index be9af7f98..2db3dab22 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -238,8 +238,7 @@ class HomeServer(object): def setup(self): logger.info("Setting up.") with self.get_db_conn() as conn: - datastore = self.DATASTORE_CLASS(conn, self) - self.datastores = DataStores(datastore, conn, self) + self.datastores = DataStores(self.DATASTORE_CLASS, conn, self) conn.commit() self.start_time = int(self.get_clock().time()) logger.info("Finished setting up.") diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index f9e7f9a71..b7637b5dc 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -41,7 +41,7 @@ class SQLBaseStore(object): self.hs = hs self._clock = hs.get_clock() self.database_engine = hs.database_engine - self.db = Database(hs) # In future this will be passed in + self.db = database self.rand = random.SystemRandom() def _invalidate_state_caches(self, room_id, members_changed): diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a9a13a265..4f97fd5ab 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -379,7 +379,7 @@ class BackgroundUpdater(object): logger.debug("[SQL] %s", sql) c.execute(sql) - if isinstance(self.db.database_engine, engines.PostgresEngine): + if isinstance(self.db.engine, engines.PostgresEngine): runner = create_index_psql elif psql_only: runner = None diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/data_stores/__init__.py index cb184a98c..79ecc6273 100644 --- a/synapse/storage/data_stores/__init__.py +++ b/synapse/storage/data_stores/__init__.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from synapse.storage.database import Database + class DataStores(object): """The various data stores. @@ -20,7 +22,8 @@ class DataStores(object): These are low level interfaces to physical databases. """ - def __init__(self, main_store, db_conn, hs): + def __init__(self, main_store_class, db_conn, hs): # Note we pass in the main store here as workers use a different main # store. - self.main = main_store + database = Database(hs) + self.main = main_store_class(database, db_conn, hs) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 6843b7e7f..ec19ae1d9 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -234,7 +234,7 @@ class Database(object): # to watch it self._txn_perf_counters = PerformanceCounters() - self.database_engine = hs.database_engine + self.engine = hs.database_engine # A set of tables that are not safe to use native upserts in. 
self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys()) @@ -242,10 +242,10 @@ class Database(object): # We add the user_directory_search table to the blacklist on SQLite # because the existing search table does not have an index, making it # unsafe to use native upserts. - if isinstance(self.database_engine, Sqlite3Engine): + if isinstance(self.engine, Sqlite3Engine): self._unsafe_to_upsert_tables.add("user_directory_search") - if self.database_engine.can_native_upsert: + if self.engine.can_native_upsert: # Check ASAP (and then later, every 1s) to see if we have finished # background updates of tables that aren't safe to update. self._clock.call_later( @@ -331,7 +331,7 @@ class Database(object): cursor = LoggingTransaction( conn.cursor(), name, - self.database_engine, + self.engine, after_callbacks, exception_callbacks, ) @@ -339,7 +339,7 @@ class Database(object): r = func(cursor, *args, **kwargs) conn.commit() return r - except self.database_engine.module.OperationalError as e: + except self.engine.module.OperationalError as e: # This can happen if the database disappears mid # transaction. logger.warning( @@ -353,20 +353,20 @@ class Database(object): i += 1 try: conn.rollback() - except self.database_engine.module.Error as e1: + except self.engine.module.Error as e1: logger.warning( "[TXN EROLL] {%s} %s", name, exception_to_unicode(e1) ) continue raise - except self.database_engine.module.DatabaseError as e: - if self.database_engine.is_deadlock(e): + except self.engine.module.DatabaseError as e: + if self.engine.is_deadlock(e): logger.warning("[TXN DEADLOCK] {%s} %d/%d", name, i, N) if i < N: i += 1 try: conn.rollback() - except self.database_engine.module.Error as e1: + except self.engine.module.Error as e1: logger.warning( "[TXN EROLL] {%s} %s", name, @@ -494,7 +494,7 @@ class Database(object): sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) - if self.database_engine.is_connection_closed(conn): + if self.engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") conn.reconnect() @@ -561,7 +561,7 @@ class Database(object): """ try: yield self.runInteraction(desc, self.simple_insert_txn, table, values) - except self.database_engine.module.IntegrityError: + except self.engine.module.IntegrityError: # We have to do or_ignore flag at this layer, since we can't reuse # a cursor after we receive an error from the db. if not or_ignore: @@ -660,7 +660,7 @@ class Database(object): lock=lock, ) return result - except self.database_engine.module.IntegrityError as e: + except self.engine.module.IntegrityError as e: attempts += 1 if attempts >= 5: # don't retry forever, because things other than races @@ -692,10 +692,7 @@ class Database(object): upserts return True if a new entry was created, False if an existing one was updated. 
""" - if ( - self.database_engine.can_native_upsert - and table not in self._unsafe_to_upsert_tables - ): + if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: return self.simple_upsert_txn_native_upsert( txn, table, keyvalues, values, insertion_values=insertion_values ) @@ -726,7 +723,7 @@ class Database(object): """ # We need to lock the table :(, unless we're *really* careful if lock: - self.database_engine.lock_table(txn, table) + self.engine.lock_table(txn, table) def _getwhere(key): # If the value we're passing in is None (aka NULL), we need to use @@ -828,10 +825,7 @@ class Database(object): Returns: None """ - if ( - self.database_engine.can_native_upsert - and table not in self._unsafe_to_upsert_tables - ): + if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: return self.simple_upsert_many_txn_native_upsert( txn, table, key_names, key_values, value_names, value_values ) @@ -1301,7 +1295,7 @@ class Database(object): "limit": limit, } - sql = self.database_engine.convert_param_style(sql) + sql = self.engine.convert_param_style(sql) txn = db_conn.cursor() txn.execute(sql, (int(max_value),)) From 75f87450d82e5039d90eabd60b70ddea97a6bdbc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 13:09:40 +0000 Subject: [PATCH 12/60] Move start up DB checks to main data store. --- synapse/app/homeserver.py | 23 +++++------------------ synapse/storage/data_stores/__init__.py | 7 +++++++ 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6d6c1f8e0..df65d0a98 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -70,7 +70,7 @@ from synapse.rest.well_known import WellKnownResource from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import IncorrectDatabaseSetup, create_engine -from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database +from synapse.storage.prepare_database import UpgradeDatabaseException from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole @@ -294,12 +294,6 @@ class SynapseHomeServer(HomeServer): else: logger.warning("Unrecognized listener type: %s", listener["type"]) - def run_startup_checks(self, db_conn, database_engine): - try: - database_engine.check_database(db_conn.cursor()) - except IncorrectDatabaseSetup as e: - quit_with_error(str(e)) - # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") @@ -347,16 +341,12 @@ def setup(config_options): synapse.config.logger.setup_logging(hs, config, use_worker_options=False) - logger.info("Preparing database: %s...", config.database_config["name"]) + logger.info("Setting up server") try: - with hs.get_db_conn(run_new_connection=False) as db_conn: - prepare_database(db_conn, database_engine, config=config) - database_engine.on_new_connection(db_conn) - - hs.run_startup_checks(db_conn, database_engine) - - db_conn.commit() + hs.setup() + except IncorrectDatabaseSetup as e: + quit_with_error(str(e)) except UpgradeDatabaseException: sys.stderr.write( "\nFailed to upgrade database.\n" @@ -365,9 +355,6 @@ def setup(config_options): ) sys.exit(1) - logger.info("Database prepared in %s.", config.database_config["name"]) - - hs.setup() hs.setup_master() @defer.inlineCallbacks diff --git a/synapse/storage/data_stores/__init__.py 
b/synapse/storage/data_stores/__init__.py index 79ecc6273..cecc04f03 100644 --- a/synapse/storage/data_stores/__init__.py +++ b/synapse/storage/data_stores/__init__.py @@ -14,6 +14,7 @@ # limitations under the License. from synapse.storage.database import Database +from synapse.storage.prepare_database import prepare_database class DataStores(object): @@ -26,4 +27,10 @@ class DataStores(object): # Note we pass in the main store here as workers use a different main # store. database = Database(hs) + + # Check that db is correctly configured. + database.engine.check_database(db_conn.cursor()) + + prepare_database(db_conn, database.engine, config=hs.config) + self.main = main_store_class(database, db_conn, hs) From 852f80d8a697c9d556b1b2641a2e4c1797cbbb46 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 16:02:50 +0000 Subject: [PATCH 13/60] Fixup tests --- tests/replication/slave/storage/_base.py | 5 ++++- tests/storage/test_appservice.py | 12 +++++++----- tests/storage/test_base.py | 3 ++- tests/storage/test_profile.py | 3 +-- tests/storage/test_user_directory.py | 4 +--- tests/test_federation.py | 16 +++++----------- 6 files changed, 20 insertions(+), 23 deletions(-) diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index e7472e3a9..3dae83c54 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -20,6 +20,7 @@ from synapse.replication.tcp.client import ( ReplicationClientHandler, ) from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory +from synapse.storage.database import Database from tests import unittest from tests.server import FakeTransport @@ -42,7 +43,9 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): self.master_store = self.hs.get_datastore() self.storage = hs.get_storage() - self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs) + self.slaved_store = self.STORE_TYPE( + Database(hs), self.hs.get_db_conn(), self.hs + ) self.event_id = 0 server_factory = ReplicationStreamProtocolFactory(self.hs) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 1679112d8..2e521e9ab 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -55,7 +55,8 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob") self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob") # must be done after inserts - self.store = ApplicationServiceStore(hs.get_db_conn(), hs) + database = Database(hs) + self.store = ApplicationServiceStore(database, hs.get_db_conn(), hs) def tearDown(self): # TODO: suboptimal that we need to create files for tests! 
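As in the hunk above, tests that construct a store by hand now build the `Database` wrapper themselves and pass it as the new first argument. A sketch of the call shape (homeserver setup elided; `hs` is assumed to be a test homeserver as returned by `setup_test_homeserver`):

    from synapse.storage.database import Database

    database = Database(hs)
    store = ApplicationServiceStore(database, hs.get_db_conn(), hs)
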
@@ -124,7 +125,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files = [] - self.store = TestTransactionStore(hs.get_db_conn(), hs) + database = Database(hs) + self.store = TestTransactionStore(database, hs.get_db_conn(), hs) def _add_service(self, url, as_token, id): as_yaml = dict( @@ -417,7 +419,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): hs.config.event_cache_size = 1 hs.config.password_providers = [] - ApplicationServiceStore(hs.get_db_conn(), hs) + ApplicationServiceStore(Database(hs), hs.get_db_conn(), hs) @defer.inlineCallbacks def test_duplicate_ids(self): @@ -433,7 +435,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): hs.config.password_providers = [] with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(hs.get_db_conn(), hs) + ApplicationServiceStore(Database(hs), hs.get_db_conn(), hs) e = cm.exception self.assertIn(f1, str(e)) @@ -454,7 +456,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): hs.config.password_providers = [] with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(hs.get_db_conn(), hs) + ApplicationServiceStore(Database(hs), hs.get_db_conn(), hs) e = cm.exception self.assertIn(f1, str(e)) diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 7915d48a9..537cfe9f6 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -21,6 +21,7 @@ from mock import Mock from twisted.internet import defer from synapse.storage._base import SQLBaseStore +from synapse.storage.database import Database from synapse.storage.engines import create_engine from tests import unittest @@ -59,7 +60,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): "test", db_pool=self.db_pool, config=config, database_engine=fake_engine ) - self.datastore = SQLBaseStore(None, hs) + self.datastore = SQLBaseStore(Database(hs), None, hs) @defer.inlineCallbacks def test_insert_1col(self): diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 24c7fe16c..9b6f7211a 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -16,7 +16,6 @@ from twisted.internet import defer -from synapse.storage.data_stores.main.profile import ProfileStore from synapse.types import UserID from tests import unittest @@ -28,7 +27,7 @@ class ProfileStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver(self.addCleanup) - self.store = ProfileStore(hs.get_db_conn(), hs) + self.store = hs.get_datastore() self.u_frank = UserID.from_string("@frank:test") diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 7eea57c0e..6a545d2eb 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -15,8 +15,6 @@ from twisted.internet import defer -from synapse.storage.data_stores.main.user_directory import UserDirectoryStore - from tests import unittest from tests.utils import setup_test_homeserver @@ -29,7 +27,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield setup_test_homeserver(self.addCleanup) - self.store = UserDirectoryStore(self.hs.get_db_conn(), self.hs) + self.store = self.hs.get_datastore() # alice and bob are both in !room_id. bobby is not but shares # a homeserver with alice. 
diff --git a/tests/test_federation.py b/tests/test_federation.py index 7d82b5846..ad165d729 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -33,6 +33,8 @@ class MessageAcceptTests(unittest.TestCase): self.reactor.advance(0.1) self.room_id = self.successResultOf(room)["room_id"] + self.store = self.homeserver.get_datastore() + # Figure out what the most recent event is most_recent = self.successResultOf( maybeDeferred( @@ -77,10 +79,7 @@ class MessageAcceptTests(unittest.TestCase): # Make sure we actually joined the room self.assertEqual( self.successResultOf( - maybeDeferred( - self.homeserver.get_datastore().get_latest_event_ids_in_room, - self.room_id, - ) + maybeDeferred(self.store.get_latest_event_ids_in_room, self.room_id) )[0], "$join:test.serv", ) @@ -100,10 +99,7 @@ class MessageAcceptTests(unittest.TestCase): # Figure out what the most recent event is most_recent = self.successResultOf( - maybeDeferred( - self.homeserver.get_datastore().get_latest_event_ids_in_room, - self.room_id, - ) + maybeDeferred(self.store.get_latest_event_ids_in_room, self.room_id) )[0] # Now lie about an event @@ -141,7 +137,5 @@ class MessageAcceptTests(unittest.TestCase): ) # Make sure the invalid event isn't there - extrem = maybeDeferred( - self.homeserver.get_datastore().get_latest_event_ids_in_room, self.room_id - ) + extrem = maybeDeferred(self.store.get_latest_event_ids_in_room, self.room_id) self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv") From 5e35f69ac35cbe12d2e7e033e68ec507222c40b3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 16:13:41 +0000 Subject: [PATCH 14/60] Newsfile --- changelog.d/6487.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6487.misc diff --git a/changelog.d/6487.misc b/changelog.d/6487.misc new file mode 100644 index 000000000..18b49b9cb --- /dev/null +++ b/changelog.d/6487.misc @@ -0,0 +1 @@ +Pass in `Database` object to data stores. From 71ee22c0baf7c804ecc93ce5fd9ac00e1f410e6f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 16:27:29 +0000 Subject: [PATCH 15/60] Fix port db script --- .buildkite/postgres-config.yaml | 2 +- .buildkite/sqlite-config.yaml | 2 +- scripts/synapse_port_db | 36 ++------------------ synapse/storage/data_stores/main/__init__.py | 2 +- 4 files changed, 5 insertions(+), 37 deletions(-) diff --git a/.buildkite/postgres-config.yaml b/.buildkite/postgres-config.yaml index a35fec394..2acbe66f4 100644 --- a/.buildkite/postgres-config.yaml +++ b/.buildkite/postgres-config.yaml @@ -1,7 +1,7 @@ # Configuration file used for testing the 'synapse_port_db' script. # Tells the script to connect to the postgresql database that will be available in the # CI's Docker setup at the point where this file is considered. -server_name: "test" +server_name: "localhost:8800" signing_key_path: "/src/.buildkite/test.signing.key" diff --git a/.buildkite/sqlite-config.yaml b/.buildkite/sqlite-config.yaml index 635b92176..6d9bf80d8 100644 --- a/.buildkite/sqlite-config.yaml +++ b/.buildkite/sqlite-config.yaml @@ -1,7 +1,7 @@ # Configuration file used for testing the 'synapse_port_db' script. # Tells the 'update_database' script to connect to the test SQLite database to upgrade its # schema and run background updates on it. 
-server_name: "test" +server_name: "localhost:8800" signing_key_path: "/src/.buildkite/test.signing.key" diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 72061177c..e393a9b2f 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -55,6 +55,7 @@ from synapse.storage.data_stores.main.stats import StatsStore from synapse.storage.data_stores.main.user_directory import ( UserDirectoryBackgroundUpdateStore, ) +from synapse.storage.database import Database from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.util import Clock @@ -139,39 +140,6 @@ class Store( UserDirectoryBackgroundUpdateStore, StatsStore, ): - def __init__(self, db_conn, hs): - super().__init__(db_conn, hs) - self.db_pool = hs.get_db_pool() - - @defer.inlineCallbacks - def runInteraction(self, desc, func, *args, **kwargs): - def r(conn): - try: - i = 0 - N = 5 - while True: - try: - txn = conn.cursor() - return func( - LoggingTransaction(txn, desc, self.database_engine, [], []), - *args, - **kwargs - ) - except self.database_engine.module.DatabaseError as e: - if self.database_engine.is_deadlock(e): - logger.warning("[TXN DEADLOCK] {%s} %d/%d", desc, i, N) - if i < N: - i += 1 - conn.rollback() - continue - raise - except Exception as e: - logger.debug("[TXN FAIL] {%s} %s", desc, e) - raise - - with PreserveLoggingContext(): - return (yield self.db_pool.runWithConnection(r)) - def execute(self, f, *args, **kwargs): return self.db.runInteraction(f.__name__, f, *args, **kwargs) @@ -512,7 +480,7 @@ class Porter(object): hs = MockHomeserver(self.hs_config, engine, conn, db_pool) - store = Store(conn, hs) + store = Store(Database(hs), conn, hs) yield store.db.runInteraction( "%s_engine.check_database" % config["name"], engine.check_database, diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py index 66f8a9f3a..c577c0df5 100644 --- a/synapse/storage/data_stores/main/__init__.py +++ b/synapse/storage/data_stores/main/__init__.py @@ -124,7 +124,7 @@ class DataStore( raise Exception( "Found users in database not native to %s!\n" "You cannot changed a synapse server_name after it's been configured" - % (self.hostname,) + % (hs.hostname,) ) self._stream_id_gen = StreamIdGenerator( From 65b37f672927d0b88401d97a9f27f506eec0ca6d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Dec 2019 11:47:55 +0000 Subject: [PATCH 16/60] Fix comment Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/data_stores/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/data_stores/__init__.py index cecc04f03..cafedd5c0 100644 --- a/synapse/storage/data_stores/__init__.py +++ b/synapse/storage/data_stores/__init__.py @@ -24,7 +24,7 @@ class DataStores(object): """ def __init__(self, main_store_class, db_conn, hs): - # Note we pass in the main store here as workers use a different main + # Note we pass in the main store class here as workers use a different main # store. database = Database(hs) From adfdd82b21ae296ed77453b2f51d55414890f162 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Dec 2019 13:59:27 +0000 Subject: [PATCH 17/60] Back out perf regression from get_cross_signing_keys_from_cache. (#6494) Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. 
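The backout replaces the per-user cache lookups in `get_cross_signing_keys_from_cache` with a stub that returns empty key maps until the replacement in #6486 lands. One detail worth noting in the diff below: the method loses its `@defer.inlineCallbacks` decorator, so it must hand back an already-fired Deferred for `yield`-based callers to keep working. A minimal illustration of that pattern (toy code, not the Synapse implementation):

    from twisted.internet import defer

    def get_keys_stub(query, from_user_id):
        # Plain function, no inlineCallbacks: wrap the result in an
        # already-fired Deferred so callers can still `yield` on it.
        return defer.succeed(
            {"master_keys": {}, "self_signing_keys": {}, "user_signing_keys": {}}
        )
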
--- changelog.d/6494.bugfix | 1 + synapse/handlers/e2e_keys.py | 38 +++++++-------------------------- sytest-blacklist | 3 +++ tests/handlers/test_e2e_keys.py | 8 +++++++ 4 files changed, 20 insertions(+), 30 deletions(-) create mode 100644 changelog.d/6494.bugfix diff --git a/changelog.d/6494.bugfix b/changelog.d/6494.bugfix new file mode 100644 index 000000000..78726d5d7 --- /dev/null +++ b/changelog.d/6494.bugfix @@ -0,0 +1 @@ +Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 28c12753c..57a10daef 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -264,7 +264,6 @@ class E2eKeysHandler(object): return ret - @defer.inlineCallbacks def get_cross_signing_keys_from_cache(self, query, from_user_id): """Get cross-signing keys for users from the database @@ -284,35 +283,14 @@ class E2eKeysHandler(object): self_signing_keys = {} user_signing_keys = {} - for user_id in query: - # XXX: consider changing the store functions to allow querying - # multiple users simultaneously. - key = yield self.store.get_e2e_cross_signing_key( - user_id, "master", from_user_id - ) - if key: - master_keys[user_id] = key - - key = yield self.store.get_e2e_cross_signing_key( - user_id, "self_signing", from_user_id - ) - if key: - self_signing_keys[user_id] = key - - # users can see other users' master and self-signing keys, but can - # only see their own user-signing keys - if from_user_id == user_id: - key = yield self.store.get_e2e_cross_signing_key( - user_id, "user_signing", from_user_id - ) - if key: - user_signing_keys[user_id] = key - - return { - "master_keys": master_keys, - "self_signing_keys": self_signing_keys, - "user_signing_keys": user_signing_keys, - } + # Currently a stub, implementation coming in https://github.com/matrix-org/synapse/pull/6486 + return defer.succeed( + { + "master_keys": master_keys, + "self_signing_keys": self_signing_keys, + "user_signing_keys": user_signing_keys, + } + ) @trace @defer.inlineCallbacks diff --git a/sytest-blacklist b/sytest-blacklist index 411cce069..79b2d4402 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -33,3 +33,6 @@ New federated private chats get full presence information (SYN-115) # Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing # this requirement from the spec Inbound federation of state requires event_id as a mandatory paramater + +# Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands +Can upload self-signing keys diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 854eb6c02..fdfa2cbbc 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -183,6 +183,10 @@ class E2eKeysHandlerTestCase(unittest.TestCase): ) self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]}) + test_replace_master_key.skip = ( + "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486" + ) + @defer.inlineCallbacks def test_reupload_signatures(self): """re-uploading a signature should not fail""" @@ -503,3 +507,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase): ], other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey], ) + + test_upload_signatures.skip = ( + "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486" + ) From 96d35f1028833ae700f39bb12c6f77a7de2c30bf Mon Sep 17 00:00:00 2001 From: Clifford Garwood II Date: Mon, 9 Dec 2019 09:40:37 
-0500 Subject: [PATCH 18/60] Systemd documentation (#6490) --- changelog.d/6490.doc | 1 + contrib/systemd/matrix-synapse.service | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6490.doc diff --git a/changelog.d/6490.doc b/changelog.d/6490.doc new file mode 100644 index 000000000..6db351d7d --- /dev/null +++ b/changelog.d/6490.doc @@ -0,0 +1 @@ +Update documentation and variables in user contributed systemd reference file. diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service index bd492544b..813717b03 100644 --- a/contrib/systemd/matrix-synapse.service +++ b/contrib/systemd/matrix-synapse.service @@ -25,7 +25,7 @@ Restart=on-abort User=synapse Group=nogroup -WorkingDirectory=/opt/synapse +WorkingDirectory=/home/synapse/synapse ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml SyslogIdentifier=matrix-synapse From 24da1ffcb615ecde30c413b73434688c0d5963b9 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Dec 2019 14:46:20 +0000 Subject: [PATCH 19/60] 1.7.0rc1 --- CHANGES.md | 78 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5815.feature | 1 - changelog.d/5858.feature | 1 - changelog.d/5925.feature | 1 - changelog.d/5925.removal | 1 - changelog.d/6119.feature | 1 - changelog.d/6176.feature | 1 - changelog.d/6237.bugfix | 1 - changelog.d/6241.bugfix | 1 - changelog.d/6266.misc | 1 - changelog.d/6322.misc | 1 - changelog.d/6329.bugfix | 1 - changelog.d/6332.bugfix | 1 - changelog.d/6333.bugfix | 1 - changelog.d/6343.misc | 1 - changelog.d/6354.feature | 1 - changelog.d/6362.misc | 1 - changelog.d/6369.doc | 1 - changelog.d/6379.misc | 1 - changelog.d/6388.doc | 1 - changelog.d/6390.doc | 1 - changelog.d/6392.misc | 1 - changelog.d/6406.bugfix | 1 - changelog.d/6408.bugfix | 1 - changelog.d/6409.feature | 1 - changelog.d/6420.bugfix | 1 - changelog.d/6421.bugfix | 1 - changelog.d/6423.misc | 1 - changelog.d/6426.bugfix | 1 - changelog.d/6429.misc | 1 - changelog.d/6434.feature | 1 - changelog.d/6436.bugfix | 1 - changelog.d/6443.doc | 1 - changelog.d/6449.bugfix | 1 - changelog.d/6451.bugfix | 1 - changelog.d/6454.misc | 1 - changelog.d/6458.doc | 1 - changelog.d/6461.doc | 1 - changelog.d/6462.bugfix | 1 - changelog.d/6464.misc | 1 - changelog.d/6468.misc | 1 - changelog.d/6469.misc | 1 - changelog.d/6470.bugfix | 1 - changelog.d/6472.bugfix | 1 - changelog.d/6480.misc | 1 - changelog.d/6482.misc | 1 - changelog.d/6483.misc | 1 - changelog.d/6484.misc | 1 - changelog.d/6487.misc | 1 - changelog.d/6488.removal | 1 - changelog.d/6490.doc | 1 - changelog.d/6491.bugfix | 1 - changelog.d/6493.bugfix | 1 - changelog.d/6494.bugfix | 1 - synapse/__init__.py | 2 +- 55 files changed, 79 insertions(+), 54 deletions(-) delete mode 100644 changelog.d/5815.feature delete mode 100644 changelog.d/5858.feature delete mode 100644 changelog.d/5925.feature delete mode 100644 changelog.d/5925.removal delete mode 100644 changelog.d/6119.feature delete mode 100644 changelog.d/6176.feature delete mode 100644 changelog.d/6237.bugfix delete mode 100644 changelog.d/6241.bugfix delete mode 100644 changelog.d/6266.misc delete mode 100644 changelog.d/6322.misc delete mode 100644 changelog.d/6329.bugfix delete mode 100644 changelog.d/6332.bugfix delete mode 100644 changelog.d/6333.bugfix delete mode 100644 changelog.d/6343.misc delete mode 100644 changelog.d/6354.feature delete mode 100644 changelog.d/6362.misc delete mode 100644 changelog.d/6369.doc delete mode 
100644 changelog.d/6379.misc delete mode 100644 changelog.d/6388.doc delete mode 100644 changelog.d/6390.doc delete mode 100644 changelog.d/6392.misc delete mode 100644 changelog.d/6406.bugfix delete mode 100644 changelog.d/6408.bugfix delete mode 100644 changelog.d/6409.feature delete mode 100644 changelog.d/6420.bugfix delete mode 100644 changelog.d/6421.bugfix delete mode 100644 changelog.d/6423.misc delete mode 100644 changelog.d/6426.bugfix delete mode 100644 changelog.d/6429.misc delete mode 100644 changelog.d/6434.feature delete mode 100644 changelog.d/6436.bugfix delete mode 100644 changelog.d/6443.doc delete mode 100644 changelog.d/6449.bugfix delete mode 100644 changelog.d/6451.bugfix delete mode 100644 changelog.d/6454.misc delete mode 100644 changelog.d/6458.doc delete mode 100644 changelog.d/6461.doc delete mode 100644 changelog.d/6462.bugfix delete mode 100644 changelog.d/6464.misc delete mode 100644 changelog.d/6468.misc delete mode 100644 changelog.d/6469.misc delete mode 100644 changelog.d/6470.bugfix delete mode 100644 changelog.d/6472.bugfix delete mode 100644 changelog.d/6480.misc delete mode 100644 changelog.d/6482.misc delete mode 100644 changelog.d/6483.misc delete mode 100644 changelog.d/6484.misc delete mode 100644 changelog.d/6487.misc delete mode 100644 changelog.d/6488.removal delete mode 100644 changelog.d/6490.doc delete mode 100644 changelog.d/6491.bugfix delete mode 100644 changelog.d/6493.bugfix delete mode 100644 changelog.d/6494.bugfix diff --git a/CHANGES.md b/CHANGES.md index a9afd36d2..0ef9794aa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,81 @@ +Synapse 1.7.0rc1 (2019-12-09) +============================= + +Features +-------- + +- Implement per-room message retention policies. ([\#5815](https://github.com/matrix-org/synapse/issues/5815)) +- Add etag and count fields to key backup endpoints to help clients guess if there are new keys. ([\#5858](https://github.com/matrix-org/synapse/issues/5858)) +- Add admin/v2/users endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925)) +- Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. ([\#6119](https://github.com/matrix-org/synapse/issues/6119)) +- Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314. ([\#6176](https://github.com/matrix-org/synapse/issues/6176)) +- Configure privacy preserving settings by default for the room directory. ([\#6354](https://github.com/matrix-org/synapse/issues/6354)) +- Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). ([\#6409](https://github.com/matrix-org/synapse/issues/6409)) +- Add support for MSC 2367, which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434)) + + +Bugfixes +-------- + +- Transfer non-standard power levels on room upgrade. ([\#6237](https://github.com/matrix-org/synapse/issues/6237)) +- Fix error from the Pillow library when uploading RGBA images. ([\#6241](https://github.com/matrix-org/synapse/issues/6241)) +- Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests. 
([\#6329](https://github.com/matrix-org/synapse/issues/6329)) +- Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices. ([\#6332](https://github.com/matrix-org/synapse/issues/6332)) +- Prevent account data syncs getting lost across TCP replication. ([\#6333](https://github.com/matrix-org/synapse/issues/6333)) +- Fix bug: TypeError in `register_user()` while using LDAP auth module. ([\#6406](https://github.com/matrix-org/synapse/issues/6406)) +- Fix an intermittent exception when handling read-receipts. ([\#6408](https://github.com/matrix-org/synapse/issues/6408)) +- Fix broken guest registration when there are existing blocks of numeric user IDs. ([\#6420](https://github.com/matrix-org/synapse/issues/6420)) +- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421)) +- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426)) +- Fix a bug where a room could become unusable with a low retention policy and a low activity. ([\#6436](https://github.com/matrix-org/synapse/issues/6436)) +- Fix error when using synapse_port_db on a vanilla synapse db. ([\#6449](https://github.com/matrix-org/synapse/issues/6449)) +- Fix uploading multiple cross signing signatures for the same user. ([\#6451](https://github.com/matrix-org/synapse/issues/6451)) +- Fix bug which lead to exceptions being thrown in a loop when a cross-signed device is deleted. ([\#6462](https://github.com/matrix-org/synapse/issues/6462)) +- Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process. ([\#6470](https://github.com/matrix-org/synapse/issues/6470)) +- Improve sanity-checking when receiving events over federation. ([\#6472](https://github.com/matrix-org/synapse/issues/6472)) +- Fix inaccurate per-block Prometheus metrics. ([\#6491](https://github.com/matrix-org/synapse/issues/6491)) +- Fix small performance regression for sending invites. ([\#6493](https://github.com/matrix-org/synapse/issues/6493)) +- Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. ([\#6494](https://github.com/matrix-org/synapse/issues/6494)) + + +Improved Documentation +---------------------- + +- Update documentation and variables in user contributed systemd reference file. ([\#6369](https://github.com/matrix-org/synapse/issues/6369), [\#6490](https://github.com/matrix-org/synapse/issues/6490)) +- Fix link in the user directory documentation. ([\#6388](https://github.com/matrix-org/synapse/issues/6388)) +- Add build instructions to the docker readme. ([\#6390](https://github.com/matrix-org/synapse/issues/6390)) +- Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md. ([\#6443](https://github.com/matrix-org/synapse/issues/6443)) +- Write some docs for the quarantine_media api. ([\#6458](https://github.com/matrix-org/synapse/issues/6458)) +- Convert CONTRIBUTING.rst to markdown (among other small fixes). ([\#6461](https://github.com/matrix-org/synapse/issues/6461)) + + +Deprecations and Removals +------------------------- + +- Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925)) +- Remove fallback for federation with old servers which lack the /federation/v1/state_ids API. 
([\#6488](https://github.com/matrix-org/synapse/issues/6488)) + + +Internal Changes +---------------- + +- Add benchmarks for structured logging and improve output performance. ([\#6266](https://github.com/matrix-org/synapse/issues/6266)) +- Improve the performance of outputting structured logging. ([\#6322](https://github.com/matrix-org/synapse/issues/6322)) +- Refactor some code in the event authentication path for clarity. ([\#6343](https://github.com/matrix-org/synapse/issues/6343), [\#6468](https://github.com/matrix-org/synapse/issues/6468), [\#6480](https://github.com/matrix-org/synapse/issues/6480)) +- Clean up some unnecessary quotation marks around the codebase. ([\#6362](https://github.com/matrix-org/synapse/issues/6362)) +- Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary. ([\#6379](https://github.com/matrix-org/synapse/issues/6379)) +- Add a test scenario to make sure room history purges don't break `/messages` in the future. ([\#6392](https://github.com/matrix-org/synapse/issues/6392)) +- Clarifications for the email configuration settings. ([\#6423](https://github.com/matrix-org/synapse/issues/6423)) +- Add more tests to the blacklist when running in worker mode. ([\#6429](https://github.com/matrix-org/synapse/issues/6429)) +- Move data store specific code out of `SQLBaseStore`. ([\#6454](https://github.com/matrix-org/synapse/issues/6454)) +- Prepare SQLBaseStore functions being moved out of the stores. ([\#6464](https://github.com/matrix-org/synapse/issues/6464)) +- Move per database functionality out of the data stores and into a dedicated `Database` class. ([\#6469](https://github.com/matrix-org/synapse/issues/6469)) +- Port synapse.rest.client.v1 to async/await. ([\#6482](https://github.com/matrix-org/synapse/issues/6482)) +- Port synapse.rest.client.v2_alpha to async/await. ([\#6483](https://github.com/matrix-org/synapse/issues/6483)) +- Port SyncHandler to async/await. ([\#6484](https://github.com/matrix-org/synapse/issues/6484)) +- Pass in `Database` object to data stores. ([\#6487](https://github.com/matrix-org/synapse/issues/6487)) + + Synapse 1.6.1 (2019-11-28) ========================== diff --git a/changelog.d/5815.feature b/changelog.d/5815.feature deleted file mode 100644 index ca4df4e7f..000000000 --- a/changelog.d/5815.feature +++ /dev/null @@ -1 +0,0 @@ -Implement per-room message retention policies. diff --git a/changelog.d/5858.feature b/changelog.d/5858.feature deleted file mode 100644 index 55ee93051..000000000 --- a/changelog.d/5858.feature +++ /dev/null @@ -1 +0,0 @@ -Add etag and count fields to key backup endpoints to help clients guess if there are new keys. diff --git a/changelog.d/5925.feature b/changelog.d/5925.feature deleted file mode 100644 index 8025cc823..000000000 --- a/changelog.d/5925.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin/v2/users endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. diff --git a/changelog.d/5925.removal b/changelog.d/5925.removal deleted file mode 100644 index cbba2855c..000000000 --- a/changelog.d/5925.removal +++ /dev/null @@ -1 +0,0 @@ -Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. 
diff --git a/changelog.d/6119.feature b/changelog.d/6119.feature deleted file mode 100644 index 1492e83c5..000000000 --- a/changelog.d/6119.feature +++ /dev/null @@ -1 +0,0 @@ -Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. \ No newline at end of file diff --git a/changelog.d/6176.feature b/changelog.d/6176.feature deleted file mode 100644 index 3c66d689d..000000000 --- a/changelog.d/6176.feature +++ /dev/null @@ -1 +0,0 @@ -Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314. diff --git a/changelog.d/6237.bugfix b/changelog.d/6237.bugfix deleted file mode 100644 index 9285600b0..000000000 --- a/changelog.d/6237.bugfix +++ /dev/null @@ -1 +0,0 @@ -Transfer non-standard power levels on room upgrade. \ No newline at end of file diff --git a/changelog.d/6241.bugfix b/changelog.d/6241.bugfix deleted file mode 100644 index 25109ca4a..000000000 --- a/changelog.d/6241.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error from the Pillow library when uploading RGBA images. diff --git a/changelog.d/6266.misc b/changelog.d/6266.misc deleted file mode 100644 index 634e421a7..000000000 --- a/changelog.d/6266.misc +++ /dev/null @@ -1 +0,0 @@ -Add benchmarks for structured logging and improve output performance. diff --git a/changelog.d/6322.misc b/changelog.d/6322.misc deleted file mode 100644 index 70ef36ca8..000000000 --- a/changelog.d/6322.misc +++ /dev/null @@ -1 +0,0 @@ -Improve the performance of outputting structured logging. diff --git a/changelog.d/6329.bugfix b/changelog.d/6329.bugfix deleted file mode 100644 index e558d13b7..000000000 --- a/changelog.d/6329.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests. \ No newline at end of file diff --git a/changelog.d/6332.bugfix b/changelog.d/6332.bugfix deleted file mode 100644 index 67d5170ba..000000000 --- a/changelog.d/6332.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices. diff --git a/changelog.d/6333.bugfix b/changelog.d/6333.bugfix deleted file mode 100644 index a25d6ef3c..000000000 --- a/changelog.d/6333.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent account data syncs getting lost across TCP replication. \ No newline at end of file diff --git a/changelog.d/6343.misc b/changelog.d/6343.misc deleted file mode 100644 index d9a44389b..000000000 --- a/changelog.d/6343.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor some code in the event authentication path for clarity. diff --git a/changelog.d/6354.feature b/changelog.d/6354.feature deleted file mode 100644 index fed9db884..000000000 --- a/changelog.d/6354.feature +++ /dev/null @@ -1 +0,0 @@ -Configure privacy preserving settings by default for the room directory. diff --git a/changelog.d/6362.misc b/changelog.d/6362.misc deleted file mode 100644 index b79a5bea9..000000000 --- a/changelog.d/6362.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some unnecessary quotation marks around the codebase. \ No newline at end of file diff --git a/changelog.d/6369.doc b/changelog.d/6369.doc deleted file mode 100644 index 6db351d7d..000000000 --- a/changelog.d/6369.doc +++ /dev/null @@ -1 +0,0 @@ -Update documentation and variables in user contributed systemd reference file. 
diff --git a/changelog.d/6379.misc b/changelog.d/6379.misc deleted file mode 100644 index 725c2e7d8..000000000 --- a/changelog.d/6379.misc +++ /dev/null @@ -1 +0,0 @@ -Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary. \ No newline at end of file diff --git a/changelog.d/6388.doc b/changelog.d/6388.doc deleted file mode 100644 index c777cb6b8..000000000 --- a/changelog.d/6388.doc +++ /dev/null @@ -1 +0,0 @@ -Fix link in the user directory documentation. diff --git a/changelog.d/6390.doc b/changelog.d/6390.doc deleted file mode 100644 index 093411bec..000000000 --- a/changelog.d/6390.doc +++ /dev/null @@ -1 +0,0 @@ -Add build instructions to the docker readme. \ No newline at end of file diff --git a/changelog.d/6392.misc b/changelog.d/6392.misc deleted file mode 100644 index a00257944..000000000 --- a/changelog.d/6392.misc +++ /dev/null @@ -1 +0,0 @@ -Add a test scenario to make sure room history purges don't break `/messages` in the future. diff --git a/changelog.d/6406.bugfix b/changelog.d/6406.bugfix deleted file mode 100644 index ca9bee084..000000000 --- a/changelog.d/6406.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug: TypeError in `register_user()` while using LDAP auth module. diff --git a/changelog.d/6408.bugfix b/changelog.d/6408.bugfix deleted file mode 100644 index c9babe599..000000000 --- a/changelog.d/6408.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an intermittent exception when handling read-receipts. diff --git a/changelog.d/6409.feature b/changelog.d/6409.feature deleted file mode 100644 index 653ff5a5a..000000000 --- a/changelog.d/6409.feature +++ /dev/null @@ -1 +0,0 @@ -Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). diff --git a/changelog.d/6420.bugfix b/changelog.d/6420.bugfix deleted file mode 100644 index aef47ccca..000000000 --- a/changelog.d/6420.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix broken guest registration when there are existing blocks of numeric user IDs. diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix deleted file mode 100644 index 7969f7f71..000000000 --- a/changelog.d/6421.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix startup error when http proxy is defined. diff --git a/changelog.d/6423.misc b/changelog.d/6423.misc deleted file mode 100644 index 9bcd5d36c..000000000 --- a/changelog.d/6423.misc +++ /dev/null @@ -1 +0,0 @@ -Clarifications for the email configuration settings. diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix deleted file mode 100644 index 3acfde421..000000000 --- a/changelog.d/6426.bugfix +++ /dev/null @@ -1 +0,0 @@ -Clean up local threepids from user on account deactivation. \ No newline at end of file diff --git a/changelog.d/6429.misc b/changelog.d/6429.misc deleted file mode 100644 index 4b32cdeac..000000000 --- a/changelog.d/6429.misc +++ /dev/null @@ -1 +0,0 @@ -Add more tests to the blacklist when running in worker mode. diff --git a/changelog.d/6434.feature b/changelog.d/6434.feature deleted file mode 100644 index affa5d50c..000000000 --- a/changelog.d/6434.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for MSC 2367, which allows specifying a reason on all membership events. diff --git a/changelog.d/6436.bugfix b/changelog.d/6436.bugfix deleted file mode 100644 index 954a4e1d8..000000000 --- a/changelog.d/6436.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where a room could become unusable with a low retention policy and a low activity. 
diff --git a/changelog.d/6443.doc b/changelog.d/6443.doc deleted file mode 100644 index 67c59f92e..000000000 --- a/changelog.d/6443.doc +++ /dev/null @@ -1 +0,0 @@ -Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md. \ No newline at end of file diff --git a/changelog.d/6449.bugfix b/changelog.d/6449.bugfix deleted file mode 100644 index 002f33c45..000000000 --- a/changelog.d/6449.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error when using synapse_port_db on a vanilla synapse db. diff --git a/changelog.d/6451.bugfix b/changelog.d/6451.bugfix deleted file mode 100644 index 23b67583e..000000000 --- a/changelog.d/6451.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix uploading multiple cross signing signatures for the same user. diff --git a/changelog.d/6454.misc b/changelog.d/6454.misc deleted file mode 100644 index 9e5259157..000000000 --- a/changelog.d/6454.misc +++ /dev/null @@ -1 +0,0 @@ -Move data store specific code out of `SQLBaseStore`. diff --git a/changelog.d/6458.doc b/changelog.d/6458.doc deleted file mode 100644 index 3a9f831d8..000000000 --- a/changelog.d/6458.doc +++ /dev/null @@ -1 +0,0 @@ -Write some docs for the quarantine_media api. diff --git a/changelog.d/6461.doc b/changelog.d/6461.doc deleted file mode 100644 index 1502fa285..000000000 --- a/changelog.d/6461.doc +++ /dev/null @@ -1 +0,0 @@ -Convert CONTRIBUTING.rst to markdown (among other small fixes). \ No newline at end of file diff --git a/changelog.d/6462.bugfix b/changelog.d/6462.bugfix deleted file mode 100644 index c43593952..000000000 --- a/changelog.d/6462.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which lead to exceptions being thrown in a loop when a cross-signed device is deleted. diff --git a/changelog.d/6464.misc b/changelog.d/6464.misc deleted file mode 100644 index bd65276ef..000000000 --- a/changelog.d/6464.misc +++ /dev/null @@ -1 +0,0 @@ -Prepare SQLBaseStore functions being moved out of the stores. diff --git a/changelog.d/6468.misc b/changelog.d/6468.misc deleted file mode 100644 index d9a44389b..000000000 --- a/changelog.d/6468.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor some code in the event authentication path for clarity. diff --git a/changelog.d/6469.misc b/changelog.d/6469.misc deleted file mode 100644 index 32216b904..000000000 --- a/changelog.d/6469.misc +++ /dev/null @@ -1 +0,0 @@ -Move per database functionality out of the data stores and into a dedicated `Database` class. diff --git a/changelog.d/6470.bugfix b/changelog.d/6470.bugfix deleted file mode 100644 index c08b34c14..000000000 --- a/changelog.d/6470.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process. diff --git a/changelog.d/6472.bugfix b/changelog.d/6472.bugfix deleted file mode 100644 index 598efb79f..000000000 --- a/changelog.d/6472.bugfix +++ /dev/null @@ -1 +0,0 @@ -Improve sanity-checking when receiving events over federation. diff --git a/changelog.d/6480.misc b/changelog.d/6480.misc deleted file mode 100644 index d9a44389b..000000000 --- a/changelog.d/6480.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor some code in the event authentication path for clarity. diff --git a/changelog.d/6482.misc b/changelog.d/6482.misc deleted file mode 100644 index bdef9cf40..000000000 --- a/changelog.d/6482.misc +++ /dev/null @@ -1 +0,0 @@ -Port synapse.rest.client.v1 to async/await. 
diff --git a/changelog.d/6483.misc b/changelog.d/6483.misc deleted file mode 100644 index cb2cd2bc3..000000000 --- a/changelog.d/6483.misc +++ /dev/null @@ -1 +0,0 @@ -Port synapse.rest.client.v2_alpha to async/await. diff --git a/changelog.d/6484.misc b/changelog.d/6484.misc deleted file mode 100644 index b7cd60001..000000000 --- a/changelog.d/6484.misc +++ /dev/null @@ -1 +0,0 @@ -Port SyncHandler to async/await. diff --git a/changelog.d/6487.misc b/changelog.d/6487.misc deleted file mode 100644 index 18b49b9cb..000000000 --- a/changelog.d/6487.misc +++ /dev/null @@ -1 +0,0 @@ -Pass in `Database` object to data stores. diff --git a/changelog.d/6488.removal b/changelog.d/6488.removal deleted file mode 100644 index 06e034a21..000000000 --- a/changelog.d/6488.removal +++ /dev/null @@ -1 +0,0 @@ -Remove fallback for federation with old servers which lack the /federation/v1/state_ids API. diff --git a/changelog.d/6490.doc b/changelog.d/6490.doc deleted file mode 100644 index 6db351d7d..000000000 --- a/changelog.d/6490.doc +++ /dev/null @@ -1 +0,0 @@ -Update documentation and variables in user contributed systemd reference file. diff --git a/changelog.d/6491.bugfix b/changelog.d/6491.bugfix deleted file mode 100644 index 78204693b..000000000 --- a/changelog.d/6491.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix inaccurate per-block Prometheus metrics. diff --git a/changelog.d/6493.bugfix b/changelog.d/6493.bugfix deleted file mode 100644 index 440c02efb..000000000 --- a/changelog.d/6493.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix small performance regression for sending invites. diff --git a/changelog.d/6494.bugfix b/changelog.d/6494.bugfix deleted file mode 100644 index 78726d5d7..000000000 --- a/changelog.d/6494.bugfix +++ /dev/null @@ -1 +0,0 @@ -Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. diff --git a/synapse/__init__.py b/synapse/__init__.py index f99de2f3f..c67a51a8d 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.6.1" +__version__ = "1.7.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 5e8abe9013427e8ad452c4652dfcb40da05c246e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 9 Dec 2019 14:54:33 +0000 Subject: [PATCH 20/60] Better errors regarding changing avatar_url (#6497) --- changelog.d/6497.bugfix | 1 + synapse/rest/client/v1/profile.py | 11 ++++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6497.bugfix diff --git a/changelog.d/6497.bugfix b/changelog.d/6497.bugfix new file mode 100644 index 000000000..92ed08fc4 --- /dev/null +++ b/changelog.d/6497.bugfix @@ -0,0 +1 @@ +Fix error message when setting your profile's avatar URL mentioning displaynames, and prevent NoneType avatar_urls. 
\ No newline at end of file diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 1eac8a44c..4f47562c1 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -103,11 +103,16 @@ class ProfileAvatarURLRestServlet(RestServlet): content = parse_json_object_from_request(request) try: - new_name = content["avatar_url"] + new_avatar_url = content.get("avatar_url") except Exception: - return 400, "Unable to parse name" + return 400, "Unable to parse avatar_url" - await self.profile_handler.set_avatar_url(user, requester, new_name, is_admin) + if new_avatar_url is None: + return 400, "Missing required key: avatar_url" + + await self.profile_handler.set_avatar_url( + user, requester, new_avatar_url, is_admin + ) return 200, {} From 21aa0a458fa8dc035895392a30c389bbb8f51ef6 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Dec 2019 14:57:09 +0000 Subject: [PATCH 21/60] Update CHANGES.md --- CHANGES.md | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 0ef9794aa..2a247690d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,14 +4,14 @@ Synapse 1.7.0rc1 (2019-12-09) Features -------- -- Implement per-room message retention policies. ([\#5815](https://github.com/matrix-org/synapse/issues/5815)) +- Implement per-room message retention policies. ([\#5815](https://github.com/matrix-org/synapse/issues/5815), [\#6436](https://github.com/matrix-org/synapse/issues/6436)) - Add etag and count fields to key backup endpoints to help clients guess if there are new keys. ([\#5858](https://github.com/matrix-org/synapse/issues/5858)) -- Add admin/v2/users endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925)) +- Add `/admin/v2/users` endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925)) - Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. ([\#6119](https://github.com/matrix-org/synapse/issues/6119)) - Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314. ([\#6176](https://github.com/matrix-org/synapse/issues/6176)) -- Configure privacy preserving settings by default for the room directory. ([\#6354](https://github.com/matrix-org/synapse/issues/6354)) +- Configure privacy-preserving settings by default for the room directory. ([\#6354](https://github.com/matrix-org/synapse/issues/6354)) - Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). ([\#6409](https://github.com/matrix-org/synapse/issues/6409)) -- Add support for MSC 2367, which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434)) +- Add support for [MSC 2367](https://github.com/matrix-org/matrix-doc/pull/2367), which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434)) Bugfixes @@ -26,8 +26,6 @@ Bugfixes - Fix an intermittent exception when handling read-receipts. ([\#6408](https://github.com/matrix-org/synapse/issues/6408)) - Fix broken guest registration when there are existing blocks of numeric user IDs. ([\#6420](https://github.com/matrix-org/synapse/issues/6420)) - Fix startup error when http proxy is defined. 
([\#6421](https://github.com/matrix-org/synapse/issues/6421)) -- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426)) -- Fix a bug where a room could become unusable with a low retention policy and a low activity. ([\#6436](https://github.com/matrix-org/synapse/issues/6436)) - Fix error when using synapse_port_db on a vanilla synapse db. ([\#6449](https://github.com/matrix-org/synapse/issues/6449)) - Fix uploading multiple cross signing signatures for the same user. ([\#6451](https://github.com/matrix-org/synapse/issues/6451)) - Fix bug which lead to exceptions being thrown in a loop when a cross-signed device is deleted. ([\#6462](https://github.com/matrix-org/synapse/issues/6462)) - Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process. ([\#6470](https://github.com/matrix-org/synapse/issues/6470)) - Improve sanity-checking when receiving events over federation. ([\#6472](https://github.com/matrix-org/synapse/issues/6472)) - Fix inaccurate per-block Prometheus metrics. ([\#6491](https://github.com/matrix-org/synapse/issues/6491)) - Fix small performance regression for sending invites. ([\#6493](https://github.com/matrix-org/synapse/issues/6493)) - Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. ([\#6494](https://github.com/matrix-org/synapse/issues/6494)) @@ -67,14 +65,10 @@ Internal Changes - Add a test scenario to make sure room history purges don't break `/messages` in the future. ([\#6392](https://github.com/matrix-org/synapse/issues/6392)) - Clarifications for the email configuration settings. ([\#6423](https://github.com/matrix-org/synapse/issues/6423)) - Add more tests to the blacklist when running in worker mode. ([\#6429](https://github.com/matrix-org/synapse/issues/6429)) -- Move data store specific code out of `SQLBaseStore`. ([\#6454](https://github.com/matrix-org/synapse/issues/6454)) -- Prepare SQLBaseStore functions being moved out of the stores. ([\#6464](https://github.com/matrix-org/synapse/issues/6464)) -- Move per database functionality out of the data stores and into a dedicated `Database` class. ([\#6469](https://github.com/matrix-org/synapse/issues/6469)) +- Refactor data store layer to support multiple databases in the future. ([\#6454](https://github.com/matrix-org/synapse/issues/6454), [\#6464](https://github.com/matrix-org/synapse/issues/6464), [\#6469](https://github.com/matrix-org/synapse/issues/6469), [\#6487](https://github.com/matrix-org/synapse/issues/6487)) - Port synapse.rest.client.v1 to async/await. ([\#6482](https://github.com/matrix-org/synapse/issues/6482)) - Port synapse.rest.client.v2_alpha to async/await. ([\#6483](https://github.com/matrix-org/synapse/issues/6483)) - Port SyncHandler to async/await. ([\#6484](https://github.com/matrix-org/synapse/issues/6484)) - Synapse 1.6.1 (2019-11-28) ========================== From 4cade966164469b6517e821d27481e7ed019288e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Dec 2019 15:09:13 +0000 Subject: [PATCH 22/60] Fix support for SQLite 3.7. Support for partial indices was added in SQLite 3.8.0, so we need to use the background updates, which handle this correctly.
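To make the approach concrete — a minimal sketch mirroring the registration that the diff below adds (the table, index and update names are taken from that diff; this is an excerpt of the store's setup code, not a standalone program):

```python
# The schema delta no longer runs "CREATE INDEX ... WHERE ..." inline
# (SQLite < 3.8.0 rejects partial indices); instead it queues a named row
# in the background_updates table, and the store registers how to build
# the index so the background updater can pick an engine-appropriate
# strategy at runtime:
self.db.updates.register_background_index_update(
    "redactions_have_censored_ts_idx",
    index_name="redactions_have_censored_ts",
    table="redactions",
    columns=["received_ts"],
    where_clause="NOT have_censored",  # the partial-index condition
)
```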
--- .../data_stores/main/events_bg_updates.py | 16 ++++++++++++++++ .../main/schema/delta/56/redaction_censor.sql | 4 +++- .../main/schema/delta/56/redaction_censor2.sql | 4 +++- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index efee17b92..efb9cd57a 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -90,6 +90,22 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): "event_store_labels", self._event_store_labels ) + self.db.updates.register_background_index_update( + "redactions_have_censored_idx", + index_name="redactions_have_censored", + table="redactions", + columns=["event_id"], + where_clause="NOT have_censored", + ) + + self.db.updates.register_background_index_update( + "redactions_have_censored_ts_idx", + index_name="redactions_have_censored_ts", + table="redactions", + columns=["received_ts"], + where_clause="NOT have_censored", + ) + @defer.inlineCallbacks def _background_reindex_fields_sender(self, progress, batch_size): target_min_stream_id = progress["target_min_stream_id_inclusive"] diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql index fe51b0230..a8583b52c 100644 --- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql +++ b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql @@ -14,4 +14,6 @@ */ ALTER TABLE redactions ADD COLUMN have_censored BOOL NOT NULL DEFAULT false; -CREATE INDEX redactions_have_censored ON redactions(event_id) WHERE not have_censored; + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('redactions_have_censored_idx', '{}'); diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql index 77a5eca49..49ce35d79 100644 --- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql +++ b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql @@ -14,7 +14,9 @@ */ ALTER TABLE redactions ADD COLUMN received_ts BIGINT; -CREATE INDEX redactions_have_censored_ts ON redactions(received_ts) WHERE not have_censored; INSERT INTO background_updates (update_name, progress_json) VALUES ('redactions_received_ts', '{}'); + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('redactions_have_censored_ts_idx', '{}'); From 52fe9788bcc2c4b422f387421667698187fc2135 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Dec 2019 15:19:32 +0000 Subject: [PATCH 23/60] Newsfile --- changelog.d/6499.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6499.bugfix diff --git a/changelog.d/6499.bugfix b/changelog.d/6499.bugfix new file mode 100644 index 000000000..299feba0f --- /dev/null +++ b/changelog.d/6499.bugfix @@ -0,0 +1 @@ +Fix support for SQLite 3.7. From d95736a2bd189c38bc168340975bf593e89f17f4 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 10 Dec 2019 10:05:33 +0000 Subject: [PATCH 24/60] Fix erroneous reference for new room directory defaults. --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 2a247690d..c30ea4718 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features - Add `/admin/v2/users` endpoint with pagination. 
Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925)) - Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. ([\#6119](https://github.com/matrix-org/synapse/issues/6119)) - Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314. ([\#6176](https://github.com/matrix-org/synapse/issues/6176)) -- Configure privacy-preserving settings by default for the room directory. ([\#6354](https://github.com/matrix-org/synapse/issues/6354)) +- Configure privacy-preserving settings by default for the room directory. ([\#6355](https://github.com/matrix-org/synapse/issues/6355)) - Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). ([\#6409](https://github.com/matrix-org/synapse/issues/6409)) - Add support for [MSC 2367](https://github.com/matrix-org/matrix-doc/pull/2367), which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434)) From 353396e3a7c00c2634e3a175e9aeac98c6a4598f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:12:56 +0000 Subject: [PATCH 25/60] Port handlers.account_data to async/await. --- synapse/handlers/account_data.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 2d7e6df6e..fe44d62a1 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - class AccountDataEventSource(object): def __init__(self, hs): @@ -23,15 +21,14 @@ class AccountDataEventSource(object): def get_current_key(self, direction="f"): return self.store.get_max_account_data_stream_id() - @defer.inlineCallbacks - def get_new_events(self, user, from_key, **kwargs): + async def get_new_events(self, user, from_key, **kwargs): user_id = user.to_string() last_stream_id = from_key - current_stream_id = yield self.store.get_max_account_data_stream_id() + current_stream_id = await self.store.get_max_account_data_stream_id() results = [] - tags = yield self.store.get_updated_tags(user_id, last_stream_id) + tags = await self.store.get_updated_tags(user_id, last_stream_id) for room_id, room_tags in tags.items(): results.append( @@ -41,7 +38,7 @@ class AccountDataEventSource(object): ( account_data, room_account_data, - ) = yield self.store.get_updated_account_data_for_user(user_id, last_stream_id) + ) = await self.store.get_updated_account_data_for_user(user_id, last_stream_id) for account_data_type, content in account_data.items(): results.append({"type": account_data_type, "content": content}) @@ -54,6 +51,5 @@ class AccountDataEventSource(object): return results, current_stream_id - @defer.inlineCallbacks - def get_pagination_rows(self, user, config, key): + async def get_pagination_rows(self, user, config, key): return [], config.to_id From 9a2223d4c8c2a46f7ab072597bdba2614bc3e6ea Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:22:12 +0000 Subject: [PATCH 26/60] Fix make_deferred_yieldable to work with coroutines --- synapse/logging/context.py | 9 ++++++++- tests/util/test_logcontext.py | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff 
--git a/synapse/logging/context.py b/synapse/logging/context.py index 2c1fb9dda..9f484ce59 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -23,6 +23,7 @@ them. See doc/log_contexts.rst for details on how this works. """ +import inspect import logging import threading import types @@ -612,7 +613,8 @@ def run_in_background(f, *args, **kwargs): def make_deferred_yieldable(deferred): - """Given a deferred, make it follow the Synapse logcontext rules: + """Given a deferred (or coroutine), make it follow the Synapse logcontext + rules: If the deferred has completed (or is not actually a Deferred), essentially does nothing (just returns another completed deferred with the @@ -624,6 +626,11 @@ def make_deferred_yieldable(deferred): (This is more-or-less the opposite operation to run_in_background.) """ + if inspect.isawaitable(deferred): + # If we're given a coroutine we need to convert it to a deferred so that + # we can attach callbacks (and not immediately return). + deferred = defer.ensureDeferred(deferred) + if not isinstance(deferred, defer.Deferred): return deferred diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 8b8455c8b..281b32c4b 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -179,6 +179,30 @@ class LoggingContextTestCase(unittest.TestCase): nested_context = nested_logging_context(suffix="bar") self.assertEqual(nested_context.request, "foo-bar") + @defer.inlineCallbacks + def test_make_deferred_yieldable_with_await(self): + # an async function which returns an incomplete coroutine, but doesn't + # follow the synapse rules. + + async def blocking_function(): + d = defer.Deferred() + reactor.callLater(0, d.callback, None) + await d + + sentinel_context = LoggingContext.current_context() + + with LoggingContext() as context_one: + context_one.request = "one" + + d1 = make_deferred_yieldable(blocking_function()) + # make sure that the context was reset by make_deferred_yieldable + self.assertIs(LoggingContext.current_context(), sentinel_context) + + yield d1 + + # now it should be restored + self._check_test_key("one") + # a function which returns a deferred which has been "called", but # which had a function which returned another incomplete deferred on From f5bb1531b7307bc2d826789746e7c82fa4dbf36c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:23:52 +0000 Subject: [PATCH 27/60] Newsfile --- changelog.d/6505.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6505.misc diff --git a/changelog.d/6505.misc b/changelog.d/6505.misc new file mode 100644 index 000000000..3a75b2d9d --- /dev/null +++ b/changelog.d/6505.misc @@ -0,0 +1 @@ +Make `make_deferred_yieldable` work with async/await. From b1e7012deea2d254ecbf92d1fed429c34a65db54 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:29:44 +0000 Subject: [PATCH 28/60] Newsfile --- changelog.d/6506.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6506.misc diff --git a/changelog.d/6506.misc b/changelog.d/6506.misc new file mode 100644 index 000000000..99d7a70bc --- /dev/null +++ b/changelog.d/6506.misc @@ -0,0 +1 @@ +Remove `SnapshotCache` in favour of `ResponseCache`.
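What the `inspect.isawaitable` branch above enables, roughly: `@defer.inlineCallbacks` code can now hand a coroutine straight to `make_deferred_yieldable`, as the new test exercises. A minimal usage sketch (the function names here are illustrative, not from the codebase):

```python
from twisted.internet import defer

from synapse.logging.context import make_deferred_yieldable


async def fetch_remote_keys():
    # A coroutine that does not follow the Synapse logcontext rules.
    ...


@defer.inlineCallbacks
def legacy_handler():
    # make_deferred_yieldable now wraps awaitables with ensureDeferred()
    # internally, so yielding on a coroutine works from inlineCallbacks
    # code, and the logcontext is reset and later restored around the wait.
    result = yield make_deferred_yieldable(fetch_remote_keys())
    return result
```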
From cc5f6eb6083470f3980f93f937c6251be5e971dd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:39:31 +0000 Subject: [PATCH 29/60] Only start censor background job after indices are created --- synapse/storage/data_stores/main/events.py | 7 +++++++ .../data_stores/main/schema/delta/56/redaction_censor2.sql | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index da1529f6e..bd670f002 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1053,6 +1053,13 @@ class EventsStore( if self.hs.config.redaction_retention_period is None: return + if self.db.updates.has_completed_background_update( + "redactions_have_censored_ts_idx" + ): + # We don't want to run this until the appropriate index has been + # created. + return + before_ts = self._clock.time_msec() - self.hs.config.redaction_retention_period # We fetch all redactions that: diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql index 49ce35d79..6c36bd546 100644 --- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql +++ b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql @@ -18,5 +18,5 @@ ALTER TABLE redactions ADD COLUMN received_ts BIGINT; INSERT INTO background_updates (update_name, progress_json) VALUES ('redactions_received_ts', '{}'); -INSERT INTO background_updates (update_name, progress_json) VALUES - ('redactions_have_censored_ts_idx', '{}'); +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('redactions_have_censored_ts_idx', '{}', 'redactions_have_censored_idx'); From 2ac78438d8c7992c9c1ab347830c5498205f7008 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Dec 2019 12:31:03 +0000 Subject: [PATCH 30/60] Make the PusherSlaveStore inherit from the slave RoomStore So that it has access to the get_retention_policy_for_room function which is required by filter_events_for_client. 
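For context on why adding the slave `RoomStore` to the base classes is sufficient: Synapse composes its stores out of mixin classes, so a method defined on any base becomes available on the combined class through Python's method resolution order. A minimal, illustrative sketch (the class bodies are elided stand-ins, not the real implementations):

```python
class RoomStore:
    def get_retention_policy_for_room(self, room_id):
        ...  # provided by the slave RoomStore mixin


class SlavedEventStore:
    pass


# Listing RoomStore among the bases is all that's needed for
# filter_events_for_client to find get_retention_policy_for_room
# on instances of the combined store.
class PusherSlaveStore(SlavedEventStore, RoomStore):
    pass


assert hasattr(PusherSlaveStore, "get_retention_policy_for_room")
```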
--- synapse/app/pusher.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 01a5ffc36..dd52a9fc2 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -33,6 +33,7 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.pushers import SlavedPusherStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore +from synapse.replication.slave.storage.room import RoomStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage import DataStore @@ -45,7 +46,11 @@ logger = logging.getLogger("synapse.app.pusher") class PusherSlaveStore( - SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore, SlavedAccountDataStore + SlavedEventStore, + SlavedPusherStore, + SlavedReceiptsStore, + SlavedAccountDataStore, + RoomStore, ): update_pusher_last_stream_ordering_and_success = __func__( DataStore.update_pusher_last_stream_ordering_and_success From ec5fdd13339cb9bd2bb87537fdac524da2cad614 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Dec 2019 12:34:33 +0000 Subject: [PATCH 31/60] Changelog --- changelog.d/6507.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6507.bugfix diff --git a/changelog.d/6507.bugfix b/changelog.d/6507.bugfix new file mode 100644 index 000000000..7f95da52c --- /dev/null +++ b/changelog.d/6507.bugfix @@ -0,0 +1 @@ +Fix pusher worker failing because it can't retrieve retention policies for rooms. From 31da85e467250a7d638650e14782290fb4476087 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 12:42:58 +0000 Subject: [PATCH 32/60] Convert _censor_redactions to async since it awaits on coroutines --- synapse/storage/data_stores/main/events.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index bd670f002..998bba1aa 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1039,22 +1039,20 @@ class EventsStore( }, ) - @defer.inlineCallbacks - def _censor_redactions(self): + async def _censor_redactions(self): """Censors all redactions older than the configured period that haven't been censored yet. By censor we mean update the event_json table with the redacted event. - - Returns: - Deferred """ if self.hs.config.redaction_retention_period is None: return - if self.db.updates.has_completed_background_update( - "redactions_have_censored_ts_idx" + if not ( + await self.db.updates.has_completed_background_update( + "redactions_have_censored_ts_idx" + ) ): # We don't want to run this until the appropriate index has been # created. @@ -1081,15 +1079,15 @@ class EventsStore( LIMIT ? 
""" - rows = yield self.db.execute( + rows = await self.db.execute( "_censor_redactions_fetch", None, sql, before_ts, 100 ) updates = [] for redaction_id, event_id in rows: - redaction_event = yield self.get_event(redaction_id, allow_none=True) - original_event = yield self.get_event( + redaction_event = await self.get_event(redaction_id, allow_none=True) + original_event = await self.get_event( event_id, allow_rejected=True, allow_none=True ) @@ -1122,7 +1120,7 @@ class EventsStore( updatevalues={"have_censored": True}, ) - yield self.db.runInteraction("_update_censor_txn", _update_censor_txn) + await self.db.runInteraction("_update_censor_txn", _update_censor_txn) def _censor_event_txn(self, txn, event_id, pruned_json): """Censor an event by replacing its JSON in the event_json table with the From 52346990c8850a9002209b4b24c8f65b11d27ab4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 12:45:16 +0000 Subject: [PATCH 33/60] Drop unused index --- .../data_stores/main/events_bg_updates.py | 8 -------- .../main/schema/delta/56/redaction_censor.sql | 3 --- .../main/schema/delta/56/redaction_censor2.sql | 4 ++-- .../main/schema/delta/56/redaction_censor4.sql | 16 ++++++++++++++++ 4 files changed, 18 insertions(+), 13 deletions(-) create mode 100644 synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index efb9cd57a..5177b7101 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -90,14 +90,6 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): "event_store_labels", self._event_store_labels ) - self.db.updates.register_background_index_update( - "redactions_have_censored_idx", - index_name="redactions_have_censored", - table="redactions", - columns=["event_id"], - where_clause="NOT have_censored", - ) - self.db.updates.register_background_index_update( "redactions_have_censored_ts_idx", index_name="redactions_have_censored_ts", diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql index a8583b52c..ea95db0ed 100644 --- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql +++ b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql @@ -14,6 +14,3 @@ */ ALTER TABLE redactions ADD COLUMN have_censored BOOL NOT NULL DEFAULT false; - -INSERT INTO background_updates (update_name, progress_json) VALUES - ('redactions_have_censored_idx', '{}'); diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql index 6c36bd546..49ce35d79 100644 --- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql +++ b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql @@ -18,5 +18,5 @@ ALTER TABLE redactions ADD COLUMN received_ts BIGINT; INSERT INTO background_updates (update_name, progress_json) VALUES ('redactions_received_ts', '{}'); -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('redactions_have_censored_ts_idx', '{}', 'redactions_have_censored_idx'); +INSERT INTO background_updates (update_name, progress_json) VALUES + ('redactions_have_censored_ts_idx', '{}'); diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql 
b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql new file mode 100644 index 000000000..b7550f6f4 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql @@ -0,0 +1,16 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +DROP INDEX IF EXISTS redactions_have_censored; From 3bd049bbb771c18ba20aa724f240a82a394d0ad2 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Dec 2019 13:05:35 +0000 Subject: [PATCH 34/60] Give the server config to the RoomWorkerStore --- synapse/storage/data_stores/main/room.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index 0148be20d..aa476d0fb 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -46,6 +46,11 @@ RatelimitOverride = collections.namedtuple( class RoomWorkerStore(SQLBaseStore): + def __init__(self, database: Database, db_conn, hs): + super(RoomWorkerStore, self).__init__(database, db_conn, hs) + + self.config = hs.config + def get_room(self, room_id): """Retrieve a room. From 451ec9b8b96c17dcd465981fb8715f071a9316b4 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Dec 2019 13:06:41 +0000 Subject: [PATCH 35/60] Changelog --- changelog.d/6509.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6509.bugfix diff --git a/changelog.d/6509.bugfix b/changelog.d/6509.bugfix new file mode 100644 index 000000000..7f95da52c --- /dev/null +++ b/changelog.d/6509.bugfix @@ -0,0 +1 @@ +Fix pusher worker failing because it can't retrieve retention policies for rooms. From ffeafade4879a1135ab6795d4ba0c11131f82f01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 13:17:39 +0000 Subject: [PATCH 36/60] Update comment --- synapse/logging/context.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 9f484ce59..6747f29e6 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -627,8 +627,10 @@ def make_deferred_yieldable(deferred): (This is more-or-less the opposite operation to run_in_background.) """ if inspect.isawaitable(deferred): - # If we're given a coroutine we need to convert it to a deferred so that - # we can attach callbacks (and not immediately return). + # If we're given a coroutine we convert it to a deferred so that we + # run it and find out if it immediately finishes; if it does then we + # don't need to fiddle with log contexts at all and can return + # immediately. deferred = defer.ensureDeferred(deferred) From 663238aeb4ff1df2e857a12355fdf7cf76dcd6d9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 17:42:14 +0000 Subject: [PATCH 37/60] Phone home stats DB reporting should not assume a single DB.
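A sketch of the resulting reporting logic — `build_db_stats` is a hypothetical wrapper added here for illustration, and the engine name in the comment is just an example:

```python
def build_db_stats(hs):
    # Engine info now comes from the main data store's Database object,
    # rather than from a homeserver-wide attribute that assumed a single
    # database.
    engine = hs.get_datastore().db.engine
    return {
        "database_engine": engine.module.__name__,  # e.g. "psycopg2"
        "database_server_version": engine.server_version,
    }
```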
--- synapse/app/homeserver.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index df65d0a98..032010600 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -519,8 +519,10 @@ def phone_stats_home(hs, stats, stats_process=_stats_process): # Database version # - stats["database_engine"] = hs.database_engine.module.__name__ - stats["database_server_version"] = hs.database_engine.server_version + # This only reports info about the *main* database. + stats["database_engine"] = hs.get_datastore().db.engine.module.__name__ + stats["database_server_version"] = hs.get_datastore().db.engine.server_version + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) try: yield hs.get_proxied_http_client().put_json( From accd343f9104653c0e89d79101ae6ae767f7aa35 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 13:22:42 +0000 Subject: [PATCH 38/60] Newsfile --- changelog.d/6510.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6510.misc diff --git a/changelog.d/6510.misc b/changelog.d/6510.misc new file mode 100644 index 000000000..214f06539 --- /dev/null +++ b/changelog.d/6510.misc @@ -0,0 +1 @@ +Change phone home stats to not assume there is a single database and report information about the database used by the main data store. From 28b758fa0f396aafa92cdc96ff716469a3b083d4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Dec 2019 17:42:26 +0000 Subject: [PATCH 39/60] Silence mypy errors for files outside those specified --- mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mypy.ini b/mypy.ini index 1d77c0ecc..a66434b76 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,7 +1,7 @@ [mypy] namespace_packages = True plugins = mypy_zope:plugin -follow_imports = normal +follow_imports = silent check_untyped_defs = True show_error_codes = True show_traceback = True From 4643bb2a37dcd6302fd5b9e185ea5d0f4eb0df8c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 13:36:00 +0000 Subject: [PATCH 40/60] Newsfile --- changelog.d/6512.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6512.misc diff --git a/changelog.d/6512.misc b/changelog.d/6512.misc new file mode 100644 index 000000000..37a8099ee --- /dev/null +++ b/changelog.d/6512.misc @@ -0,0 +1 @@ +Silence mypy errors for files outside those specified. From ae49d29ef1dabec25dc348bf11c2919d28d1cef3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Dec 2019 13:55:03 +0000 Subject: [PATCH 41/60] Fixup changelogs --- changelog.d/6507.bugfix | 2 +- changelog.d/6509.bugfix | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog.d/6507.bugfix b/changelog.d/6507.bugfix index 7f95da52c..d767a6237 100644 --- a/changelog.d/6507.bugfix +++ b/changelog.d/6507.bugfix @@ -1 +1 @@ -Fix pusher worker failing because it can't retrieve retention policies for rooms. +Fix regression where sending email push would not work when using a pusher worker. diff --git a/changelog.d/6509.bugfix b/changelog.d/6509.bugfix index 7f95da52c..d767a6237 100644 --- a/changelog.d/6509.bugfix +++ b/changelog.d/6509.bugfix @@ -1 +1 @@ -Fix pusher worker failing because it can't retrieve retention policies for rooms. +Fix regression where sending email push would not work when using a pusher worker. 
From 257ef2c727abbb289c460b930e8bac18ab80e053 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:13:15 +0000 Subject: [PATCH 42/60] Port handlers.account_validity to async/await. --- synapse/handlers/account_data.py | 2 +- synapse/handlers/account_validity.py | 86 ++++++++++----------- tests/rest/client/v2_alpha/test_register.py | 3 +- 3 files changed, 42 insertions(+), 49 deletions(-) diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index fe44d62a1..20ec1ca01 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -25,7 +25,7 @@ class AccountDataEventSource(object): user_id = user.to_string() last_stream_id = from_key - current_stream_id = await self.store.get_max_account_data_stream_id() + current_stream_id = self.store.get_max_account_data_stream_id() results = [] tags = await self.store.get_updated_tags(user_id, last_stream_id) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index d04e0fe57..829f52eca 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -18,8 +18,7 @@ import email.utils import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText - -from twisted.internet import defer +from typing import List from synapse.api.errors import StoreError from synapse.logging.context import make_deferred_yieldable @@ -78,42 +77,39 @@ class AccountValidityHandler(object): # run as a background process to make sure that the database transactions # have a logcontext to report to return run_as_background_process( - "send_renewals", self.send_renewal_emails + "send_renewals", self._send_renewal_emails ) self.clock.looping_call(send_emails, 30 * 60 * 1000) - @defer.inlineCallbacks - def send_renewal_emails(self): + async def _send_renewal_emails(self): """Gets the list of users whose account is expiring in the amount of time configured in the ``renew_at`` parameter from the ``account_validity`` configuration, and sends renewal emails to all of these users as long as they have an email 3PID attached to their account. """ - expiring_users = yield self.store.get_users_expiring_soon() + expiring_users = await self.store.get_users_expiring_soon() if expiring_users: for user in expiring_users: - yield self._send_renewal_email( + await self._send_renewal_email( user_id=user["user_id"], expiration_ts=user["expiration_ts_ms"] ) - @defer.inlineCallbacks - def send_renewal_email_to_user(self, user_id): - expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) - yield self._send_renewal_email(user_id, expiration_ts) + async def send_renewal_email_to_user(self, user_id: str): + expiration_ts = await self.store.get_expiration_ts_for_user(user_id) + await self._send_renewal_email(user_id, expiration_ts) - @defer.inlineCallbacks - def _send_renewal_email(self, user_id, expiration_ts): + async def _send_renewal_email(self, user_id: str, expiration_ts: int): """Sends out a renewal email to every email address attached to the given user with a unique link allowing them to renew their account. Args: - user_id (str): ID of the user to send email(s) to. - expiration_ts (int): Timestamp in milliseconds for the expiration date of + user_id: ID of the user to send email(s) to. + expiration_ts: Timestamp in milliseconds for the expiration date of this user's account (used in the email templates). 
""" - addresses = yield self._get_email_addresses_for_user(user_id) + addresses = await self._get_email_addresses_for_user(user_id) # Stop right here if the user doesn't have at least one email address. # In this case, they will have to ask their server admin to renew their @@ -125,7 +121,7 @@ class AccountValidityHandler(object): return try: - user_display_name = yield self.store.get_profile_displayname( + user_display_name = await self.store.get_profile_displayname( UserID.from_string(user_id).localpart ) if user_display_name is None: @@ -133,7 +129,7 @@ class AccountValidityHandler(object): except StoreError: user_display_name = user_id - renewal_token = yield self._get_renewal_token(user_id) + renewal_token = await self._get_renewal_token(user_id) url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % ( self.hs.config.public_baseurl, renewal_token, @@ -165,7 +161,7 @@ class AccountValidityHandler(object): logger.info("Sending renewal email to %s", address) - yield make_deferred_yieldable( + await make_deferred_yieldable( self.sendmail( self.hs.config.email_smtp_host, self._raw_from, @@ -180,19 +176,18 @@ class AccountValidityHandler(object): ) ) - yield self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) + await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) - @defer.inlineCallbacks - def _get_email_addresses_for_user(self, user_id): + async def _get_email_addresses_for_user(self, user_id: str) -> List[str]: """Retrieve the list of email addresses attached to a user's account. Args: - user_id (str): ID of the user to lookup email addresses for. + user_id: ID of the user to lookup email addresses for. Returns: - defer.Deferred[list[str]]: Email addresses for this account. + Email addresses for this account. """ - threepids = yield self.store.user_get_threepids(user_id) + threepids = await self.store.user_get_threepids(user_id) addresses = [] for threepid in threepids: @@ -201,16 +196,15 @@ class AccountValidityHandler(object): return addresses - @defer.inlineCallbacks - def _get_renewal_token(self, user_id): + async def _get_renewal_token(self, user_id: str) -> str: """Generates a 32-byte long random string that will be inserted into the user's renewal email's unique link, then saves it into the database. Args: - user_id (str): ID of the user to generate a string for. + user_id: ID of the user to generate a string for. Returns: - defer.Deferred[str]: The generated string. + The generated string. Raises: StoreError(500): Couldn't generate a unique string after 5 attempts. @@ -219,52 +213,52 @@ class AccountValidityHandler(object): while attempts < 5: try: renewal_token = stringutils.random_string(32) - yield self.store.set_renewal_token_for_user(user_id, renewal_token) + await self.store.set_renewal_token_for_user(user_id, renewal_token) return renewal_token except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") - @defer.inlineCallbacks - def renew_account(self, renewal_token): + async def renew_account(self, renewal_token: str) -> bool: """Renews the account attached to a given renewal token by pushing back the expiration date by the current validity period in the server's configuration. Args: - renewal_token (str): Token sent with the renewal request. + renewal_token: Token sent with the renewal request. Returns: - bool: Whether the provided token is valid. + Whether the provided token is valid. 
""" try: - user_id = yield self.store.get_user_from_renewal_token(renewal_token) + user_id = await self.store.get_user_from_renewal_token(renewal_token) except StoreError: - defer.returnValue(False) + return False logger.debug("Renewing an account for user %s", user_id) - yield self.renew_account_for_user(user_id) + await self.renew_account_for_user(user_id) - defer.returnValue(True) + return True - @defer.inlineCallbacks - def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False): + async def renew_account_for_user( + self, user_id: str, expiration_ts: int = None, email_sent: bool = False + ) -> int: """Renews the account attached to a given user by pushing back the expiration date by the current validity period in the server's configuration. Args: - renewal_token (str): Token sent with the renewal request. - expiration_ts (int): New expiration date. Defaults to now + validity period. - email_sent (bool): Whether an email has been sent for this validity period. + renewal_token: Token sent with the renewal request. + expiration_ts: New expiration date. Defaults to now + validity period. + email_sen: Whether an email has been sent for this validity period. Defaults to False. Returns: - defer.Deferred[int]: New expiration date for this account, as a timestamp - in milliseconds since epoch. + New expiration date for this account, as a timestamp in + milliseconds since epoch. """ if expiration_ts is None: expiration_ts = self.clock.time_msec() + self._account_validity.period - yield self.store.set_account_validity_for_user( + await self.store.set_account_validity_for_user( user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent ) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index c0d0d2b44..d0c997e38 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -391,9 +391,8 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # Email config. self.email_attempts = [] - def sendmail(*args, **kwargs): + async def sendmail(*args, **kwargs): self.email_attempts.append((args, kwargs)) - return config["email"] = { "enable_notifs": True, From 3f97b4c16bbe8a32fc465bd59421f3ba879d2124 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Dec 2019 11:17:13 +0000 Subject: [PATCH 43/60] Newsfile --- changelog.d/6504.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6504.misc diff --git a/changelog.d/6504.misc b/changelog.d/6504.misc new file mode 100644 index 000000000..7c873459a --- /dev/null +++ b/changelog.d/6504.misc @@ -0,0 +1 @@ +Port handlers.account_data and handlers.account_validity to async/await. From 424fd58237d4a40c6f94772be136298d326fcd69 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 10 Dec 2019 15:09:45 +0000 Subject: [PATCH 44/60] Remove redundant code from event authorisation implementation. (#6502) --- changelog.d/6502.removal | 1 + synapse/event_auth.py | 8 ++------ 2 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6502.removal diff --git a/changelog.d/6502.removal b/changelog.d/6502.removal new file mode 100644 index 000000000..0b72261d5 --- /dev/null +++ b/changelog.d/6502.removal @@ -0,0 +1 @@ +Remove redundant code from event authorisation implementation. 
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index ec3243b27..c940b8447 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -42,6 +42,8 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru Returns: if the auth checks pass. """ + assert isinstance(auth_events, dict) + if do_size_check: _check_size_limits(event) @@ -74,12 +76,6 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru if not event.signatures.get(event_id_domain): raise AuthError(403, "Event not signed by sending server") - if auth_events is None: - # Oh, we don't know what the state of the room was, so we - # are trusting that this is allowed (at least for now) - logger.warning("Trusting event: %s", event.event_id) - return - if event.type == EventTypes.Create: sender_domain = get_domain_from_id(event.sender) room_id_domain = get_domain_from_id(event.room_id) From c3dda2874d78790525f47e502aaed22b64961873 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 10 Dec 2019 16:22:00 +0000 Subject: [PATCH 45/60] Refactor get_events_from_store_or_dest to return a dict (#6501) There was a bunch of unnecessary conversion back and forth between dict and list going on here. We can simplify a bunch of the code. --- changelog.d/6501.misc | 1 + synapse/federation/federation_client.py | 44 +++++++++---------------- 2 files changed, 16 insertions(+), 29 deletions(-) create mode 100644 changelog.d/6501.misc diff --git a/changelog.d/6501.misc b/changelog.d/6501.misc new file mode 100644 index 000000000..255f45a9c --- /dev/null +++ b/changelog.d/6501.misc @@ -0,0 +1 @@ +Refactor get_events_from_store_or_dest to return a dict. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 709449c9e..73e1dda6a 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -18,8 +18,6 @@ import copy import itertools import logging -from six.moves import range - from prometheus_client import Counter from twisted.internet import defer @@ -41,7 +39,7 @@ from synapse.events import builder, room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.utils import log_function -from synapse.util import unwrapFirstError +from synapse.util import batch_iter, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -331,10 +329,12 @@ class FederationClient(FederationBase): state_event_ids = result["pdu_ids"] auth_event_ids = result.get("auth_chain_ids", []) - fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest( - destination, room_id, set(state_event_ids + auth_event_ids) + desired_events = set(state_event_ids + auth_event_ids) + event_map = yield self.get_events_from_store_or_dest( + destination, room_id, desired_events ) + failed_to_fetch = desired_events - event_map.keys() if failed_to_fetch: logger.warning( "Failed to fetch missing state/auth events for %s: %s", @@ -342,8 +342,6 @@ class FederationClient(FederationBase): failed_to_fetch, ) - event_map = {ev.event_id: ev for ev in fetched_events} - pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map] auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] @@ -358,23 +356,18 @@ class 
FederationClient(FederationBase): Args: destination (str) room_id (str) - event_ids (list) + event_ids (Iterable[str]) Returns: - Deferred: A deferred resolving to a 2-tuple where the first is a list of - events and the second is a list of event ids that we failed to fetch. + Deferred[dict[str, EventBase]]: A deferred resolving to a map + from event_id to event """ - seen_events = yield self.store.get_events(event_ids, allow_rejected=True) - signed_events = list(seen_events.values()) + fetched_events = yield self.store.get_events(event_ids, allow_rejected=True) - failed_to_fetch = set() - - missing_events = set(event_ids) - for k in seen_events: - missing_events.discard(k) + missing_events = set(event_ids) - fetched_events.keys() if not missing_events: - return signed_events, failed_to_fetch + return fetched_events logger.debug( "Fetching unknown state/auth events %s for room %s", @@ -384,11 +377,8 @@ class FederationClient(FederationBase): room_version = yield self.store.get_room_version(room_id) - batch_size = 20 - missing_events = list(missing_events) - for i in range(0, len(missing_events), batch_size): - batch = set(missing_events[i : i + batch_size]) - + # XXX 20 requests at once? really? + for batch in batch_iter(missing_events, 20): deferreds = [ run_in_background( self.get_pdu, @@ -404,13 +394,9 @@ class FederationClient(FederationBase): ) for success, result in res: if success and result: - signed_events.append(result) - batch.discard(result.event_id) + fetched_events[result.event_id] = result - # We removed all events we successfully fetched from `batch` - failed_to_fetch.update(batch) - - return signed_events, failed_to_fetch + return fetched_events @defer.inlineCallbacks @log_function From 40eda849338b6e47a5804b4cf7000e9d2417c4d8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 10 Dec 2019 16:22:29 +0000 Subject: [PATCH 46/60] Fix race which caused deleted devices to reappear (#6514) Stop the `update_client_ips` background job from recreating deleted devices. --- changelog.d/6514.bugfix | 1 + .../storage/data_stores/main/client_ips.py | 8 +-- tests/storage/test_client_ips.py | 49 +++++++++++-------- 3 files changed, 35 insertions(+), 23 deletions(-) create mode 100644 changelog.d/6514.bugfix diff --git a/changelog.d/6514.bugfix b/changelog.d/6514.bugfix new file mode 100644 index 000000000..6dc1985c2 --- /dev/null +++ b/changelog.d/6514.bugfix @@ -0,0 +1 @@ +Fix race which occasionally caused deleted devices to reappear. diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py index 320c5b0f0..add3037b6 100644 --- a/synapse/storage/data_stores/main/client_ips.py +++ b/synapse/storage/data_stores/main/client_ips.py @@ -451,16 +451,18 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): # Technically an access token might not be associated with # a device so we need to check. if device_id: - self.db.simple_upsert_txn( + # this is always an update rather than an upsert: the row should + # already exist, and if it doesn't, that may be because it has been + # deleted, and we don't want to re-create it. 
+ self.db.simple_update_txn( txn, table="devices", keyvalues={"user_id": user_id, "device_id": device_id}, - values={ + updatevalues={ "user_agent": user_agent, "last_seen": last_seen, "ip": ip, }, - lock=False, ) except Exception as e: # Failed to upsert, log and continue diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index fc279340d..bf674dd18 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -37,9 +37,13 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(12345678) user_id = "@user:id" + device_id = "MY_DEVICE" + + # Insert a user IP + self.get_success(self.store.store_device(user_id, device_id, "display name",)) self.get_success( self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + user_id, "access_token", "ip", "user_agent", device_id ) ) @@ -47,14 +51,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(10) result = self.get_success( - self.store.get_last_client_ip_by_device(user_id, "device_id") + self.store.get_last_client_ip_by_device(user_id, device_id) ) - r = result[(user_id, "device_id")] + r = result[(user_id, device_id)] self.assertDictContainsSubset( { "user_id": user_id, - "device_id": "device_id", + "device_id": device_id, "ip": "ip", "user_agent": "user_agent", "last_seen": 12345678000, @@ -209,14 +213,16 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.store.db.updates.do_next_background_update(100), by=0.1 ) - # Insert a user IP user_id = "@user:id" + device_id = "MY_DEVICE" + + # Insert a user IP + self.get_success(self.store.store_device(user_id, device_id, "display name",)) self.get_success( self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + user_id, "access_token", "ip", "user_agent", device_id ) ) - # Force persisting to disk self.reactor.advance(200) @@ -224,7 +230,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.get_success( self.store.db.simple_update( table="devices", - keyvalues={"user_id": user_id, "device_id": "device_id"}, + keyvalues={"user_id": user_id, "device_id": device_id}, updatevalues={"last_seen": None, "ip": None, "user_agent": None}, desc="test_devices_last_seen_bg_update", ) @@ -232,14 +238,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should now get nulls when querying result = self.get_success( - self.store.get_last_client_ip_by_device(user_id, "device_id") + self.store.get_last_client_ip_by_device(user_id, device_id) ) - r = result[(user_id, "device_id")] + r = result[(user_id, device_id)] self.assertDictContainsSubset( { "user_id": user_id, - "device_id": "device_id", + "device_id": device_id, "ip": None, "user_agent": None, "last_seen": None, @@ -272,14 +278,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should now get the correct result again result = self.get_success( - self.store.get_last_client_ip_by_device(user_id, "device_id") + self.store.get_last_client_ip_by_device(user_id, device_id) ) - r = result[(user_id, "device_id")] + r = result[(user_id, device_id)] self.assertDictContainsSubset( { "user_id": user_id, - "device_id": "device_id", + "device_id": device_id, "ip": "ip", "user_agent": "user_agent", "last_seen": 0, @@ -296,11 +302,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.store.db.updates.do_next_background_update(100), by=0.1 ) - # Insert a user IP user_id = "@user:id" + device_id = "MY_DEVICE" + + # Insert a user 
IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
 
@@ -324,7 +333,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
                     "access_token": "access_token",
                     "ip": "ip",
                     "user_agent": "user_agent",
-                    "device_id": "device_id",
+                    "device_id": device_id,
                     "last_seen": 0,
                 }
             ],
@@ -347,14 +356,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # But we should still get the correct values for the device
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
        )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 0,

From 4947de5a147d4f6a4e60aecac1284714fb64df8a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 10 Dec 2019 17:30:16 +0000
Subject: [PATCH 47/60] Allow SAML username provider plugins (#6411)

---
 changelog.d/6411.feature         |   1 +
 docs/saml_mapping_providers.md   |  77 ++++++++++++
 docs/sample_config.yaml          |  59 ++++++---
 synapse/config/saml2_config.py   | 180 +++++++++++++++++++---
 synapse/handlers/saml_handler.py | 200 ++++++++++++++++++++++++++-----
 5 files changed, 414 insertions(+), 103 deletions(-)
 create mode 100644 changelog.d/6411.feature
 create mode 100644 docs/saml_mapping_providers.md

diff --git a/changelog.d/6411.feature b/changelog.d/6411.feature
new file mode 100644
index 000000000..ebea4a208
--- /dev/null
+++ b/changelog.d/6411.feature
@@ -0,0 +1 @@
+Allow custom SAML username mapping functionality through an external provider plugin.
\ No newline at end of file
diff --git a/docs/saml_mapping_providers.md b/docs/saml_mapping_providers.md
new file mode 100644
index 000000000..92f238048
--- /dev/null
+++ b/docs/saml_mapping_providers.md
@@ -0,0 +1,77 @@
+# SAML Mapping Providers
+
+A SAML mapping provider is a Python class (loaded via a Python module) that
+works out how to map attributes of a SAML response object to Matrix-specific
+user attributes. Details such as user ID localpart, displayname, and even avatar
+URLs are all things that can be mapped from talking to an SSO service.
+
+As an example, an SSO service may return the email address
+"john.smith@example.com" for a user, whereas Synapse will need to figure out how
+to turn that into a displayname when creating a Matrix user for this individual.
+It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
+variations. As each Synapse configuration may want something different, this is
+where SAML mapping providers come into play.
+
+## Enabling Providers
+
+External mapping providers are provided to Synapse in the form of an external
+Python module. Retrieve this module from [PyPI](https://pypi.org) or elsewhere,
+then tell Synapse where to look for the handler class by editing the
+`saml2_config.user_mapping_provider.module` config option.
+
+`saml2_config.user_mapping_provider.config` allows you to provide custom
+configuration options to the module. Check with the module's documentation for
+what options it provides (if any). The options listed by default are for the
+user mapping provider built into Synapse.
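For orientation, a minimal custom module might look like the following sketch (the class name, option name and attribute choices are illustrative; the required methods are specified in the next section):

```python
class MySamlMappingProvider:
    """A minimal, hypothetical SAML mapping provider."""

    def __init__(self, parsed_config):
        self._source_attribute = parsed_config["source_attribute"]

    @staticmethod
    def parse_config(config: dict) -> dict:
        # Read options from saml2_config.user_mapping_provider.config
        return {"source_attribute": config.get("source_attribute", "uid")}

    @staticmethod
    def get_saml_attributes(config: dict):
        # (required attributes, optional attributes)
        return {config["source_attribute"]}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures: int) -> dict:
        value = saml_response.ava[self._source_attribute][0].lower()
        # Append the failure count to deduplicate localparts (see below)
        localpart = value + (str(failures) if failures else "")
        displayname = saml_response.ava.get("displayName", [None])[0]
        return {"mxid_localpart": localpart, "displayname": displayname}
```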
If using a custom module, you should +comment these options out and use those specified by the module instead. + +## Building a Custom Mapping Provider + +A custom mapping provider must specify the following methods: + +* `__init__(self, parsed_config)` + - Arguments: + - `parsed_config` - A configuration object that is the return value of the + `parse_config` method. You should set any configuration options needed by + the module here. +* `saml_response_to_user_attributes(self, saml_response, failures)` + - Arguments: + - `saml_response` - A `saml2.response.AuthnResponse` object to extract user + information from. + - `failures` - An `int` that represents the amount of times the returned + mxid localpart mapping has failed. This should be used + to create a deduplicated mxid localpart which should be + returned instead. For example, if this method returns + `john.doe` as the value of `mxid_localpart` in the returned + dict, and that is already taken on the homeserver, this + method will be called again with the same parameters but + with failures=1. The method should then return a different + `mxid_localpart` value, such as `john.doe1`. + - This method must return a dictionary, which will then be used by Synapse + to build a new user. The following keys are allowed: + * `mxid_localpart` - Required. The mxid localpart of the new user. + * `displayname` - The displayname of the new user. If not provided, will default to + the value of `mxid_localpart`. +* `parse_config(config)` + - This method should have the `@staticmethod` decoration. + - Arguments: + - `config` - A `dict` representing the parsed content of the + `saml2_config.user_mapping_provider.config` homeserver config option. + Runs on homeserver startup. Providers should extract any option values + they need here. + - Whatever is returned will be passed back to the user mapping provider module's + `__init__` method during construction. +* `get_saml_attributes(config)` + - This method should have the `@staticmethod` decoration. + - Arguments: + - `config` - A object resulting from a call to `parse_config`. + - Returns a tuple of two sets. The first set equates to the saml auth + response attributes that are required for the module to function, whereas + the second set consists of those attributes which can be used if available, + but are not necessary. + +## Synapse's Default Provider + +Synapse has a built-in SAML mapping provider if a custom provider isn't +specified in the config. It is located at +[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py). diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 10664ae8f..4d44e631d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1250,33 +1250,58 @@ saml2_config: # #config_path: "CONFDIR/sp_conf.py" - # the lifetime of a SAML session. This defines how long a user has to + # The lifetime of a SAML session. This defines how long a user has to # complete the authentication process, if allow_unsolicited is unset. # The default is 5 minutes. # #saml_session_lifetime: 5m - # The SAML attribute (after mapping via the attribute maps) to use to derive - # the Matrix ID from. 'uid' by default. + # An external module can be provided here as a custom solution to + # mapping attributes returned from a saml provider onto a matrix user. # - #mxid_source_attribute: displayName + user_mapping_provider: + # The custom module's class. Uncomment to use a custom module. 
+ # + #module: mapping_provider.SamlMappingProvider - # The mapping system to use for mapping the saml attribute onto a matrix ID. - # Options include: - # * 'hexencode' (which maps unpermitted characters to '=xx') - # * 'dotreplace' (which replaces unpermitted characters with '.'). - # The default is 'hexencode'. - # - #mxid_mapping: dotreplace + # Custom configuration values for the module. Below options are + # intended for the built-in provider, they should be changed if + # using a custom module. This section will be passed as a Python + # dictionary to the module's `parse_config` method. + # + config: + # The SAML attribute (after mapping via the attribute maps) to use + # to derive the Matrix ID from. 'uid' by default. + # + # Note: This used to be configured by the + # saml2_config.mxid_source_attribute option. If that is still + # defined, its value will be used instead. + # + #mxid_source_attribute: displayName - # In previous versions of synapse, the mapping from SAML attribute to MXID was - # always calculated dynamically rather than stored in a table. For backwards- - # compatibility, we will look for user_ids matching such a pattern before - # creating a new account. + # The mapping system to use for mapping the saml attribute onto a + # matrix ID. + # + # Options include: + # * 'hexencode' (which maps unpermitted characters to '=xx') + # * 'dotreplace' (which replaces unpermitted characters with + # '.'). + # The default is 'hexencode'. + # + # Note: This used to be configured by the + # saml2_config.mxid_mapping option. If that is still defined, its + # value will be used instead. + # + #mxid_mapping: dotreplace + + # In previous versions of synapse, the mapping from SAML attribute to + # MXID was always calculated dynamically rather than stored in a + # table. For backwards- compatibility, we will look for user_ids + # matching such a pattern before creating a new account. # # This setting controls the SAML attribute which will be used for this - # backwards-compatibility lookup. Typically it should be 'uid', but if the - # attribute maps are changed, it may be necessary to change it. + # backwards-compatibility lookup. Typically it should be 'uid', but if + # the attribute maps are changed, it may be necessary to change it. # # The default is 'uid'. # diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index c5ea2d43a..b91414aa3 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -14,17 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
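The option handling in this file leans on `synapse.util.module_loader.load_module` to turn the `module`/`config` pair into a class plus its parsed configuration. A rough sketch of that contract (simplified; error handling omitted):

```python
import importlib


def load_module(provider: dict):
    """Resolve the dotted `module` path to a class, then hand the raw
    `config` dict to its static parse_config() method."""
    module_name, _, class_name = provider["module"].rpartition(".")
    provider_class = getattr(importlib.import_module(module_name), class_name)
    provider_config = provider_class.parse_config(provider.get("config"))
    return provider_class, provider_config
```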
-import re +import logging from synapse.python_dependencies import DependencyException, check_requirements -from synapse.types import ( - map_username_to_mxid_localpart, - mxid_localpart_allowed_characters, -) -from synapse.util.module_loader import load_python_module +from synapse.util.module_loader import load_module, load_python_module from ._base import Config, ConfigError +logger = logging.getLogger(__name__) + +DEFAULT_USER_MAPPING_PROVIDER = ( + "synapse.handlers.saml_handler.DefaultSamlMappingProvider" +) + def _dict_merge(merge_dict, into_dict): """Do a deep merge of two dicts @@ -75,15 +77,69 @@ class SAML2Config(Config): self.saml2_enabled = True - self.saml2_mxid_source_attribute = saml2_config.get( - "mxid_source_attribute", "uid" - ) - self.saml2_grandfathered_mxid_source_attribute = saml2_config.get( "grandfathered_mxid_source_attribute", "uid" ) - saml2_config_dict = self._default_saml_config_dict() + # user_mapping_provider may be None if the key is present but has no value + ump_dict = saml2_config.get("user_mapping_provider") or {} + + # Use the default user mapping provider if not set + ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) + + # Ensure a config is present + ump_dict["config"] = ump_dict.get("config") or {} + + if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER: + # Load deprecated options for use by the default module + old_mxid_source_attribute = saml2_config.get("mxid_source_attribute") + if old_mxid_source_attribute: + logger.warning( + "The config option saml2_config.mxid_source_attribute is deprecated. " + "Please use saml2_config.user_mapping_provider.config" + ".mxid_source_attribute instead." + ) + ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute + + old_mxid_mapping = saml2_config.get("mxid_mapping") + if old_mxid_mapping: + logger.warning( + "The config option saml2_config.mxid_mapping is deprecated. Please " + "use saml2_config.user_mapping_provider.config.mxid_mapping instead." + ) + ump_dict["config"]["mxid_mapping"] = old_mxid_mapping + + # Retrieve an instance of the module's class + # Pass the config dictionary to the module for processing + ( + self.saml2_user_mapping_provider_class, + self.saml2_user_mapping_provider_config, + ) = load_module(ump_dict) + + # Ensure loaded user mapping module has defined all necessary methods + # Note parse_config() is already checked during the call to load_module + required_methods = [ + "get_saml_attributes", + "saml_response_to_user_attributes", + ] + missing_methods = [ + method + for method in required_methods + if not hasattr(self.saml2_user_mapping_provider_class, method) + ] + if missing_methods: + raise ConfigError( + "Class specified by saml2_config." 
+ "user_mapping_provider.module is missing required " + "methods: %s" % (", ".join(missing_methods),) + ) + + # Get the desired saml auth response attributes from the module + saml2_config_dict = self._default_saml_config_dict( + *self.saml2_user_mapping_provider_class.get_saml_attributes( + self.saml2_user_mapping_provider_config + ) + ) _dict_merge( merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict ) @@ -103,22 +159,27 @@ class SAML2Config(Config): saml2_config.get("saml_session_lifetime", "5m") ) - mapping = saml2_config.get("mxid_mapping", "hexencode") - try: - self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping] - except KeyError: - raise ConfigError("%s is not a known mxid_mapping" % (mapping,)) + def _default_saml_config_dict( + self, required_attributes: set, optional_attributes: set + ): + """Generate a configuration dictionary with required and optional attributes that + will be needed to process new user registration - def _default_saml_config_dict(self): + Args: + required_attributes: SAML auth response attributes that are + necessary to function + optional_attributes: SAML auth response attributes that can be used to add + additional information to Synapse user accounts, but are not required + + Returns: + dict: A SAML configuration dictionary + """ import saml2 public_baseurl = self.public_baseurl if public_baseurl is None: raise ConfigError("saml2_config requires a public_baseurl to be set") - required_attributes = {"uid", self.saml2_mxid_source_attribute} - - optional_attributes = {"displayName"} if self.saml2_grandfathered_mxid_source_attribute: optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute) optional_attributes -= required_attributes @@ -207,33 +268,58 @@ class SAML2Config(Config): # #config_path: "%(config_dir_path)s/sp_conf.py" - # the lifetime of a SAML session. This defines how long a user has to + # The lifetime of a SAML session. This defines how long a user has to # complete the authentication process, if allow_unsolicited is unset. # The default is 5 minutes. # #saml_session_lifetime: 5m - # The SAML attribute (after mapping via the attribute maps) to use to derive - # the Matrix ID from. 'uid' by default. + # An external module can be provided here as a custom solution to + # mapping attributes returned from a saml provider onto a matrix user. # - #mxid_source_attribute: displayName + user_mapping_provider: + # The custom module's class. Uncomment to use a custom module. + # + #module: mapping_provider.SamlMappingProvider - # The mapping system to use for mapping the saml attribute onto a matrix ID. - # Options include: - # * 'hexencode' (which maps unpermitted characters to '=xx') - # * 'dotreplace' (which replaces unpermitted characters with '.'). - # The default is 'hexencode'. - # - #mxid_mapping: dotreplace + # Custom configuration values for the module. Below options are + # intended for the built-in provider, they should be changed if + # using a custom module. This section will be passed as a Python + # dictionary to the module's `parse_config` method. + # + config: + # The SAML attribute (after mapping via the attribute maps) to use + # to derive the Matrix ID from. 'uid' by default. + # + # Note: This used to be configured by the + # saml2_config.mxid_source_attribute option. If that is still + # defined, its value will be used instead. 
+ # + #mxid_source_attribute: displayName - # In previous versions of synapse, the mapping from SAML attribute to MXID was - # always calculated dynamically rather than stored in a table. For backwards- - # compatibility, we will look for user_ids matching such a pattern before - # creating a new account. + # The mapping system to use for mapping the saml attribute onto a + # matrix ID. + # + # Options include: + # * 'hexencode' (which maps unpermitted characters to '=xx') + # * 'dotreplace' (which replaces unpermitted characters with + # '.'). + # The default is 'hexencode'. + # + # Note: This used to be configured by the + # saml2_config.mxid_mapping option. If that is still defined, its + # value will be used instead. + # + #mxid_mapping: dotreplace + + # In previous versions of synapse, the mapping from SAML attribute to + # MXID was always calculated dynamically rather than stored in a + # table. For backwards- compatibility, we will look for user_ids + # matching such a pattern before creating a new account. # # This setting controls the SAML attribute which will be used for this - # backwards-compatibility lookup. Typically it should be 'uid', but if the - # attribute maps are changed, it may be necessary to change it. + # backwards-compatibility lookup. Typically it should be 'uid', but if + # the attribute maps are changed, it may be necessary to change it. # # The default is 'uid'. # @@ -241,23 +327,3 @@ class SAML2Config(Config): """ % { "config_dir_path": config_dir_path } - - -DOT_REPLACE_PATTERN = re.compile( - ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)) -) - - -def dot_replace_for_mxid(username: str) -> str: - username = username.lower() - username = DOT_REPLACE_PATTERN.sub(".", username) - - # regular mxids aren't allowed to start with an underscore either - username = re.sub("^_", "", username) - return username - - -MXID_MAPPER_MAP = { - "hexencode": map_username_to_mxid_localpart, - "dotreplace": dot_replace_for_mxid, -} diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index cc9e6b9bd..0082f85c2 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -13,20 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. 
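The handler in this file retries the mapping provider with an incrementing `failures` counter until it finds an unused localpart, giving up after 1000 attempts. The retry contract in isolation (a standalone sketch; names are illustrative):

```python
def dedup_localpart(base: str, taken: set, max_attempts: int = 1000) -> str:
    """Try localparts derived from `base` until one is not already taken."""
    for failures in range(max_attempts):
        # failures=0 yields the bare localpart; later attempts append the count
        candidate = base + (str(failures) if failures else "")
        if candidate not in taken:
            return candidate
    raise RuntimeError("Unable to generate a free localpart")


# For example, with "john.doe" already registered:
assert dedup_localpart("john.doe", {"john.doe"}) == "john.doe1"
```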
import logging +import re +from typing import Tuple import attr import saml2 +import saml2.response from saml2.client import Saml2Client from synapse.api.errors import SynapseError +from synapse.config import ConfigError from synapse.http.servlet import parse_string from synapse.rest.client.v1.login import SSOAuthHandler -from synapse.types import UserID, map_username_to_mxid_localpart +from synapse.types import ( + UserID, + map_username_to_mxid_localpart, + mxid_localpart_allowed_characters, +) from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) +@attr.s +class Saml2SessionData: + """Data we track about SAML2 sessions""" + + # time the session was created, in milliseconds + creation_time = attr.ib() + + class SamlHandler: def __init__(self, hs): self._saml_client = Saml2Client(hs.config.saml2_sp_config) @@ -37,11 +53,14 @@ class SamlHandler: self._datastore = hs.get_datastore() self._hostname = hs.hostname self._saml2_session_lifetime = hs.config.saml2_session_lifetime - self._mxid_source_attribute = hs.config.saml2_mxid_source_attribute self._grandfathered_mxid_source_attribute = ( hs.config.saml2_grandfathered_mxid_source_attribute ) - self._mxid_mapper = hs.config.saml2_mxid_mapper + + # plugin to do custom mapping from saml response to mxid + self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class( + hs.config.saml2_user_mapping_provider_config + ) # identifier for the external_ids table self._auth_provider_id = "saml" @@ -118,22 +137,10 @@ class SamlHandler: remote_user_id = saml2_auth.ava["uid"][0] except KeyError: logger.warning("SAML2 response lacks a 'uid' attestation") - raise SynapseError(400, "uid not in SAML2 response") - - try: - mxid_source = saml2_auth.ava[self._mxid_source_attribute][0] - except KeyError: - logger.warning( - "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute - ) - raise SynapseError( - 400, "%s not in SAML2 response" % (self._mxid_source_attribute,) - ) + raise SynapseError(400, "'uid' not in SAML2 response") self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None) - displayName = saml2_auth.ava.get("displayName", [None])[0] - with (await self._mapping_lock.queue(self._auth_provider_id)): # first of all, check if we already have a mapping for this user logger.info( @@ -173,22 +180,46 @@ class SamlHandler: ) return registered_user_id - # figure out a new mxid for this user - base_mxid_localpart = self._mxid_mapper(mxid_source) + # Map saml response to user attributes using the configured mapping provider + for i in range(1000): + attribute_dict = self._user_mapping_provider.saml_response_to_user_attributes( + saml2_auth, i + ) - suffix = 0 - while True: - localpart = base_mxid_localpart + (str(suffix) if suffix else "") + logger.debug( + "Retrieved SAML attributes from user mapping provider: %s " + "(attempt %d)", + attribute_dict, + i, + ) + + localpart = attribute_dict.get("mxid_localpart") + if not localpart: + logger.error( + "SAML mapping provider plugin did not return a " + "mxid_localpart object" + ) + raise SynapseError(500, "Error parsing SAML2 response") + + displayname = attribute_dict.get("displayname") + + # Check if this mxid already exists if not await self._datastore.get_users_by_id_case_insensitive( UserID(localpart, self._hostname).to_string() ): + # This mxid is free break - suffix += 1 - logger.info("Allocating mxid for new user with localpart %s", localpart) + else: + # Unable to generate a username in 1000 iterations + # Break and return error to the 
user + raise SynapseError( + 500, "Unable to generate a Matrix ID from the SAML response" + ) registered_user_id = await self._registration_handler.register_user( - localpart=localpart, default_display_name=displayName + localpart=localpart, default_display_name=displayname ) + await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) @@ -205,9 +236,120 @@ class SamlHandler: del self._outstanding_requests_dict[reqid] -@attr.s -class Saml2SessionData: - """Data we track about SAML2 sessions""" +DOT_REPLACE_PATTERN = re.compile( + ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)) +) - # time the session was created, in milliseconds - creation_time = attr.ib() + +def dot_replace_for_mxid(username: str) -> str: + username = username.lower() + username = DOT_REPLACE_PATTERN.sub(".", username) + + # regular mxids aren't allowed to start with an underscore either + username = re.sub("^_", "", username) + return username + + +MXID_MAPPER_MAP = { + "hexencode": map_username_to_mxid_localpart, + "dotreplace": dot_replace_for_mxid, +} + + +@attr.s +class SamlConfig(object): + mxid_source_attribute = attr.ib() + mxid_mapper = attr.ib() + + +class DefaultSamlMappingProvider(object): + __version__ = "0.0.1" + + def __init__(self, parsed_config: SamlConfig): + """The default SAML user mapping provider + + Args: + parsed_config: Module configuration + """ + self._mxid_source_attribute = parsed_config.mxid_source_attribute + self._mxid_mapper = parsed_config.mxid_mapper + + def saml_response_to_user_attributes( + self, saml_response: saml2.response.AuthnResponse, failures: int = 0, + ) -> dict: + """Maps some text from a SAML response to attributes of a new user + + Args: + saml_response: A SAML auth response object + + failures: How many times a call to this function with this + saml_response has resulted in a failure + + Returns: + dict: A dict containing new user attributes. Possible keys: + * mxid_localpart (str): Required. 
The localpart of the user's mxid
+                * displayname (str): The displayname of the user
+        """
+        try:
+            mxid_source = saml_response.ava[self._mxid_source_attribute][0]
+        except KeyError:
+            logger.warning(
+                "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute,
+            )
+            raise SynapseError(
+                400, "%s not in SAML2 response" % (self._mxid_source_attribute,)
+            )
+
+        # Use the configured mapper for this mxid_source
+        base_mxid_localpart = self._mxid_mapper(mxid_source)
+
+        # Append suffix integer if last call to this function failed to produce
+        # a usable mxid
+        localpart = base_mxid_localpart + (str(failures) if failures else "")
+
+        # Retrieve the display name from the saml response
+        # If displayname is None, the mxid_localpart will be used instead
+        displayname = saml_response.ava.get("displayName", [None])[0]
+
+        return {
+            "mxid_localpart": localpart,
+            "displayname": displayname,
+        }
+
+    @staticmethod
+    def parse_config(config: dict) -> SamlConfig:
+        """Parse the dict provided by the homeserver's config
+        Args:
+            config: A dictionary containing configuration options for this provider
+        Returns:
+            SamlConfig: A custom config object for this module
+        """
+        # Parse config options and use defaults where necessary
+        mxid_source_attribute = config.get("mxid_source_attribute", "uid")
+        mapping_type = config.get("mxid_mapping", "hexencode")
+
+        # Retrieve the associated mapping function
+        try:
+            mxid_mapper = MXID_MAPPER_MAP[mapping_type]
+        except KeyError:
+            raise ConfigError(
+                "saml2_config.user_mapping_provider.config: '%s' is not a valid "
+                "mxid_mapping value" % (mapping_type,)
+            )
+
+        return SamlConfig(mxid_source_attribute, mxid_mapper)
+
+    @staticmethod
+    def get_saml_attributes(config: SamlConfig) -> Tuple[set, set]:
+        """Returns the required attributes of a SAML response
+
+        Args:
+            config: A SamlConfig object containing configuration params for this provider
+
+        Returns:
+            tuple[set,set]: The first set equates to the saml auth response
+                attributes that are required for the module to function, whereas the
+                second set consists of those attributes which can be used if
+                available, but are not necessary
+        """
+        return {"uid", config.mxid_source_attribute}, {"displayName"}

From f8bc2ae8830615698ae683cafe4fdddb9a05a1f9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 17:42:46 +0000
Subject: [PATCH 48/60] Move get_state methods into FederationHandler (#6503)

This is a non-functional refactor as a precursor to some other work.
---
 changelog.d/6503.misc                   |   1 +
 synapse/federation/federation_client.py |  91 +++------------------
 synapse/handlers/federation.py          | 101 ++++++++++++++++++++--
 3 files changed, 107 insertions(+), 86 deletions(-)
 create mode 100644 changelog.d/6503.misc

diff --git a/changelog.d/6503.misc b/changelog.d/6503.misc
new file mode 100644
index 000000000..e4e9a5a3d
--- /dev/null
+++ b/changelog.d/6503.misc
@@ -0,0 +1 @@
+Move get_state methods into FederationHandler.
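The slimmed-down client method below keeps only the network call plus basic validation of the remote response. Distilled into a standalone function (the name is illustrative), the check amounts to:

```python
from typing import List, Tuple


def parse_state_ids_response(result: dict) -> Tuple[List[str], List[str]]:
    """Validate a /state_ids response body before trusting its contents:
    both id lists come from a remote server and may be malformed."""
    state_event_ids = result.get("pdu_ids")
    auth_event_ids = result.get("auth_chain_ids", [])
    if not isinstance(state_event_ids, list) or not isinstance(auth_event_ids, list):
        raise ValueError("invalid response from /state_ids")
    return state_event_ids, auth_event_ids
```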
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 73e1dda6a..d396e6564 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -37,9 +37,9 @@ from synapse.api.room_versions import ( ) from synapse.events import builder, room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json -from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.context import make_deferred_yieldable from synapse.logging.utils import log_function -from synapse.util import batch_iter, unwrapFirstError +from synapse.util import unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -308,19 +308,12 @@ class FederationClient(FederationBase): return signed_pdu @defer.inlineCallbacks - @log_function - def get_state_for_room(self, destination, room_id, event_id): - """Requests all of the room state at a given event from a remote homeserver. - - Args: - destination (str): The remote homeserver to query for the state. - room_id (str): The id of the room we're interested in. - event_id (str): The id of the event we want the state at. + def get_room_state_ids(self, destination: str, room_id: str, event_id: str): + """Calls the /state_ids endpoint to fetch the state at a particular point + in the room, and the auth events for the given event Returns: - Deferred[Tuple[List[EventBase], List[EventBase]]]: - A list of events in the state, and a list of events in the auth chain - for the given event. + Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids) """ result = yield self.transport_layer.get_room_state_ids( destination, room_id, event_id=event_id @@ -329,74 +322,12 @@ class FederationClient(FederationBase): state_event_ids = result["pdu_ids"] auth_event_ids = result.get("auth_chain_ids", []) - desired_events = set(state_event_ids + auth_event_ids) - event_map = yield self.get_events_from_store_or_dest( - destination, room_id, desired_events - ) + if not isinstance(state_event_ids, list) or not isinstance( + auth_event_ids, list + ): + raise Exception("invalid response from /state_ids") - failed_to_fetch = desired_events - event_map.keys() - if failed_to_fetch: - logger.warning( - "Failed to fetch missing state/auth events for %s: %s", - room_id, - failed_to_fetch, - ) - - pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map] - auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] - - auth_chain.sort(key=lambda e: e.depth) - - return pdus, auth_chain - - @defer.inlineCallbacks - def get_events_from_store_or_dest(self, destination, room_id, event_ids): - """Fetch events from a remote destination, checking if we already have them. - - Args: - destination (str) - room_id (str) - event_ids (Iterable[str]) - - Returns: - Deferred[dict[str, EventBase]]: A deferred resolving to a map - from event_id to event - """ - fetched_events = yield self.store.get_events(event_ids, allow_rejected=True) - - missing_events = set(event_ids) - fetched_events.keys() - - if not missing_events: - return fetched_events - - logger.debug( - "Fetching unknown state/auth events %s for room %s", - missing_events, - event_ids, - ) - - room_version = yield self.store.get_room_version(room_id) - - # XXX 20 requests at once? really? 
- for batch in batch_iter(missing_events, 20): - deferreds = [ - run_in_background( - self.get_pdu, - destinations=[destination], - event_id=e_id, - room_version=room_version, - ) - for e_id in batch - ] - - res = yield make_deferred_yieldable( - defer.DeferredList(deferreds, consumeErrors=True) - ) - for success, result in res: - if success and result: - fetched_events[result.event_id] = result - - return fetched_events + return state_event_ids, auth_event_ids @defer.inlineCallbacks @log_function diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index bc2692176..c0dcf9abf 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -64,7 +64,7 @@ from synapse.replication.http.federation import ( from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.types import UserID, get_domain_from_id -from synapse.util import unwrapFirstError +from synapse.util import batch_iter, unwrapFirstError from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination @@ -379,11 +379,9 @@ class FederationHandler(BaseHandler): ( remote_state, got_auth_chain, - ) = yield self.federation_client.get_state_for_room( - origin, room_id, p - ) + ) = yield self._get_state_for_room(origin, room_id, p) - # we want the state *after* p; get_state_for_room returns the + # we want the state *after* p; _get_state_for_room returns the # state *before* p. remote_event = yield self.federation_client.get_pdu( [origin], p, room_version, outlier=True @@ -583,6 +581,97 @@ class FederationHandler(BaseHandler): else: raise + @defer.inlineCallbacks + @log_function + def _get_state_for_room(self, destination, room_id, event_id): + """Requests all of the room state at a given event from a remote homeserver. + + Args: + destination (str): The remote homeserver to query for the state. + room_id (str): The id of the room we're interested in. + event_id (str): The id of the event we want the state at. + + Returns: + Deferred[Tuple[List[EventBase], List[EventBase]]]: + A list of events in the state, and a list of events in the auth chain + for the given event. + """ + ( + state_event_ids, + auth_event_ids, + ) = yield self.federation_client.get_room_state_ids( + destination, room_id, event_id=event_id + ) + + desired_events = set(state_event_ids + auth_event_ids) + event_map = yield self._get_events_from_store_or_dest( + destination, room_id, desired_events + ) + + failed_to_fetch = desired_events - event_map.keys() + if failed_to_fetch: + logger.warning( + "Failed to fetch missing state/auth events for %s: %s", + room_id, + failed_to_fetch, + ) + + pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map] + auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] + + auth_chain.sort(key=lambda e: e.depth) + + return pdus, auth_chain + + @defer.inlineCallbacks + def _get_events_from_store_or_dest(self, destination, room_id, event_ids): + """Fetch events from a remote destination, checking if we already have them. 
+ + Args: + destination (str) + room_id (str) + event_ids (Iterable[str]) + + Returns: + Deferred[dict[str, EventBase]]: A deferred resolving to a map + from event_id to event + """ + fetched_events = yield self.store.get_events(event_ids, allow_rejected=True) + + missing_events = set(event_ids) - fetched_events.keys() + + if not missing_events: + return fetched_events + + logger.debug( + "Fetching unknown state/auth events %s for room %s", + missing_events, + event_ids, + ) + + room_version = yield self.store.get_room_version(room_id) + + # XXX 20 requests at once? really? + for batch in batch_iter(missing_events, 20): + deferreds = [ + run_in_background( + self.federation_client.get_pdu, + destinations=[destination], + event_id=e_id, + room_version=room_version, + ) + for e_id in batch + ] + + res = yield make_deferred_yieldable( + defer.DeferredList(deferreds, consumeErrors=True) + ) + for success, result in res: + if success and result: + fetched_events[result.event_id] = result + + return fetched_events + @defer.inlineCallbacks def _process_received_pdu(self, origin, event, state, auth_chain): """ Called when we have a new pdu. We need to do auth checks and put it @@ -723,7 +812,7 @@ class FederationHandler(BaseHandler): state_events = {} events_to_state = {} for e_id in edges: - state, auth = yield self.federation_client.get_state_for_room( + state, auth = yield self._get_state_for_room( destination=dest, room_id=room_id, event_id=e_id ) auth_events.update({a.event_id: a for a in auth}) From 72acca6a32697a53f8f659e641d65dbf25ff6b4d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Dec 2019 11:46:55 +0000 Subject: [PATCH 49/60] Back out change preventing setting null avatar URLs --- changelog.d/6497.bugfix | 2 +- synapse/rest/client/v1/profile.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/changelog.d/6497.bugfix b/changelog.d/6497.bugfix index 92ed08fc4..6a2644d8e 100644 --- a/changelog.d/6497.bugfix +++ b/changelog.d/6497.bugfix @@ -1 +1 @@ -Fix error message when setting your profile's avatar URL mentioning displaynames, and prevent NoneType avatar_urls. \ No newline at end of file +Fix incorrect error message for invalid requests when setting user's avatar URL. 
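The corrected servlet code below replaces the unreachable `except Exception` branch (a plain dict `.get` never throws) with an explicit `KeyError` check and a proper `M_MISSING_PARAM` error. The same pattern, generalised into a hypothetical helper:

```python
from synapse.api.errors import Codes, SynapseError


def require_json_key(content: dict, key: str):
    """Fetch a required key from a parsed JSON request body, raising a
    400/M_MISSING_PARAM error if the client omitted it."""
    try:
        return content[key]
    except KeyError:
        raise SynapseError(
            400, "Missing key '%s'" % (key,), errcode=Codes.MISSING_PARAM
        )
```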
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 4f47562c1..e7fe50ed7 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -15,6 +15,7 @@
 
 """ This module contains REST servlets to do with profile: /profile/ """
 
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import UserID
@@ -103,12 +104,11 @@ class ProfileAvatarURLRestServlet(RestServlet):
         content = parse_json_object_from_request(request)
 
         try:
-            new_avatar_url = content.get("avatar_url")
-        except Exception:
-            return 400, "Unable to parse avatar_url"
-
-        if new_avatar_url is None:
-            return 400, "Missing required key: avatar_url"
+            new_avatar_url = content["avatar_url"]
+        except KeyError:
+            raise SynapseError(
+                400, "Missing key 'avatar_url'", errcode=Codes.MISSING_PARAM
+            )
 
         await self.profile_handler.set_avatar_url(
             user, requester, new_avatar_url, is_admin

From ea0f0ad4144e3ce0cf10f3ec461ecd8f654955a2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 11 Dec 2019 13:07:25 +0000
Subject: [PATCH 50/60] Prevent message search in upgraded rooms we're not in (#6385)

---
 changelog.d/6385.bugfix                   |  1 +
 synapse/handlers/federation.py            |  4 +--
 synapse/handlers/search.py                | 34 +++++++++++++++++------
 synapse/storage/data_stores/main/state.py | 18 ++++++++----
 4 files changed, 41 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/6385.bugfix

diff --git a/changelog.d/6385.bugfix b/changelog.d/6385.bugfix
new file mode 100644
index 000000000..7a2bc0217
--- /dev/null
+++ b/changelog.d/6385.bugfix
@@ -0,0 +1 @@
+Prevent error on trying to search an upgraded room when the server is not in the predecessor room.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c0dcf9abf..13865c470 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1299,7 +1299,7 @@ class FederationHandler(BaseHandler):
         # Check whether this room is the result of an upgrade of a room we already know
         # about. If so, migrate over user information
         predecessor = yield self.store.get_room_predecessor(room_id)
-        if not predecessor:
+        if not predecessor or not isinstance(predecessor.get("room_id"), str):
             return
         old_room_id = predecessor["room_id"]
         logger.debug(
@@ -1542,7 +1542,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content):
         origin, event, event_format_version = yield self._make_and_verify_event(
-            target_hosts, room_id, user_id, "leave", content=content,
+            target_hosts, room_id, user_id, "leave", content=content
         )
         # Mark as outlier as we don't have any state for this event; we're not
         # even in the room.
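Both of the call-site changes in this patch guard against a malformed `predecessor` entry in the `m.room.create` event content, which is ultimately remote-controlled data. The shared shape of that check, as a standalone sketch:

```python
from typing import Optional


def predecessor_room_id(create_content: dict) -> Optional[str]:
    """Return the predecessor's room_id only if it is well-formed."""
    predecessor = create_content.get("predecessor")
    if not isinstance(predecessor, dict):
        return None
    room_id = predecessor.get("room_id")
    return room_id if isinstance(room_id, str) else None
```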
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 56ed262a1..ef750d149 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -21,7 +21,7 @@ from unpaddedbase64 import decode_base64, encode_base64 from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import SynapseError +from synapse.api.errors import NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client @@ -37,6 +37,7 @@ class SearchHandler(BaseHandler): self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state + self.auth = hs.get_auth() @defer.inlineCallbacks def get_old_rooms_from_upgraded_room(self, room_id): @@ -53,23 +54,38 @@ class SearchHandler(BaseHandler): room_id (str): id of the room to search through. Returns: - Deferred[iterable[unicode]]: predecessor room ids + Deferred[iterable[str]]: predecessor room ids """ historical_room_ids = [] - while True: - predecessor = yield self.store.get_room_predecessor(room_id) + # The initial room must have been known for us to get this far + predecessor = yield self.store.get_room_predecessor(room_id) - # If no predecessor, assume we've hit a dead end + while True: if not predecessor: + # We have reached the end of the chain of predecessors break - # Add predecessor's room ID - historical_room_ids.append(predecessor["room_id"]) + if not isinstance(predecessor.get("room_id"), str): + # This predecessor object is malformed. Exit here + break - # Scan through the old room for further predecessors - room_id = predecessor["room_id"] + predecessor_room_id = predecessor["room_id"] + + # Don't add it to the list until we have checked that we are in the room + try: + next_predecessor_room = yield self.store.get_room_predecessor( + predecessor_room_id + ) + except NotFoundError: + # The predecessor is not a known room, so we are done here + break + + historical_room_ids.append(predecessor_room_id) + + # And repeat + predecessor = next_predecessor_room return historical_room_ids diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 9ef7b48c7..dcc6b43cd 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -278,7 +278,7 @@ class StateGroupWorkerStore( @defer.inlineCallbacks def get_room_predecessor(self, room_id): - """Get the predecessor room of an upgraded room if one exists. + """Get the predecessor of an upgraded room if it exists. Otherwise return None. Args: @@ -291,14 +291,22 @@ class StateGroupWorkerStore( * room_id (str): The room ID of the predecessor room * event_id (str): The ID of the tombstone event in the predecessor room + None if a predecessor key is not found, or is not a dictionary. 
+ Raises: - NotFoundError if the room is unknown + NotFoundError if the given room is unknown """ # Retrieve the room's create event create_event = yield self.get_create_event_for_room(room_id) - # Return predecessor if present - return create_event.content.get("predecessor", None) + # Retrieve the predecessor key of the create event + predecessor = create_event.content.get("predecessor", None) + + # Ensure the key is a dictionary + if not isinstance(predecessor, dict): + return None + + return predecessor @defer.inlineCallbacks def get_create_event_for_room(self, room_id): @@ -318,7 +326,7 @@ class StateGroupWorkerStore( # If we can't find the create event, assume we've hit a dead end if not create_id: - raise NotFoundError("Unknown room %s" % (room_id)) + raise NotFoundError("Unknown room %s" % (room_id,)) # Retrieve the room's create event and return create_event = yield self.get_event(create_id) From 6676ee9c4a74e15afdd752e05ca38d82da94c2c1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 11 Dec 2019 13:16:01 +0000 Subject: [PATCH 51/60] Add dev script to generate full SQL schema files (#6394) --- changelog.d/6394.feature | 1 + scripts-dev/make_full_schema.sh | 184 ++++++++++++++++++ .../main/schema/full_schemas/README.md | 13 ++ .../main/schema/full_schemas/README.txt | 19 -- 4 files changed, 198 insertions(+), 19 deletions(-) create mode 100644 changelog.d/6394.feature create mode 100755 scripts-dev/make_full_schema.sh create mode 100644 synapse/storage/data_stores/main/schema/full_schemas/README.md delete mode 100644 synapse/storage/data_stores/main/schema/full_schemas/README.txt diff --git a/changelog.d/6394.feature b/changelog.d/6394.feature new file mode 100644 index 000000000..1a0e8845a --- /dev/null +++ b/changelog.d/6394.feature @@ -0,0 +1 @@ +Add a develop script to generate full SQL schemas. \ No newline at end of file diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh new file mode 100755 index 000000000..60e8970a3 --- /dev/null +++ b/scripts-dev/make_full_schema.sh @@ -0,0 +1,184 @@ +#!/bin/bash +# +# This script generates SQL files for creating a brand new Synapse DB with the latest +# schema, on both SQLite3 and Postgres. +# +# It does so by having Synapse generate an up-to-date SQLite DB, then running +# synapse_port_db to convert it to Postgres. It then dumps the contents of both. + +POSTGRES_HOST="localhost" +POSTGRES_DB_NAME="synapse_full_schema.$$" + +SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite" +POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres" + +REQUIRED_DEPS=("matrix-synapse" "psycopg2") + +usage() { + echo + echo "Usage: $0 -p -o [-c] [-n] [-h]" + echo + echo "-p " + echo " Username to connect to local postgres instance. The password will be requested" + echo " during script execution." + echo "-c" + echo " CI mode. Enables coverage tracking and prints every command that the script runs." + echo "-o " + echo " Directory to output full schema files to." + echo "-h" + echo " Display this help text." +} + +while getopts "p:co:h" opt; do + case $opt in + p) + POSTGRES_USERNAME=$OPTARG + ;; + c) + # Print all commands that are being executed + set -x + + # Modify required dependencies for coverage + REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess") + + COVERAGE=1 + ;; + o) + command -v realpath > /dev/null || (echo "The -o flag requires the 'realpath' binary to be installed" && exit 1) + OUTPUT_DIR="$(realpath "$OPTARG")" + ;; + h) + usage + exit + ;; + \?) 
+ echo "ERROR: Invalid option: -$OPTARG" >&2 + usage + exit + ;; + esac +done + +# Check that required dependencies are installed +unsatisfied_requirements=() +for dep in "${REQUIRED_DEPS[@]}"; do + pip show "$dep" --quiet || unsatisfied_requirements+=("$dep") +done +if [ ${#unsatisfied_requirements} -ne 0 ]; then + echo "Please install the following python packages: ${unsatisfied_requirements[*]}" + exit 1 +fi + +if [ -z "$POSTGRES_USERNAME" ]; then + echo "No postgres username supplied" + usage + exit 1 +fi + +if [ -z "$OUTPUT_DIR" ]; then + echo "No output directory supplied" + usage + exit 1 +fi + +# Create the output directory if it doesn't exist +mkdir -p "$OUTPUT_DIR" + +read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD +echo "" + +# Exit immediately if a command fails +set -e + +# cd to root of the synapse directory +cd "$(dirname "$0")/.." + +# Create temporary SQLite and Postgres homeserver db configs and key file +TMPDIR=$(mktemp -d) +KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path +SQLITE_CONFIG=$TMPDIR/sqlite.conf +SQLITE_DB=$TMPDIR/homeserver.db +POSTGRES_CONFIG=$TMPDIR/postgres.conf + +# Ensure these files are delete on script exit +trap 'rm -rf $TMPDIR' EXIT + +cat > "$SQLITE_CONFIG" < "$POSTGRES_CONFIG" < "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE" + +echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..." +pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE" + +echo "Cleaning up temporary Postgres database..." +dropdb $POSTGRES_DB_NAME + +echo "Done! Files dumped to: $OUTPUT_DIR" diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.md b/synapse/storage/data_stores/main/schema/full_schemas/README.md new file mode 100644 index 000000000..bbd3f1860 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/full_schemas/README.md @@ -0,0 +1,13 @@ +# Building full schema dumps + +These schemas need to be made from a database that has had all background updates run. + +To do so, use `scripts-dev/make_full_schema.sh`. This will produce +`full.sql.postgres ` and `full.sql.sqlite` files. + +Ensure postgres is installed and your user has the ability to run bash commands +such as `createdb`. + +``` +./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ +``` diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.txt b/synapse/storage/data_stores/main/schema/full_schemas/README.txt deleted file mode 100644 index d3f640134..000000000 --- a/synapse/storage/data_stores/main/schema/full_schemas/README.txt +++ /dev/null @@ -1,19 +0,0 @@ -Building full schema dumps -========================== - -These schemas need to be made from a database that has had all background updates run. - -Postgres --------- - -$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres - -SQLite ------- - -$ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite - -After ------ - -Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas". 
\ No newline at end of file From fc316a4894912f49f5d0321e533aabca5624b0ba Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 11 Dec 2019 13:39:47 +0000 Subject: [PATCH 52/60] Prevent redacted events from appearing in message search (#6377) --- changelog.d/6377.bugfix | 1 + synapse/handlers/federation.py | 7 +- synapse/handlers/message.py | 5 +- synapse/state/__init__.py | 3 +- .../storage/data_stores/main/events_worker.py | 97 ++++++++++++------- synapse/storage/data_stores/main/search.py | 8 +- 6 files changed, 78 insertions(+), 43 deletions(-) create mode 100644 changelog.d/6377.bugfix diff --git a/changelog.d/6377.bugfix b/changelog.d/6377.bugfix new file mode 100644 index 000000000..ccda96962 --- /dev/null +++ b/changelog.d/6377.bugfix @@ -0,0 +1 @@ +Prevent redacted events from being returned during message search. \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 13865c470..8f3c9d770 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -63,6 +63,7 @@ from synapse.replication.http.federation import ( ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.types import UserID, get_domain_from_id from synapse.util import batch_iter, unwrapFirstError from synapse.util.async_helpers import Linearizer @@ -423,7 +424,7 @@ class FederationHandler(BaseHandler): evs = yield self.store.get_events( list(state_map.values()), get_prev_content=False, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, ) event_map.update(evs) @@ -1000,7 +1001,9 @@ class FederationHandler(BaseHandler): forward_events = yield self.store.get_successor_events(list(extremities)) extremities_events = yield self.store.get_events( - forward_events, check_redacted=False, get_prev_content=False + forward_events, + redact_behaviour=EventRedactBehaviour.AS_IS, + get_prev_content=False, ) # We set `check_history_visibility_only` as we might otherwise get false diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 54fa216d8..bf9add7fe 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -46,6 +46,7 @@ from synapse.events.validator import EventValidator from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -875,7 +876,7 @@ class EventCreationHandler(object): if event.type == EventTypes.Redaction: original_event = yield self.store.get_event( event.redacts, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, allow_rejected=False, allow_none=True, @@ -952,7 +953,7 @@ class EventCreationHandler(object): if event.type == EventTypes.Redaction: original_event = yield self.store.get_event( event.redacts, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, allow_rejected=False, allow_none=True, diff --git a/synapse/state/__init__.py 
b/synapse/state/__init__.py index 139beef8e..3e6d62eef 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -32,6 +32,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.logging.utils import log_function from synapse.state import v1, v2 +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.util.async_helpers import Linearizer from synapse.util.caches import get_cache_factor_for from synapse.util.caches.expiringcache import ExpiringCache @@ -645,7 +646,7 @@ class StateResolutionStore(object): return self.store.get_events( event_ids, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, allow_rejected=allow_rejected, ) diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index 9ee117ce0..2c9142814 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -19,8 +19,10 @@ import itertools import logging import threading from collections import namedtuple +from typing import List, Optional from canonicaljson import json +from constantly import NamedConstant, Names from twisted.internet import defer @@ -55,6 +57,16 @@ EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) +class EventRedactBehaviour(Names): + """ + What to do when retrieving a redacted event from the database. + """ + + AS_IS = NamedConstant() + REDACT = NamedConstant() + BLOCK = NamedConstant() + + class EventsWorkerStore(SQLBaseStore): def __init__(self, database: Database, db_conn, hs): super(EventsWorkerStore, self).__init__(database, db_conn, hs) @@ -125,25 +137,27 @@ class EventsWorkerStore(SQLBaseStore): @defer.inlineCallbacks def get_event( self, - event_id, - check_redacted=True, - get_prev_content=False, - allow_rejected=False, - allow_none=False, - check_room_id=None, + event_id: List[str], + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + get_prev_content: bool = False, + allow_rejected: bool = False, + allow_none: bool = False, + check_room_id: Optional[str] = None, ): """Get an event from the database by event_id. Args: - event_id (str): The event_id of the event to fetch - check_redacted (bool): If True, check if event has been redacted - and redact it. - get_prev_content (bool): If True and event is a state event, + event_id: The event_id of the event to fetch + redact_behaviour: Determine what to do with a redacted event. Possible values: + * AS_IS - Return the full event body with no redacted content + * REDACT - Return the event but with a redacted body + * DISALLOW - Do not return redacted events + get_prev_content: If True and event is a state event, include the previous states content in the unsigned field. - allow_rejected (bool): If True return rejected events. - allow_none (bool): If True, return None if no event found, if + allow_rejected: If True return rejected events. + allow_none: If True, return None if no event found, if False throw a NotFoundError - check_room_id (str|None): if not None, check the room of the found event. + check_room_id: if not None, check the room of the found event. If there is a mismatch, behave as per allow_none. 
        Returns:
@@ -154,7 +168,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         events = yield self.get_events_as_list(
             [event_id],
-            check_redacted=check_redacted,
+            redact_behaviour=redact_behaviour,
             get_prev_content=get_prev_content,
             allow_rejected=allow_rejected,
         )
@@ -173,27 +187,30 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_events(
         self,
-        event_ids,
-        check_redacted=True,
-        get_prev_content=False,
-        allow_rejected=False,
+        event_ids: List[str],
+        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
+        get_prev_content: bool = False,
+        allow_rejected: bool = False,
     ):
         """Get events from the database
 
         Args:
-            event_ids (list): The event_ids of the events to fetch
-            check_redacted (bool): If True, check if event has been redacted
-                and redact it.
-            get_prev_content (bool): If True and event is a state event,
+            event_ids: The event_ids of the events to fetch
+            redact_behaviour: Determine what to do with a redacted event. Possible
+                values:
+                * AS_IS - Return the full event body with no redacted content
+                * REDACT - Return the event but with a redacted body
+                * BLOCK - Do not return redacted events
+            get_prev_content: If True and event is a state event,
                 include the previous states content in the unsigned field.
-            allow_rejected (bool): If True return rejected events.
+            allow_rejected: If True, return rejected events.
 
         Returns:
             Deferred : Dict from event_id to event.
         """
         events = yield self.get_events_as_list(
             event_ids,
-            check_redacted=check_redacted,
+            redact_behaviour=redact_behaviour,
             get_prev_content=get_prev_content,
             allow_rejected=allow_rejected,
         )
@@ -203,21 +220,23 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_events_as_list(
         self,
-        event_ids,
-        check_redacted=True,
-        get_prev_content=False,
-        allow_rejected=False,
+        event_ids: List[str],
+        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
+        get_prev_content: bool = False,
+        allow_rejected: bool = False,
     ):
         """Get events from the database and return in a list in the same order
         as given by `event_ids` arg.
 
         Args:
-            event_ids (list): The event_ids of the events to fetch
-            check_redacted (bool): If True, check if event has been redacted
-                and redact it.
-            get_prev_content (bool): If True and event is a state event,
+            event_ids: The event_ids of the events to fetch
+            redact_behaviour: Determine what to do with a redacted event. Possible values:
+                * AS_IS - Return the full event body with no redacted content
+                * REDACT - Return the event but with a redacted body
+                * BLOCK - Do not return redacted events
+            get_prev_content: If True and event is a state event,
                 include the previous states content in the unsigned field.
-            allow_rejected (bool): If True return rejected events.
+            allow_rejected: If True, return rejected events.
 
         Returns:
             Deferred[list[EventBase]]: List of events fetched from the database. The
@@ -319,10 +338,14 @@ class EventsWorkerStore(SQLBaseStore):
                     # Update the cache to save doing the checks again.
entry.event.internal_metadata.recheck_redaction = False - if check_redacted and entry.redacted_event: - event = entry.redacted_event - else: - event = entry.event + event = entry.event + + if entry.redacted_event: + if redact_behaviour == EventRedactBehaviour.BLOCK: + # Skip this event + continue + elif redact_behaviour == EventRedactBehaviour.REDACT: + event = entry.redacted_event events.append(event) diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py index 4eec2fae5..dfb46ee0f 100644 --- a/synapse/storage/data_stores/main/search.py +++ b/synapse/storage/data_stores/main/search.py @@ -25,6 +25,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine, Sqlite3Engine @@ -453,7 +454,12 @@ class SearchStore(SearchBackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - events = yield self.get_events_as_list([r["event_id"] for r in results]) + # We set redact_behaviour to BLOCK here to prevent redacted events being returned in + # search results (which is a data leak) + events = yield self.get_events_as_list( + [r["event_id"] for r in results], + redact_behaviour=EventRedactBehaviour.BLOCK, + ) event_map = {ev.event_id: ev for ev in events} From d156912c4c4f65b821eab202654d740422008d82 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Dec 2019 13:56:50 +0000 Subject: [PATCH 53/60] 1.7.0rc2 --- CHANGES.md | 11 +++++++++++ changelog.d/6497.bugfix | 1 - changelog.d/6499.bugfix | 1 - changelog.d/6507.bugfix | 1 - changelog.d/6509.bugfix | 1 - synapse/__init__.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) delete mode 100644 changelog.d/6497.bugfix delete mode 100644 changelog.d/6499.bugfix delete mode 100644 changelog.d/6507.bugfix delete mode 100644 changelog.d/6509.bugfix diff --git a/CHANGES.md b/CHANGES.md index c30ea4718..c83a6afbc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.7.0rc2 (2019-12-11) +============================= + +Bugfixes +-------- + +- Fix incorrect error message for invalid requests when setting user's avatar URL. ([\#6497](https://github.com/matrix-org/synapse/issues/6497)) +- Fix support for SQLite 3.7. ([\#6499](https://github.com/matrix-org/synapse/issues/6499)) +- Fix regression where sending email push would not work when using a pusher worker. ([\#6507](https://github.com/matrix-org/synapse/issues/6507), [\#6509](https://github.com/matrix-org/synapse/issues/6509)) + + Synapse 1.7.0rc1 (2019-12-09) ============================= diff --git a/changelog.d/6497.bugfix b/changelog.d/6497.bugfix deleted file mode 100644 index 6a2644d8e..000000000 --- a/changelog.d/6497.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect error message for invalid requests when setting user's avatar URL. diff --git a/changelog.d/6499.bugfix b/changelog.d/6499.bugfix deleted file mode 100644 index 299feba0f..000000000 --- a/changelog.d/6499.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix support for SQLite 3.7. diff --git a/changelog.d/6507.bugfix b/changelog.d/6507.bugfix deleted file mode 100644 index d767a6237..000000000 --- a/changelog.d/6507.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix regression where sending email push would not work when using a pusher worker. 
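
(Note on the `EventRedactBehaviour` API introduced in [PATCH 52/60] above: the
sketch below is illustrative only and not part of the patch series. It shows
how a storage caller might select each behaviour; `store` is assumed to be an
EventsWorkerStore instance and the two helper functions are hypothetical, but
the methods and constants are the ones the patch adds.)

# Illustrative sketch -- not part of the patch series. Assumes `store` is an
# EventsWorkerStore instance running under the Twisted reactor.
from twisted.internet import defer

from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour


@defer.inlineCallbacks
def fetch_searchable_events(store, event_ids):
    # BLOCK drops redacted events from the result entirely, which is what the
    # search path above relies on to avoid leaking redacted content.
    events = yield store.get_events_as_list(
        event_ids, redact_behaviour=EventRedactBehaviour.BLOCK
    )
    return events


@defer.inlineCallbacks
def fetch_original_event(store, event_id):
    # AS_IS returns the full, unredacted body; the handlers above use it when
    # the server itself needs the original event (e.g. validating a redaction).
    event = yield store.get_event(
        event_id, redact_behaviour=EventRedactBehaviour.AS_IS, allow_none=True
    )
    return event
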
diff --git a/changelog.d/6509.bugfix b/changelog.d/6509.bugfix deleted file mode 100644 index d767a6237..000000000 --- a/changelog.d/6509.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix regression where sending email push would not work when using a pusher worker. diff --git a/synapse/__init__.py b/synapse/__init__.py index c67a51a8d..fc2a6e4ee 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.7.0rc1" +__version__ = "1.7.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 7c429f92d6935a3e9e0140fdd82801edc43b66b8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 11 Dec 2019 14:32:25 +0000 Subject: [PATCH 54/60] Clean up some logging (#6515) This just makes some of the logging easier to follow when things start going wrong. --- changelog.d/6515.misc | 1 + synapse/handlers/federation.py | 37 +++++++++++++++++----------------- 2 files changed, 20 insertions(+), 18 deletions(-) create mode 100644 changelog.d/6515.misc diff --git a/changelog.d/6515.misc b/changelog.d/6515.misc new file mode 100644 index 000000000..a9c303ed1 --- /dev/null +++ b/changelog.d/6515.misc @@ -0,0 +1 @@ +Clean up some logging when handling incoming events over federation. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8f3c9d770..cf9c46d02 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -183,7 +183,7 @@ class FederationHandler(BaseHandler): room_id = pdu.room_id event_id = pdu.event_id - logger.info("[%s %s] handling received PDU: %s", room_id, event_id, pdu) + logger.info("handling received PDU: %s", pdu) # We reprocess pdus when we have seen them only as outliers existing = yield self.store.get_event( @@ -279,9 +279,15 @@ class FederationHandler(BaseHandler): len(missing_prevs), ) - yield self._get_missing_events_for_pdu( - origin, pdu, prevs, min_depth - ) + try: + yield self._get_missing_events_for_pdu( + origin, pdu, prevs, min_depth + ) + except Exception as e: + raise Exception( + "Error fetching missing prev_events for %s: %s" + % (event_id, e) + ) # Update the set of things we've seen after trying to # fetch the missing stuff @@ -293,14 +299,6 @@ class FederationHandler(BaseHandler): room_id, event_id, ) - elif missing_prevs: - logger.info( - "[%s %s] Not recursively fetching %d missing prev_events: %s", - room_id, - event_id, - len(missing_prevs), - shortstr(missing_prevs), - ) if prevs - seen: # We've still not been able to get all of the prev_events for this event. @@ -345,6 +343,12 @@ class FederationHandler(BaseHandler): affected=pdu.event_id, ) + logger.info( + "Event %s is missing prev_events: calculating state for a " + "backwards extremity", + event_id, + ) + # Calculate the state after each of the previous events, and # resolve them to find the correct state at the current event. 
            auth_chains = set()
@@ -365,10 +369,7 @@ class FederationHandler(BaseHandler):
                 # know about
                 for p in prevs - seen:
                     logger.info(
-                        "[%s %s] Requesting state at missing prev_event %s",
-                        room_id,
-                        event_id,
-                        p,
+                        "Requesting state at missing prev_event %s", p,
                     )
 
                     room_version = yield self.store.get_room_version(room_id)
@@ -612,8 +613,8 @@ class FederationHandler(BaseHandler):
         failed_to_fetch = desired_events - event_map.keys()
         if failed_to_fetch:
             logger.warning(
-                "Failed to fetch missing state/auth events for %s: %s",
-                room_id,
+                "Failed to fetch missing state/auth events for %s %s",
+                event_id,
                 failed_to_fetch,
             )

From 7712e751b87b83086a8cb0a1cda1eef40e177d07 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 10 Dec 2019 16:54:34 +0000
Subject: [PATCH 55/60] Convert federation backfill to async

PaginationHandler.get_messages is only called by RoomMessageListRestServlet,
which is async.

Chase the code path down from there:
 - FederationHandler.maybe_backfill (and nested try_backfill)
 - FederationHandler.backfill
---
 synapse/handlers/federation.py | 47 ++++++++++++++++------------------
 synapse/handlers/pagination.py | 27 ++++++++++---------
 2 files changed, 35 insertions(+), 39 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index cf9c46d02..e54d509b6 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -756,8 +756,7 @@ class FederationHandler(BaseHandler):
             yield self.user_joined_room(user, room_id)
 
     @log_function
-    @defer.inlineCallbacks
-    def backfill(self, dest, room_id, limit, extremities):
+    async def backfill(self, dest, room_id, limit, extremities):
         """ Trigger a backfill request to `dest` for the given `room_id`
 
         This will attempt to get more events from the remote. If the other side
@@ -774,9 +773,9 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
 
-        events = yield self.federation_client.backfill(
+        events = await self.federation_client.backfill(
             dest, room_id, limit=limit, extremities=extremities
         )
 
@@ -791,7 +790,7 @@ class FederationHandler(BaseHandler):
         #     self._sanity_check_event(ev)
 
         # Don't bother processing events we already have.
-        seen_events = yield self.store.have_events_in_timeline(
+        seen_events = await self.store.have_events_in_timeline(
             set(e.event_id for e in events)
         )
 
@@ -814,7 +813,7 @@ class FederationHandler(BaseHandler):
         state_events = {}
         events_to_state = {}
         for e_id in edges:
-            state, auth = yield self._get_state_for_room(
+            state, auth = await self._get_state_for_room(
                 destination=dest, room_id=room_id, event_id=e_id
             )
             auth_events.update({a.event_id: a for a in auth})
@@ -839,7 +838,7 @@ class FederationHandler(BaseHandler):
         # We repeatedly do this until we stop finding new auth events.
while missing_auth - failed_to_fetch: logger.info("Missing auth for backfill: %r", missing_auth) - ret_events = yield self.store.get_events(missing_auth - failed_to_fetch) + ret_events = await self.store.get_events(missing_auth - failed_to_fetch) auth_events.update(ret_events) required_auth.update( @@ -853,7 +852,7 @@ class FederationHandler(BaseHandler): missing_auth - failed_to_fetch, ) - results = yield make_deferred_yieldable( + results = await make_deferred_yieldable( defer.gatherResults( [ run_in_background( @@ -880,7 +879,7 @@ class FederationHandler(BaseHandler): failed_to_fetch = missing_auth - set(auth_events) - seen_events = yield self.store.have_seen_events( + seen_events = await self.store.have_seen_events( set(auth_events.keys()) | set(state_events.keys()) ) @@ -942,7 +941,7 @@ class FederationHandler(BaseHandler): ) ) - yield self._handle_new_events(dest, ev_infos, backfilled=True) + await self._handle_new_events(dest, ev_infos, backfilled=True) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -958,16 +957,15 @@ class FederationHandler(BaseHandler): # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. - yield self._handle_new_event(dest, event, backfilled=True) + await self._handle_new_event(dest, event, backfilled=True) return events - @defer.inlineCallbacks - def maybe_backfill(self, room_id, current_depth): + async def maybe_backfill(self, room_id, current_depth): """Checks the database to see if we should backfill before paginating, and if so do. """ - extremities = yield self.store.get_oldest_events_with_depth_in_room(room_id) + extremities = await self.store.get_oldest_events_with_depth_in_room(room_id) if not extremities: logger.debug("Not backfilling as no extremeties found.") @@ -999,9 +997,9 @@ class FederationHandler(BaseHandler): # state *before* the event, ignoring the special casing certain event # types have. - forward_events = yield self.store.get_successor_events(list(extremities)) + forward_events = await self.store.get_successor_events(list(extremities)) - extremities_events = yield self.store.get_events( + extremities_events = await self.store.get_events( forward_events, redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, @@ -1009,7 +1007,7 @@ class FederationHandler(BaseHandler): # We set `check_history_visibility_only` as we might otherwise get false # positives from users having been erased. - filtered_extremities = yield filter_events_for_server( + filtered_extremities = await filter_events_for_server( self.storage, self.server_name, list(extremities_events.values()), @@ -1039,7 +1037,7 @@ class FederationHandler(BaseHandler): # First we try hosts that are already in the room # TODO: HEURISTIC ALERT. - curr_state = yield self.state_handler.get_current_state(room_id) + curr_state = await self.state_handler.get_current_state(room_id) def get_domains_from_state(state): """Get joined domains from state @@ -1078,12 +1076,11 @@ class FederationHandler(BaseHandler): domain for domain, depth in curr_domains if domain != self.server_name ] - @defer.inlineCallbacks - def try_backfill(domains): + async def try_backfill(domains): # TODO: Should we try multiple of these at a time? 
for dom in domains: try: - yield self.backfill( + await self.backfill( dom, room_id, limit=100, extremities=extremities ) # If this succeeded then we probably already have the @@ -1114,7 +1111,7 @@ class FederationHandler(BaseHandler): return False - success = yield try_backfill(likely_domains) + success = await try_backfill(likely_domains) if success: return True @@ -1128,7 +1125,7 @@ class FederationHandler(BaseHandler): logger.debug("calling resolve_state_groups in _maybe_backfill") resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) - states = yield make_deferred_yieldable( + states = await make_deferred_yieldable( defer.gatherResults( [resolve(room_id, [e]) for e in event_ids], consumeErrors=True ) @@ -1138,7 +1135,7 @@ class FederationHandler(BaseHandler): # event_ids. states = dict(zip(event_ids, [s.state for s in states])) - state_map = yield self.store.get_events( + state_map = await self.store.get_events( [e_id for ids in itervalues(states) for e_id in itervalues(ids)], get_prev_content=False, ) @@ -1154,7 +1151,7 @@ class FederationHandler(BaseHandler): for e_id, _ in sorted_extremeties_tuple: likely_domains = get_domains_from_state(states[e_id]) - success = yield try_backfill( + success = await try_backfill( [dom for dom, _ in likely_domains if dom not in tried_domains] ) if success: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 8514ddc60..00a6afc96 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -280,8 +280,7 @@ class PaginationHandler(object): await self.storage.purge_events.purge_room(room_id) - @defer.inlineCallbacks - def get_messages( + async def get_messages( self, requester, room_id=None, @@ -307,7 +306,7 @@ class PaginationHandler(object): room_token = pagin_config.from_token.room_key else: pagin_config.from_token = ( - yield self.hs.get_event_sources().get_current_token_for_pagination() + await self.hs.get_event_sources().get_current_token_for_pagination() ) room_token = pagin_config.from_token.room_key @@ -319,11 +318,11 @@ class PaginationHandler(object): source_config = pagin_config.get_source_config("room") - with (yield self.pagination_lock.read(room_id)): + with (await self.pagination_lock.read(room_id)): ( membership, member_event_id, - ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id) + ) = await self.auth.check_in_room_or_world_readable(room_id, user_id) if source_config.direction == "b": # if we're going backwards, we might need to backfill. This @@ -331,7 +330,7 @@ class PaginationHandler(object): if room_token.topological: max_topo = room_token.topological else: - max_topo = yield self.store.get_max_topological_token( + max_topo = await self.store.get_max_topological_token( room_id, room_token.stream ) @@ -339,18 +338,18 @@ class PaginationHandler(object): # If they have left the room then clamp the token to be before # they left the room, to save the effort of loading from the # database. 
- leave_token = yield self.store.get_topological_token_for_event( + leave_token = await self.store.get_topological_token_for_event( member_event_id ) leave_token = RoomStreamToken.parse(leave_token) if leave_token.topological < max_topo: source_config.from_key = str(leave_token) - yield self.hs.get_handlers().federation_handler.maybe_backfill( + await self.hs.get_handlers().federation_handler.maybe_backfill( room_id, max_topo ) - events, next_key = yield self.store.paginate_room_events( + events, next_key = await self.store.paginate_room_events( room_id=room_id, from_key=source_config.from_key, to_key=source_config.to_key, @@ -365,7 +364,7 @@ class PaginationHandler(object): if event_filter: events = event_filter.filter(events) - events = yield filter_events_for_client( + events = await filter_events_for_client( self.storage, user_id, events, is_peeking=(member_event_id is None) ) @@ -385,19 +384,19 @@ class PaginationHandler(object): (EventTypes.Member, event.sender) for event in events ) - state_ids = yield self.state_store.get_state_ids_for_event( + state_ids = await self.state_store.get_state_ids_for_event( events[0].event_id, state_filter=state_filter ) if state_ids: - state = yield self.store.get_events(list(state_ids.values())) + state = await self.store.get_events(list(state_ids.values())) state = state.values() time_now = self.clock.time_msec() chunk = { "chunk": ( - yield self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( events, time_now, as_client_event=as_client_event ) ), @@ -406,7 +405,7 @@ class PaginationHandler(object): } if state: - chunk["state"] = yield self._event_serializer.serialize_events( + chunk["state"] = await self._event_serializer.serialize_events( state, time_now, as_client_event=as_client_event ) From e77237b9350a7054657b1a641883a09c6b5d44f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 10 Dec 2019 17:01:37 +0000 Subject: [PATCH 56/60] convert to async: FederationHandler.on_receive_pdu and associated functions: * on_receive_pdu * handle_queued_pdus * get_missing_events_for_pdu --- synapse/handlers/federation.py | 49 +++++++++++++++------------------- tests/test_federation.py | 14 ++++++---- 2 files changed, 31 insertions(+), 32 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e54d509b6..01d9c5120 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -165,8 +165,7 @@ class FederationHandler(BaseHandler): self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages - @defer.inlineCallbacks - def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False): + async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events @@ -176,8 +175,6 @@ class FederationHandler(BaseHandler): pdu (FrozenEvent): received PDU sent_to_us_directly (bool): True if this event was pushed to us; False if we pulled it as the result of a missing prev_event. 
-
-        Returns (Deferred): completes with None
         """
 
         room_id = pdu.room_id
@@ -186,7 +183,7 @@ class FederationHandler(BaseHandler):
         logger.info("handling received PDU: %s", pdu)
 
         # We reprocess pdus when we have seen them only as outliers
-        existing = yield self.store.get_event(
+        existing = await self.store.get_event(
             event_id, allow_none=True, allow_rejected=True
         )
 
@@ -230,7 +227,7 @@
             #
             # Note that if we were never in the room then we would have already
             # dropped the event, since we wouldn't know the room version.
-            is_in_room = yield self.auth.check_host_in_room(room_id, self.server_name)
+            is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
             if not is_in_room:
                 logger.info(
                     "[%s %s] Ignoring PDU from %s as we're not in the room",
@@ -246,12 +243,12 @@
         # Get missing pdus if necessary.
         if not pdu.internal_metadata.is_outlier():
             # We only backfill backwards to the min depth.
-            min_depth = yield self.get_min_depth_for_context(pdu.room_id)
+            min_depth = await self.get_min_depth_for_context(pdu.room_id)
 
             logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth)
 
             prevs = set(pdu.prev_event_ids())
-            seen = yield self.store.have_seen_events(prevs)
+            seen = await self.store.have_seen_events(prevs)
 
             if min_depth and pdu.depth < min_depth:
                 # This is so that we don't notify the user about this
@@ -271,7 +268,7 @@
                     len(missing_prevs),
                     shortstr(missing_prevs),
                 )
-                with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+                with (await self._room_pdu_linearizer.queue(pdu.room_id)):
                     logger.info(
                         "[%s %s] Acquired room lock to fetch %d missing prev_events",
                         room_id,
@@ -280,7 +277,7 @@
                     )
 
                     try:
-                        yield self._get_missing_events_for_pdu(
+                        await self._get_missing_events_for_pdu(
                             origin, pdu, prevs, min_depth
                         )
                     except Exception as e:
@@ -291,7 +288,7 @@
 
                     # Update the set of things we've seen after trying to
                     # fetch the missing stuff
-                    seen = yield self.store.have_seen_events(prevs)
+                    seen = await self.store.have_seen_events(prevs)
 
                     if not prevs - seen:
                         logger.info(
@@ -355,7 +352,7 @@
             event_map = {event_id: pdu}
             try:
                 # Get the state of the events we know about
-                ours = yield self.state_store.get_state_groups_ids(room_id, seen)
+                ours = await self.state_store.get_state_groups_ids(room_id, seen)
 
                 # state_maps is a list of mappings from (type, state_key) to event_id
                 state_maps = list(
@@ -372,7 +369,7 @@
                         "Requesting state at missing prev_event %s", p,
                     )
 
-                    room_version = yield self.store.get_room_version(room_id)
+                    room_version = await self.store.get_room_version(room_id)
 
                     with nested_logging_context(p):
                         # note that if any of the missing prevs share missing state or
@@ -381,11 +378,11 @@
                         (
                             remote_state,
                             got_auth_chain,
-                        ) = yield self._get_state_for_room(origin, room_id, p)
+                        ) = await self._get_state_for_room(origin, room_id, p)
 
                         # we want the state *after* p; _get_state_for_room returns the
                         # state *before* p.
- remote_event = yield self.federation_client.get_pdu( + remote_event = await self.federation_client.get_pdu( [origin], p, room_version, outlier=True ) @@ -410,7 +407,7 @@ class FederationHandler(BaseHandler): for x in remote_state: event_map[x.event_id] = x - state_map = yield resolve_events_with_store( + state_map = await resolve_events_with_store( room_version, state_maps, event_map, @@ -422,7 +419,7 @@ class FederationHandler(BaseHandler): # First though we need to fetch all the events that are in # state_map, so we can build up the state below. - evs = yield self.store.get_events( + evs = await self.store.get_events( list(state_map.values()), get_prev_content=False, redact_behaviour=EventRedactBehaviour.AS_IS, @@ -446,12 +443,11 @@ class FederationHandler(BaseHandler): affected=event_id, ) - yield self._process_received_pdu( + await self._process_received_pdu( origin, pdu, state=state, auth_chain=auth_chain ) - @defer.inlineCallbacks - def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): + async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): """ Args: origin (str): Origin of the pdu. Will be called to get the missing events @@ -463,12 +459,12 @@ class FederationHandler(BaseHandler): room_id = pdu.room_id event_id = pdu.event_id - seen = yield self.store.have_seen_events(prevs) + seen = await self.store.have_seen_events(prevs) if not prevs - seen: return - latest = yield self.store.get_latest_event_ids_in_room(room_id) + latest = await self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us @@ -532,7 +528,7 @@ class FederationHandler(BaseHandler): # All that said: Let's try increasing the timout to 60s and see what happens. try: - missing_events = yield self.federation_client.get_missing_events( + missing_events = await self.federation_client.get_missing_events( origin, room_id, earliest_events_ids=list(latest), @@ -571,7 +567,7 @@ class FederationHandler(BaseHandler): ) with nested_logging_context(ev.event_id): try: - yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False) + await self.on_receive_pdu(origin, ev, sent_to_us_directly=False) except FederationError as e: if e.code == 403: logger.warning( @@ -1328,8 +1324,7 @@ class FederationHandler(BaseHandler): return True - @defer.inlineCallbacks - def _handle_queued_pdus(self, room_queue): + async def _handle_queued_pdus(self, room_queue): """Process PDUs which got queued up while we were busy send_joining. 
Args: @@ -1345,7 +1340,7 @@ class FederationHandler(BaseHandler): p.room_id, ) with nested_logging_context(p.event_id): - yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) + await self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warning( "Error handling queued PDU %s from %s: %s", p.event_id, origin, e diff --git a/tests/test_federation.py b/tests/test_federation.py index ad165d729..68684460c 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -1,6 +1,6 @@ from mock import Mock -from twisted.internet.defer import maybeDeferred, succeed +from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed from synapse.events import FrozenEvent from synapse.logging.context import LoggingContext @@ -70,8 +70,10 @@ class MessageAcceptTests(unittest.TestCase): ) # Send the join, it should return None (which is not an error) - d = self.handler.on_receive_pdu( - "test.serv", join_event, sent_to_us_directly=True + d = ensureDeferred( + self.handler.on_receive_pdu( + "test.serv", join_event, sent_to_us_directly=True + ) ) self.reactor.advance(1) self.assertEqual(self.successResultOf(d), None) @@ -119,8 +121,10 @@ class MessageAcceptTests(unittest.TestCase): ) with LoggingContext(request="lying_event"): - d = self.handler.on_receive_pdu( - "test.serv", lying_event, sent_to_us_directly=True + d = ensureDeferred( + self.handler.on_receive_pdu( + "test.serv", lying_event, sent_to_us_directly=True + ) ) # Step the reactor, so the database fetches come back From 4db394a4b3e9c823655cdd3e716a5f234107d337 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 10 Dec 2019 17:25:18 +0000 Subject: [PATCH 57/60] convert to async: FederationHandler._get_state_for_room ... and _get_events_from_store_or_dest --- synapse/handlers/federation.py | 42 +++++++++++++++++----------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 01d9c5120..724cae964 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -19,7 +19,7 @@ import itertools import logging -from typing import Dict, Iterable, Optional, Sequence, Tuple +from typing import Dict, Iterable, List, Optional, Sequence, Tuple import six from six import iteritems, itervalues @@ -579,30 +579,30 @@ class FederationHandler(BaseHandler): else: raise - @defer.inlineCallbacks @log_function - def _get_state_for_room(self, destination, room_id, event_id): + async def _get_state_for_room( + self, destination: str, room_id: str, event_id: str + ) -> Tuple[List[EventBase], List[EventBase]]: """Requests all of the room state at a given event from a remote homeserver. Args: - destination (str): The remote homeserver to query for the state. - room_id (str): The id of the room we're interested in. - event_id (str): The id of the event we want the state at. + destination:: The remote homeserver to query for the state. + room_id: The id of the room we're interested in. + event_id: The id of the event we want the state at. Returns: - Deferred[Tuple[List[EventBase], List[EventBase]]]: - A list of events in the state, and a list of events in the auth chain - for the given event. + A list of events in the state, and a list of events in the auth chain + for the given event. 
""" ( state_event_ids, auth_event_ids, - ) = yield self.federation_client.get_room_state_ids( + ) = await self.federation_client.get_room_state_ids( destination, room_id, event_id=event_id ) desired_events = set(state_event_ids + auth_event_ids) - event_map = yield self._get_events_from_store_or_dest( + event_map = await self._get_events_from_store_or_dest( destination, room_id, desired_events ) @@ -621,20 +621,20 @@ class FederationHandler(BaseHandler): return pdus, auth_chain - @defer.inlineCallbacks - def _get_events_from_store_or_dest(self, destination, room_id, event_ids): + async def _get_events_from_store_or_dest( + self, destination: str, room_id: str, event_ids: Iterable[str] + ) -> Dict[str, EventBase]: """Fetch events from a remote destination, checking if we already have them. Args: - destination (str) - room_id (str) - event_ids (Iterable[str]) + destination + room_id + event_ids Returns: - Deferred[dict[str, EventBase]]: A deferred resolving to a map - from event_id to event + map from event_id to event """ - fetched_events = yield self.store.get_events(event_ids, allow_rejected=True) + fetched_events = await self.store.get_events(event_ids, allow_rejected=True) missing_events = set(event_ids) - fetched_events.keys() @@ -647,7 +647,7 @@ class FederationHandler(BaseHandler): event_ids, ) - room_version = yield self.store.get_room_version(room_id) + room_version = await self.store.get_room_version(room_id) # XXX 20 requests at once? really? for batch in batch_iter(missing_events, 20): @@ -661,7 +661,7 @@ class FederationHandler(BaseHandler): for e_id in batch ] - res = yield make_deferred_yieldable( + res = await make_deferred_yieldable( defer.DeferredList(deferreds, consumeErrors=True) ) for success, result in res: From 6637d90d778f5604b4827ab4f7d9a4cf05802466 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 10 Dec 2019 17:27:13 +0000 Subject: [PATCH 58/60] convert to async: FederationHandler._process_received_pdu also fix user_joined_room to consistently return deferreds --- synapse/handlers/federation.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 724cae964..bcd3b422a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -670,8 +670,7 @@ class FederationHandler(BaseHandler): return fetched_events - @defer.inlineCallbacks - def _process_received_pdu(self, origin, event, state, auth_chain): + async def _process_received_pdu(self, origin, event, state, auth_chain): """ Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. 
""" @@ -686,7 +685,7 @@ class FederationHandler(BaseHandler): if auth_chain: event_ids |= {e.event_id for e in auth_chain} - seen_ids = yield self.store.have_seen_events(event_ids) + seen_ids = await self.store.have_seen_events(event_ids) if state and auth_chain is not None: # If we have any state or auth_chain given to us by the replication @@ -713,18 +712,18 @@ class FederationHandler(BaseHandler): event_id, [e.event.event_id for e in event_infos], ) - yield self._handle_new_events(origin, event_infos) + await self._handle_new_events(origin, event_infos) try: - context = yield self._handle_new_event(origin, event, state=state) + context = await self._handle_new_event(origin, event, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) - room = yield self.store.get_room(room_id) + room = await self.store.get_room(room_id) if not room: try: - yield self.store.store_room( + await self.store.store_room( room_id=room_id, room_creator_user_id="", is_public=False ) except StoreError: @@ -737,11 +736,11 @@ class FederationHandler(BaseHandler): # changing their profile info. newly_joined = True - prev_state_ids = yield context.get_prev_state_ids(self.store) + prev_state_ids = await context.get_prev_state_ids(self.store) prev_state_id = prev_state_ids.get((event.type, event.state_key)) if prev_state_id: - prev_state = yield self.store.get_event( + prev_state = await self.store.get_event( prev_state_id, allow_none=True ) if prev_state and prev_state.membership == Membership.JOIN: @@ -749,7 +748,7 @@ class FederationHandler(BaseHandler): if newly_joined: user = UserID.from_string(event.state_key) - yield self.user_joined_room(user, room_id) + await self.user_joined_room(user, room_id) @log_function async def backfill(self, dest, room_id, limit, extremities): @@ -2899,7 +2898,7 @@ class FederationHandler(BaseHandler): room_id=room_id, user_id=user.to_string(), change="joined" ) else: - return user_joined_room(self.distributor, user, room_id) + return defer.succeed(user_joined_room(self.distributor, user, room_id)) @defer.inlineCallbacks def get_room_complexity(self, remote_room_hosts, room_id): From 5324bc20a628116aecf4781744cadd611f51d7a6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 10 Dec 2019 17:59:46 +0000 Subject: [PATCH 59/60] changelog --- changelog.d/6517.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6517.misc diff --git a/changelog.d/6517.misc b/changelog.d/6517.misc new file mode 100644 index 000000000..c6ffed995 --- /dev/null +++ b/changelog.d/6517.misc @@ -0,0 +1 @@ +Port some of FederationHandler to async/await. \ No newline at end of file From 20453565176cfd358212a23cf89dfd2deab1d690 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 11 Dec 2019 16:37:51 +0000 Subject: [PATCH 60/60] Add `include_event_in_state` to _get_state_for_room (#6521) Make it return the state *after* the requested event, rather than the one before it. This is a bit easier and requires fewer calls to get_events_from_store_or_dest. --- changelog.d/6521.misc | 1 + synapse/handlers/federation.py | 50 +++++++++++++++++++--------------- 2 files changed, 29 insertions(+), 22 deletions(-) create mode 100644 changelog.d/6521.misc diff --git a/changelog.d/6521.misc b/changelog.d/6521.misc new file mode 100644 index 000000000..d9a44389b --- /dev/null +++ b/changelog.d/6521.misc @@ -0,0 +1 @@ +Refactor some code in the event authentication path for clarity. 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index bcd3b422a..62985bab9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -378,22 +378,10 @@ class FederationHandler(BaseHandler): ( remote_state, got_auth_chain, - ) = await self._get_state_for_room(origin, room_id, p) - - # we want the state *after* p; _get_state_for_room returns the - # state *before* p. - remote_event = await self.federation_client.get_pdu( - [origin], p, room_version, outlier=True + ) = await self._get_state_for_room( + origin, room_id, p, include_event_in_state=True ) - if remote_event is None: - raise Exception( - "Unable to get missing prev_event %s" % (p,) - ) - - if remote_event.is_state(): - remote_state.append(remote_event) - # XXX hrm I'm not convinced that duplicate events will compare # for equality, so I'm not sure this does what the author # hoped. @@ -579,20 +567,25 @@ class FederationHandler(BaseHandler): else: raise - @log_function async def _get_state_for_room( - self, destination: str, room_id: str, event_id: str + self, + destination: str, + room_id: str, + event_id: str, + include_event_in_state: bool = False, ) -> Tuple[List[EventBase], List[EventBase]]: """Requests all of the room state at a given event from a remote homeserver. Args: - destination:: The remote homeserver to query for the state. + destination: The remote homeserver to query for the state. room_id: The id of the room we're interested in. event_id: The id of the event we want the state at. + include_event_in_state: if true, the event itself will be included in the + returned state event list. Returns: - A list of events in the state, and a list of events in the auth chain - for the given event. + A list of events in the state, possibly including the event itself, and + a list of events in the auth chain for the given event. """ ( state_event_ids, @@ -602,6 +595,10 @@ class FederationHandler(BaseHandler): ) desired_events = set(state_event_ids + auth_event_ids) + + if include_event_in_state: + desired_events.add(event_id) + event_map = await self._get_events_from_store_or_dest( destination, room_id, desired_events ) @@ -614,12 +611,21 @@ class FederationHandler(BaseHandler): failed_to_fetch, ) - pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map] - auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] + remote_state = [ + event_map[e_id] for e_id in state_event_ids if e_id in event_map + ] + if include_event_in_state: + remote_event = event_map.get(event_id) + if not remote_event: + raise Exception("Unable to get missing prev_event %s" % (event_id,)) + if remote_event.is_state(): + remote_state.append(remote_event) + + auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] auth_chain.sort(key=lambda e: e.depth) - return pdus, auth_chain + return remote_state, auth_chain async def _get_events_from_store_or_dest( self, destination: str, room_id: str, event_ids: Iterable[str]
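
(Note: the series is truncated above, partway through [PATCH 60/60]. As a
closing illustration of that patch's include_event_in_state flag: a caller
that wants the state including the event itself no longer needs the separate
get_pdu round-trip that the removed hunk performed. The sketch below is
illustrative only and assumes the surrounding FederationHandler context.)

# Illustrative sketch -- not part of the patch series. Assumes `origin`,
# `room_id` and `p` (a missing prev_event id) as in the hunk above.
async def state_after_missing_prev_event(self, origin, room_id, p):
    remote_state, auth_chain = await self._get_state_for_room(
        origin, room_id, p, include_event_in_state=True
    )
    # The returned state already contains the event itself (if it is a state
    # event), so a (type, state_key) -> event_id map can be built directly.
    state_map = {
        (e.type, e.state_key): e.event_id for e in remote_state if e.is_state()
    }
    return state_map, auth_chain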