From 1cf3ff6b40a9f0e72c39e471e921a46f56e4511f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 9 Jul 2024 12:26:45 -0500 Subject: [PATCH 01/43] Add `rooms` `name` and `avatar` to Sliding Sync `/sync` (#17418) Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17418.feature | 1 + synapse/handlers/sliding_sync.py | 149 ++++++++++++++-------- tests/rest/client/test_sync.py | 208 +++++++++++++++++++++++++++++++ 3 files changed, 304 insertions(+), 54 deletions(-) create mode 100644 changelog.d/17418.feature diff --git a/changelog.d/17418.feature b/changelog.d/17418.feature new file mode 100644 index 000000000..c5e56bc50 --- /dev/null +++ b/changelog.d/17418.feature @@ -0,0 +1 @@ +Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 8e2f751c0..bb81ca9d9 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -18,6 +18,7 @@ # # import logging +from itertools import chain from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple import attr @@ -464,6 +465,7 @@ class SlidingSyncHandler: membership_state_keys = room_sync_config.required_state_map.get( EventTypes.Member ) + # Also see `StateFilter.must_await_full_state(...)` for comparison lazy_loading = ( membership_state_keys is not None and len(membership_state_keys) == 1 @@ -1202,7 +1204,7 @@ class SlidingSyncHandler: # Figure out any stripped state events for invite/knocks. This allows the # potential joiner to identify the room. - stripped_state: List[JsonDict] = [] + stripped_state: Optional[List[JsonDict]] = None if room_membership_for_user_at_to_token.membership in ( Membership.INVITE, Membership.KNOCK, @@ -1239,7 +1241,7 @@ class SlidingSyncHandler: # updates. initial = True - # Fetch the required state for the room + # Fetch the `required_state` for the room # # No `required_state` for invite/knock rooms (just `stripped_state`) # @@ -1247,13 +1249,15 @@ class SlidingSyncHandler: # of membership. Currently, we have to make this optional because # `invite`/`knock` rooms only have `stripped_state`. See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 + # + # Calculate the `StateFilter` based on the `required_state` for the room room_state: Optional[StateMap[EventBase]] = None + required_room_state: Optional[StateMap[EventBase]] = None if room_membership_for_user_at_to_token.membership not in ( Membership.INVITE, Membership.KNOCK, ): - # Calculate the `StateFilter` based on the `required_state` for the room - state_filter: Optional[StateFilter] = StateFilter.none() + required_state_filter = StateFilter.none() # If we have a double wildcard ("*", "*") in the `required_state`, we need # to fetch all state for the room # @@ -1276,7 +1280,7 @@ class SlidingSyncHandler: if StateValues.WILDCARD in room_sync_config.required_state_map.get( StateValues.WILDCARD, set() ): - state_filter = StateFilter.all() + required_state_filter = StateFilter.all() # TODO: `StateFilter` currently doesn't support wildcard event types. 
We're # currently working around this by returning all state to the client but it # would be nice to fetch less from the database and return just what the @@ -1285,7 +1289,7 @@ class SlidingSyncHandler: room_sync_config.required_state_map.get(StateValues.WILDCARD) is not None ): - state_filter = StateFilter.all() + required_state_filter = StateFilter.all() else: required_state_types: List[Tuple[str, Optional[str]]] = [] for ( @@ -1317,51 +1321,88 @@ class SlidingSyncHandler: else: required_state_types.append((state_type, state_key)) - state_filter = StateFilter.from_types(required_state_types) + required_state_filter = StateFilter.from_types(required_state_types) - # We can skip fetching state if we don't need any - if state_filter != StateFilter.none(): - # We can return all of the state that was requested if we're doing an - # initial sync - if initial: - # People shouldn't see past their leave/ban event - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - room_state = await self.storage_controllers.state.get_state_at( - room_id, - stream_position=to_token.copy_and_replace( - StreamKeyType.ROOM, - room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), - ), - state_filter=state_filter, - # Partially-stated rooms should have all state events except for - # the membership events and since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able - # to retrieve everything requested. Plus we don't want to block - # the whole sync waiting for this one room. - await_full_state=False, - ) - # Otherwise, we can get the latest current state in the room - else: - room_state = await self.storage_controllers.state.get_current_state( - room_id, - state_filter, - # Partially-stated rooms should have all state events except for - # the membership events and since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able - # to retrieve everything requested. Plus we don't want to block - # the whole sync waiting for this one room. - await_full_state=False, - ) - # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + # We need this base set of info for the response so let's just fetch it along + # with the `required_state` for the room + META_ROOM_STATE = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + state_filter = StateFilter( + types=StateFilter.from_types( + chain(META_ROOM_STATE, required_state_filter.to_types()) + ).types, + include_others=required_state_filter.include_others, + ) + + # We can return all of the state that was requested if this was the first + # time we've sent the room down this connection. + if initial: + # People shouldn't see past their leave/ban event + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + room_state = await self.storage_controllers.state.get_state_at( + room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), + ), + state_filter=state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. 
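+                        # (A partially-stated room is one we have joined over
+                        # federation but are still fetching the full set of state
+                        # events for in the background.)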
When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # Otherwise, we can get the latest current state in the room else: - # TODO: Once we can figure out if we've sent a room down this connection before, - # we can return updates instead of the full required state. - raise NotImplementedError() + room_state = await self.storage_controllers.state.get_current_state( + room_id, + state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + else: + # TODO: Once we can figure out if we've sent a room down this connection before, + # we can return updates instead of the full required state. + raise NotImplementedError() + + if required_state_filter != StateFilter.none(): + required_room_state = required_state_filter.filter_state(room_state) + + # Find the room name and avatar from the state + room_name: Optional[str] = None + room_avatar: Optional[str] = None + if room_state is not None: + name_event = room_state.get((EventTypes.Name, "")) + if name_event is not None: + room_name = name_event.content.get("name") + + avatar_event = room_state.get((EventTypes.RoomAvatar, "")) + if avatar_event is not None: + room_avatar = avatar_event.content.get("url") + elif stripped_state is not None: + for event in stripped_state: + if event["type"] == EventTypes.Name: + room_name = event.get("content", {}).get("name") + elif event["type"] == EventTypes.RoomAvatar: + room_avatar = event.get("content", {}).get("url") + + # Found everything so we can stop looking + if room_name is not None and room_avatar is not None: + break # Figure out the last bump event in the room last_bump_event_result = ( @@ -1378,16 +1419,16 @@ class SlidingSyncHandler: bump_stamp = bump_event_pos.stream return SlidingSyncResult.RoomResult( - # TODO: Dummy value - name=None, - # TODO: Dummy value - avatar=None, + name=room_name, + avatar=room_avatar, # TODO: Dummy value heroes=None, # TODO: Dummy value is_dm=False, initial=initial, - required_state=list(room_state.values()) if room_state else None, + required_state=( + list(required_room_state.values()) if required_room_state else None + ), timeline_events=timeline_events, bundled_aggregations=bundled_aggregations, stripped_state=stripped_state, diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 6ff1f03c9..f7852562b 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -1802,6 +1802,206 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["lists"]["foo-list"], ) + def test_rooms_meta_when_joined(self) -> None: + """ + Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included + in the response when the user is joined to the room. 
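+
+        For a joined user, these fields should track the *current* state of the
+        room.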
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Reflect the current state of the room + self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super room", + channel.json_body["rooms"][room_id1], + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID", + channel.json_body["rooms"][room_id1], + ) + + def test_rooms_meta_when_invited(self) -> None: + """ + Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included + in the response when the user is invited to the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Update the room name after user1 has left + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL after user1 has left + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # This should still reflect the current state of the room even when the user is + # invited. + self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super duper room", + channel.json_body["rooms"][room_id1], + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["avatar"], + "mxc://UPDATED_DUMMY_MEDIA_ID", + channel.json_body["rooms"][room_id1], + ) + + def test_rooms_meta_when_banned(self) -> None: + """ + Test that the `rooms` `name` and `avatar` (soon to test `heroes`) reflect the + state of the room when the user was banned (do not leak current state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # Update the room name after user1 has left + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL after user1 has left + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Reflect the state of the room at the time of leaving + self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super room", + channel.json_body["rooms"][room_id1], + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID", + channel.json_body["rooms"][room_id1], + ) + def test_rooms_limited_initial_sync(self) -> None: """ Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit` @@ -2973,6 +3173,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_incremental_sync(self) -> None: """ @@ -3027,6 +3228,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard(self) -> None: """ @@ -3084,6 +3286,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): state_map.values(), exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard_event_type(self) -> None: """ @@ -3147,6 +3350,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # events when the `event_type` is a wildcard. 
exact=False, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard_state_key(self) -> None: """ @@ -3192,6 +3396,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_lazy_loading_room_members(self) -> None: """ @@ -3247,6 +3452,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: @@ -3329,6 +3535,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_combine_superset(self) -> None: """ @@ -3401,6 +3608,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_partial_state(self) -> None: """ From 4ca13ce0dd6d1dc931cfde7e06191200ca0ec066 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Jul 2024 11:58:42 +0100 Subject: [PATCH 02/43] Handle to-device extensions to Sliding Sync (#17416) Implements MSC3885 --------- Co-authored-by: Eric Eastwood --- changelog.d/17416.feature | 1 + synapse/handlers/sliding_sync.py | 103 ++++++++++++- synapse/rest/client/sync.py | 17 ++- synapse/types/handlers/__init__.py | 35 ++++- synapse/types/rest/client/__init__.py | 48 ++++++- tests/rest/client/test_sync.py | 200 +++++++++++++++++++++++++- 6 files changed, 392 insertions(+), 12 deletions(-) create mode 100644 changelog.d/17416.feature diff --git a/changelog.d/17416.feature b/changelog.d/17416.feature new file mode 100644 index 000000000..1d119cf48 --- /dev/null +++ b/changelog.d/17416.feature @@ -0,0 +1 @@ +Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index bb81ca9d9..818b13621 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -542,11 +542,15 @@ class SlidingSyncHandler: rooms[room_id] = room_sync_result + extensions = await self.get_extensions_response( + sync_config=sync_config, to_token=to_token + ) + return SlidingSyncResult( next_pos=to_token, lists=lists, rooms=rooms, - extensions={}, + extensions=extensions, ) async def get_sync_room_ids_for_user( @@ -1445,3 +1449,100 @@ class SlidingSyncHandler: notification_count=0, highlight_count=0, ) + + async def get_extensions_response( + self, + sync_config: SlidingSyncConfig, + to_token: StreamToken, + ) -> SlidingSyncResult.Extensions: + """Handle extension requests. + + Args: + sync_config: Sync configuration + to_token: The point in the stream to sync up to. 
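+
+        Returns:
+            The responses for any extensions enabled in the request. For example, a
+            request body containing
+            `{"extensions": {"to_device": {"enabled": true}}}` will produce a result
+            with `to_device` populated.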
+ """ + + if sync_config.extensions is None: + return SlidingSyncResult.Extensions() + + to_device_response = None + if sync_config.extensions.to_device: + to_device_response = await self.get_to_device_extensions_response( + sync_config=sync_config, + to_device_request=sync_config.extensions.to_device, + to_token=to_token, + ) + + return SlidingSyncResult.Extensions(to_device=to_device_response) + + async def get_to_device_extensions_response( + self, + sync_config: SlidingSyncConfig, + to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, + to_token: StreamToken, + ) -> SlidingSyncResult.Extensions.ToDeviceExtension: + """Handle to-device extension (MSC3885) + + Args: + sync_config: Sync configuration + to_device_request: The to-device extension from the request + to_token: The point in the stream to sync up to. + """ + + user_id = sync_config.user.to_string() + device_id = sync_config.device_id + + # Check that this request has a valid device ID (not all requests have + # to belong to a device, and so device_id is None), and that the + # extension is enabled. + if device_id is None or not to_device_request.enabled: + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=f"{to_token.to_device_key}", + events=[], + ) + + since_stream_id = 0 + if to_device_request.since is not None: + # We've already validated this is an int. + since_stream_id = int(to_device_request.since) + + if to_token.to_device_key < since_stream_id: + # The since token is ahead of our current token, so we return an + # empty response. + logger.warning( + "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r", + since_stream_id, + to_token.to_device_key, + ) + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=to_device_request.since, + events=[], + ) + + # Delete everything before the given since token, as we know the + # device must have received them. 
+ deleted = await self.store.delete_messages_for_device( + user_id=user_id, + device_id=device_id, + up_to_stream_id=since_stream_id, + ) + + logger.debug( + "Deleted %d to-device messages up to %d for %s", + deleted, + since_stream_id, + user_id, + ) + + messages, stream_id = await self.store.get_messages_for_device( + user_id=user_id, + device_id=device_id, + from_stream_id=since_stream_id, + to_stream_id=to_token.to_device_key, + limit=min(to_device_request.limit, 100), # Limit to at most 100 events + ) + + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=f"{stream_id}", + events=messages, + ) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 13aed1dc8..94d5faf9f 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -942,7 +942,9 @@ class SlidingSyncRestServlet(RestServlet): response["rooms"] = await self.encode_rooms( requester, sliding_sync_result.rooms ) - response["extensions"] = {} # TODO: sliding_sync_result.extensions + response["extensions"] = await self.encode_extensions( + requester, sliding_sync_result.extensions + ) return response @@ -1054,6 +1056,19 @@ class SlidingSyncRestServlet(RestServlet): return serialized_rooms + async def encode_extensions( + self, requester: Requester, extensions: SlidingSyncResult.Extensions + ) -> JsonDict: + result = {} + + if extensions.to_device is not None: + result["to_device"] = { + "next_batch": extensions.to_device.next_batch, + "events": extensions.to_device.events, + } + + return result + def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: SyncRestServlet(hs).register(http_server) diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 43dcdf20d..a8a3a8f24 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -18,7 +18,7 @@ # # from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Optional, Tuple +from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple import attr from typing_extensions import TypedDict @@ -252,10 +252,39 @@ class SlidingSyncResult: count: int ops: List[Operation] + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Extensions: + """Responses for extensions + + Attributes: + to_device: The to-device extension (MSC3885) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ToDeviceExtension: + """The to-device extension (MSC3885) + + Attributes: + next_batch: The to-device stream token the client should use + to get more results + events: A list of to-device messages for the client + """ + + next_batch: str + events: Sequence[JsonMapping] + + def __bool__(self) -> bool: + return bool(self.events) + + to_device: Optional[ToDeviceExtension] = None + + def __bool__(self) -> bool: + return bool(self.to_device) + next_pos: StreamToken lists: Dict[str, SlidingWindowList] rooms: Dict[str, RoomResult] - extensions: JsonMapping + extensions: Extensions def __bool__(self) -> bool: """Make the result appear empty if there are no updates. 
This is used @@ -271,5 +300,5 @@ class SlidingSyncResult: next_pos=next_pos, lists={}, rooms={}, - extensions={}, + extensions=SlidingSyncResult.Extensions(), ) diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 55f6b4405..1e8fe76c9 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -276,10 +276,48 @@ class SlidingSyncBody(RequestBodyModel): class RoomSubscription(CommonRoomParameters): pass - class Extension(RequestBodyModel): - enabled: Optional[StrictBool] = False - lists: Optional[List[StrictStr]] = None - rooms: Optional[List[StrictStr]] = None + class Extensions(RequestBodyModel): + """The extensions section of the request. + + Extensions MUST have an `enabled` flag which defaults to `false`. If a client + sends an unknown extension name, the server MUST ignore it (or else backwards + compatibility between clients and servers is broken when a newer client tries to + communicate with an older server). + """ + + class ToDeviceExtension(RequestBodyModel): + """The to-device extension (MSC3885) + + Attributes: + enabled + limit: Maximum number of to-device messages to return + since: The `next_batch` from the previous sync response + """ + + enabled: Optional[StrictBool] = False + limit: StrictInt = 100 + since: Optional[StrictStr] = None + + @validator("since") + def since_token_check( + cls, value: Optional[StrictStr] + ) -> Optional[StrictStr]: + # `since` comes in as an opaque string token but we know that it's just + # an integer representing the position in the device inbox stream. We + # want to pre-validate it to make sure it works fine in downstream code. + if value is None: + return value + + try: + int(value) + except ValueError: + raise ValueError( + "'extensions.to_device.since' is invalid (should look like an int)" + ) + + return value + + to_device: Optional[ToDeviceExtension] = None # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: @@ -287,7 +325,7 @@ class SlidingSyncBody(RequestBodyModel): else: lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type] room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None - extensions: Optional[Dict[StrictStr, Extension]] = None + extensions: Optional[Extensions] = None @validator("lists") def lists_length_check( diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index f7852562b..304c0d4d3 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -38,7 +38,16 @@ from synapse.api.constants import ( ) from synapse.events import EventBase from synapse.handlers.sliding_sync import StateValues -from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync +from synapse.rest.client import ( + devices, + knock, + login, + read_marker, + receipts, + room, + sendtodevice, + sync, +) from synapse.server import HomeServer from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID from synapse.util import Clock @@ -47,7 +56,7 @@ from tests import unittest from tests.federation.transport.test_knocking import ( KnockingStrippedStateEventHelperMixin, ) -from tests.server import TimedOutException +from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import mark_event_as_partial_state logger = logging.getLogger(__name__) @@ -3696,3 +3705,190 @@ class 
SlidingSyncTestCase(unittest.HomeserverTestCase): ], channel.json_body["lists"]["foo-list"], ) + + +class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): + """Tests for the to-device sliding sync extension""" + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + sendtodevice.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.sync_endpoint = ( + "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + ) + + def _assert_to_device_response( + self, channel: FakeChannel, expected_messages: List[JsonDict] + ) -> str: + """Assert the sliding sync response was successful and has the expected + to-device messages. + + Returns the next_batch token from the to-device section. + """ + self.assertEqual(channel.code, 200, channel.json_body) + extensions = channel.json_body["extensions"] + to_device = extensions["to_device"] + self.assertIsInstance(to_device["next_batch"], str) + self.assertEqual(to_device["events"], expected_messages) + + return to_device["next_batch"] + + def test_no_data(self) -> None: + """Test that enabling to-device extension works, even if there is + no-data + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + + # We expect no to-device messages + self._assert_to_device_response(channel, []) + + def test_data_initial_sync(self) -> None: + """Test that we get to-device messages when we don't specify a since + token""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + # Send the to-device message + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_data_incremental_sync(self) -> None: + """Test that we get to-device messages over incremental syncs""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + # No to-device messages yet. 
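+        # Grab `next_batch` so we can pass it back as `since` on the next request.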
+ next_batch = self._assert_to_device_response(channel, []) + + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } + }, + }, + access_token=user1_tok, + ) + next_batch = self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + # The next sliding sync request should not include the to-device + # message. + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } + }, + }, + access_token=user1_tok, + ) + self._assert_to_device_response(channel, []) + + # An initial sliding sync request should not include the to-device + # message, as it should have been deleted + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self._assert_to_device_response(channel, []) From 3232bc29826d030e62c034ec582c73532bc7e465 Mon Sep 17 00:00:00 2001 From: Will Lewis <1543626+wrjlewis@users.noreply.github.com> Date: Wed, 10 Jul 2024 14:59:24 +0100 Subject: [PATCH 03/43] Upload new logo with white bg and update readme to use it (#17387) --- README.rst | 2 +- changelog.d/17387.doc | 1 + docs/element_logo_white_bg.svg | 94 ++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17387.doc create mode 100644 docs/element_logo_white_bg.svg diff --git a/README.rst b/README.rst index a52e0c193..d5625afe8 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7 +.. image:: ./docs/element_logo_white_bg.svg :height: 60px **Element Synapse - Matrix homeserver implementation** diff --git a/changelog.d/17387.doc b/changelog.d/17387.doc new file mode 100644 index 000000000..82be10f13 --- /dev/null +++ b/changelog.d/17387.doc @@ -0,0 +1 @@ +Update the readme image to have a white background, so that it is readable in dark mode. 
\ No newline at end of file diff --git a/docs/element_logo_white_bg.svg b/docs/element_logo_white_bg.svg new file mode 100644 index 000000000..50195ab1c --- /dev/null +++ b/docs/element_logo_white_bg.svg @@ -0,0 +1,94 @@ + + + + From 30e14c8510ab7b0b73284ba3f9bfa5970f8fef61 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 11:54:21 +0100 Subject: [PATCH 04/43] Bump zipp from 3.15.0 to 3.19.1 (#17427) --- poetry.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 19393bb6b..f578a33ed 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3113,18 +3113,18 @@ docs = ["Sphinx", "elementpath (>=4.1.5,<5.0.0)", "jinja2", "sphinx-rtd-theme"] [[package]] name = "zipp" -version = "3.15.0" +version = "3.19.1" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, - {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, + {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, + {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [[package]] name = "zope-event" From 5871daf87769f44f81c6c6adb78a7aedd6c4389e Mon Sep 17 00:00:00 2001 From: Joe Groocock Date: Thu, 11 Jul 2024 14:56:25 +0200 Subject: [PATCH 05/43] Use consistent casing between FROM and AS (#17431) Signed-off-by: Joe Groocock --- docker/Dockerfile | 4 ++-- docker/Dockerfile-dhvirtualenv | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 1bef8045c..1da196b12 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11 ### # We hardcode the use of Debian bookworm here because this could change upstream # and other Dockerfiles used for testing are expecting bookworm. -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements # RUN --mount is specific to buildkit and is documented at # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. 
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ ### ### Stage 1: builder ### -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder # install the OS build deps RUN \ diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index f00014456..9266f134b 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -24,7 +24,7 @@ ARG distro="" # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but # it's not obviously easier to use that than to build our own.) -FROM docker.io/library/${distro} as builder +FROM docker.io/library/${distro} AS builder RUN apt-get update -qq -o Acquire::Languages=none RUN env DEBIAN_FRONTEND=noninteractive apt-get install \ From 342f0c35b727fc807ead71ba2fbeb876cff523a8 Mon Sep 17 00:00:00 2001 From: villepeh <100730729+villepeh@users.noreply.github.com> Date: Thu, 11 Jul 2024 16:02:19 +0300 Subject: [PATCH 06/43] Add Red Hat Enterprise Linux and Rocky Linux installation instructions (#17423) Added RHEL/Rocky install instructions (PyPI). Instructions cover versions 8 and 9 which are the only supported ones - except for RHEL7 which is now on extended life cycle support phase. Large part of the guide is for installing Python 3.11 or 3.12. RHEL8 ships with Python 3.6 and RHEL9 ships with 3.9. Newer Python versions can be installed easily as they don't interfere with OS software that still relies on the default Python version. I was first planning to add prerequisites part to the prerequisites section and then install instructions on the top of the page but that section is for pre-built packages so it just didn't sound right. So I just dumped everything to the PyPI section of the page. But suggestions to change are welcome. I also didn't combine these with Fedora section. I haven't tested those packages on RHEL and Fedora ships with Python 3.12 out-of-box. --- changelog.d/17423.doc | 1 + docs/setup/forward_proxy.md | 2 +- docs/setup/installation.md | 57 ++++++++++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17423.doc diff --git a/changelog.d/17423.doc b/changelog.d/17423.doc new file mode 100644 index 000000000..972bc659e --- /dev/null +++ b/changelog.d/17423.doc @@ -0,0 +1 @@ +Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. diff --git a/docs/setup/forward_proxy.md b/docs/setup/forward_proxy.md index 3482691f8..f02c7b5fc 100644 --- a/docs/setup/forward_proxy.md +++ b/docs/setup/forward_proxy.md @@ -67,7 +67,7 @@ in Synapse can be deactivated. **NOTE**: This has an impact on security and is for testing purposes only! To deactivate the certificate validation, the following setting must be added to -your [homserver.yaml](../usage/configuration/homeserver_sample_config.md). +your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md). 
```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index ed3e59c47..f538e1498 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -309,7 +309,62 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
     libwebp-devel libxml2-devel libxslt-devel libpq-devel \
     python3-virtualenv libffi-devel openssl-devel python3-devel \
     libicu-devel
-sudo dnf groupinstall "Development Tools"
+sudo dnf group install "Development Tools"
+```
+
+##### Red Hat Enterprise Linux / Rocky Linux
+
+*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*
+
+It's recommended to use the latest Python versions.
+
+RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them.
+
+Python 3.11 and 3.12 are available for both RHEL 8 and 9.
+
+These commands should be run as the root user.
+
+RHEL 8
+```bash
+# Enable PowerTools repository
+dnf config-manager --set-enabled powertools
+```
+RHEL 9
+```bash
+# Enable CodeReady Linux Builder repository
+crb enable
+```
+
+Install a new version of Python. You only need one of these:
+```bash
+# Python 3.11
+dnf install python3.11 python3.11-devel
+```
+```bash
+# Python 3.12
+dnf install python3.12 python3.12-devel
+```
+Finally, install the common prerequisites:
+```bash
+dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
+dnf group install "Development Tools"
+```
+###### Using venv module instead of virtualenv command
+
+It's recommended to use the Python venv module directly rather than the virtualenv command.
+* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/).
+* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments.
+
+Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI.
+
+```bash
+mkdir -p ~/synapse
+# To use Python 3.11, simply use the command "python3.11" instead.
+python3.12 -m venv ~/synapse/env
+source ~/synapse/env/bin/activate
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install matrix-synapse
 ```
 
 ##### macOS
From 677142b6a965baf36a3cc59c4e997bbd67891d42 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Thu, 11 Jul 2024 07:03:13 -0600
Subject: [PATCH 07/43] Fix docs on `record_action` to clarify the actions are
 applied (#17426)

This looks like a copy/paste error: the function doesn't reject
anything, but instead allows the action count to go through regardless.
The remainder of the function's documentation appears correct.
---
 changelog.d/17426.misc      | 1 +
 synapse/api/ratelimiting.py | 5 ++---
 2 files changed, 3 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/17426.misc

diff --git a/changelog.d/17426.misc b/changelog.d/17426.misc
new file mode 100644
index 000000000..886e5d438
--- /dev/null
+++ b/changelog.d/17426.misc
@@ -0,0 +1 @@
+Fix documentation on `RateLimiter#record_action`.
\ No newline at end of file diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 26b871185..b80630c5d 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -236,9 +236,8 @@ class Ratelimiter: requester: The requester that is doing the action, if any. key: An arbitrary key used to classify an action. Defaults to the requester's user ID. - n_actions: The number of times the user wants to do this action. If the user - cannot do all of the actions, the user's action count is not incremented - at all. + n_actions: The number of times the user performed the action. May be negative + to "refund" the rate limit. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests. """ From 606da398fc4c693f2e75b9520264e0fc51d03581 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Jul 2024 16:00:44 +0100 Subject: [PATCH 08/43] Fix filtering room types on remote rooms (#17434) We can only fetch room types for rooms the server is in, so we need to only filter rooms that we're joined to. Also includes a perf fix to bulk fetch room types. --- changelog.d/17434.bugfix | 1 + synapse/handlers/sliding_sync.py | 22 ++++---- synapse/storage/databases/main/state.py | 52 ++++++++++++++++++- tests/handlers/test_sliding_sync.py | 68 +++++++++++++++++++++++++ 4 files changed, 130 insertions(+), 13 deletions(-) create mode 100644 changelog.d/17434.bugfix diff --git a/changelog.d/17434.bugfix b/changelog.d/17434.bugfix new file mode 100644 index 000000000..c7cce5239 --- /dev/null +++ b/changelog.d/17434.bugfix @@ -0,0 +1 @@ +Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 818b13621..8e6c2fb86 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -24,13 +24,7 @@ from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple import attr from immutabledict import immutabledict -from synapse.api.constants import ( - AccountDataTypes, - Direction, - EventContentFields, - EventTypes, - Membership, -) +from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membership from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations @@ -959,11 +953,15 @@ class SlidingSyncHandler: # provided in the list. `None` is a valid type for rooms which do not have a # room type. 
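            # For example, `room_types=[None, RoomTypes.SPACE]` matches rooms whose
            # create event has no `type` as well as spaces.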
if filters.room_types is not None or filters.not_room_types is not None: - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - create_event = await self.store.get_create_event_for_room(room_id) - room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + room_to_type = await self.store.bulk_get_room_type( + { + room_id + for room_id in filtered_room_id_set + # We only know the room types for joined rooms + if sync_room_map[room_id].membership == Membership.JOIN + } + ) + for room_id, room_type in room_to_type.items(): if ( filters.room_types is not None and room_type not in filters.room_types diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index b2a67aff8..5188b2f7a 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -41,7 +41,7 @@ from typing import ( import attr -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase @@ -298,6 +298,56 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): create_event = await self.get_event(create_id) return create_event + @cached(max_entries=10000) + async def get_room_type(self, room_id: str) -> Optional[str]: + """Get the room type for a given room. The server must be joined to the + given room. + """ + + row = await self.db_pool.simple_select_one( + table="room_stats_state", + keyvalues={"room_id": room_id}, + retcols=("room_type",), + allow_none=True, + desc="get_room_type", + ) + + if row is not None: + return row[0] + + # If we haven't updated `room_stats_state` with the room yet, query the + # create event directly. + create_event = await self.get_create_event_for_room(room_id) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + return room_type + + @cachedList(cached_method_name="get_room_type", list_name="room_ids") + async def bulk_get_room_type( + self, room_ids: Set[str] + ) -> Mapping[str, Optional[str]]: + """Bulk fetch room types for the given rooms, the server must be in all + the rooms given. + """ + + rows = await self.db_pool.simple_select_many_batch( + table="room_stats_state", + column="room_id", + iterable=room_ids, + retcols=("room_id", "room_type"), + desc="bulk_get_room_type", + ) + + # If we haven't updated `room_stats_state` with the room yet, query the + # create events directly. This should happen only rarely so we don't + # mind if we do this in a loop. 
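+        # (`room_stats_state` is populated asynchronously by the stats jobs, so
+        # very new rooms may not have a row yet.)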
+ results = dict(rows) + for room_id in room_ids - results.keys(): + create_event = await self.get_create_event_for_room(room_id) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + results[room_id] = room_type + + return results + @cached(max_entries=100000, iterable=True) async def get_partial_current_state_ids(self, room_id: str) -> StateMap[str]: """Get the current state event ids for a room based on the diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 9dd2363ad..eb4b0a05c 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -35,6 +35,8 @@ from synapse.api.constants import ( RoomTypes, ) from synapse.api.room_versions import RoomVersions +from synapse.events import make_event_from_dict +from synapse.events.snapshot import EventContext from synapse.handlers.sliding_sync import RoomSyncConfig, StateValues from synapse.rest import admin from synapse.rest.client import knock, login, room @@ -2791,6 +2793,72 @@ class FilterRoomsTestCase(HomeserverTestCase): self.assertEqual(filtered_room_map.keys(), {space_room_id}) + def test_filter_room_types_with_invite_remote_room(self) -> None: + """Test that we can apply a room type filter, even if we have an invite + for a remote room. + + This is a regression test. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a fake remote invite and persist it. + invite_room_id = "!some:room" + invite_event = make_event_from_dict( + { + "room_id": invite_room_id, + "sender": "@user:test.serv", + "state_key": user1_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.INVITE}, + "auth_events": [], + "prev_events": [], + }, + room_version=RoomVersions.V10, + ) + invite_event.internal_metadata.outlier = True + invite_event.internal_metadata.out_of_band_membership = True + + self.get_success( + self.store.maybe_store_room_on_outlier_membership( + room_id=invite_room_id, room_version=invite_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + self.get_success(persist_controller.persist_event(invite_event, context)) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self.get_success( + self.sliding_sync_handler.get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + ) + + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + room_types=[None, RoomTypes.SPACE], + ), + after_rooms_token, + ) + ) + + self.assertEqual(filtered_room_map.keys(), {room_id, invite_room_id}) + class SortRoomsTestCase(HomeserverTestCase): """ From 5a97bbd8958548f16461cfa3fde201f3e032d6b8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 11 Jul 2024 14:05:38 -0500 Subject: [PATCH 09/43] Add `heroes` and room summary fields to Sliding Sync `/sync` (#17419) Additional room summary fields: `joined_count`, `invited_count` Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17419.feature | 1 + 
synapse/handlers/sliding_sync.py | 278 ++++++++++++++++------- synapse/rest/client/sync.py | 32 ++- synapse/types/handlers/__init__.py | 18 +- synapse/types/rest/client/__init__.py | 4 - tests/rest/client/test_sync.py | 304 +++++++++++++++++++++++++- 6 files changed, 528 insertions(+), 109 deletions(-) create mode 100644 changelog.d/17419.feature diff --git a/changelog.d/17419.feature b/changelog.d/17419.feature new file mode 100644 index 000000000..186a27c47 --- /dev/null +++ b/changelog.d/17419.feature @@ -0,0 +1 @@ +Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 8e6c2fb86..e3230d28e 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -19,7 +19,7 @@ # import logging from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Final, List, Mapping, Optional, Set, Tuple import attr from immutabledict import immutabledict @@ -28,7 +28,9 @@ from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membe from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations +from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import CurrentStateDeltaMembership +from synapse.storage.roommember import MemberSummary from synapse.types import ( JsonDict, PersistedEventPosition, @@ -1043,6 +1045,103 @@ class SlidingSyncHandler: reverse=True, ) + async def get_current_state_ids_at( + self, + room_id: str, + room_membership_for_user_at_to_token: _RoomMembershipForUser, + state_filter: StateFilter, + to_token: StreamToken, + ) -> StateMap[str]: + """ + Get current state IDs for the user in the room according to their membership. This + will be the current state at the time of their LEAVE/BAN, otherwise will be the + current state <= to_token. + + Args: + room_id: The room ID to fetch data for + room_membership_for_user_at_token: Membership information for the user + in the room at the time of `to_token`. + to_token: The point in the stream to sync up to. + """ + + room_state_ids: StateMap[str] + # People shouldn't see past their leave/ban event + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + # TODO: `get_state_ids_at(...)` doesn't take into account the "current state" + room_state_ids = await self.storage_controllers.state.get_state_ids_at( + room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), + ), + state_filter=state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. 
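+                # (`await_full_state=False` returns whatever state we currently have
+                # instead of blocking until the full-state resync finishes.)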
+ await_full_state=False, + ) + # Otherwise, we can get the latest current state in the room + else: + room_state_ids = await self.storage_controllers.state.get_current_state_ids( + room_id, + state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + + return room_state_ids + + async def get_current_state_at( + self, + room_id: str, + room_membership_for_user_at_to_token: _RoomMembershipForUser, + state_filter: StateFilter, + to_token: StreamToken, + ) -> StateMap[EventBase]: + """ + Get current state for the user in the room according to their membership. This + will be the current state at the time of their LEAVE/BAN, otherwise will be the + current state <= to_token. + + Args: + room_id: The room ID to fetch data for + room_membership_for_user_at_token: Membership information for the user + in the room at the time of `to_token`. + to_token: The point in the stream to sync up to. + """ + room_state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=state_filter, + to_token=to_token, + ) + + event_map = await self.store.get_events(list(room_state_ids.values())) + + state_map = {} + for key, event_id in room_state_ids.items(): + event = event_map.get(event_id) + if event: + state_map[key] = event + + return state_map + async def get_room_sync_data( self, user: UserID, @@ -1074,7 +1173,7 @@ class SlidingSyncHandler: # membership. Currently, we have to make all of these optional because # `invite`/`knock` rooms only have `stripped_state`. See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 - timeline_events: Optional[List[EventBase]] = None + timeline_events: List[EventBase] = [] bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None limited: Optional[bool] = None prev_batch_token: Optional[StreamToken] = None @@ -1206,7 +1305,7 @@ class SlidingSyncHandler: # Figure out any stripped state events for invite/knocks. This allows the # potential joiner to identify the room. - stripped_state: Optional[List[JsonDict]] = None + stripped_state: List[JsonDict] = [] if room_membership_for_user_at_to_token.membership in ( Membership.INVITE, Membership.KNOCK, @@ -1243,6 +1342,44 @@ class SlidingSyncHandler: # updates. 
initial = True + # Check whether the room has a name set + name_state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=StateFilter.from_types([(EventTypes.Name, "")]), + to_token=to_token, + ) + name_event_id = name_state_ids.get((EventTypes.Name, "")) + + room_membership_summary: Mapping[str, MemberSummary] + empty_membership_summary = MemberSummary([], 0) + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + # TODO: Figure out how to get the membership summary for left/banned rooms + room_membership_summary = {} + else: + room_membership_summary = await self.store.get_room_summary(room_id) + # TODO: Reverse/rewind back to the `to_token` + + # `heroes` are required if the room name is not set. + # + # Note: When you're the first one on your server to be invited to a new room + # over federation, we only have access to some stripped state in + # `event.unsigned.invite_room_state` which currently doesn't include `heroes`, + # see https://github.com/matrix-org/matrix-spec/issues/380. This means that + # clients won't be able to calculate the room name when necessary and just a + # pitfall we have to deal with until that spec issue is resolved. + hero_user_ids: List[str] = [] + # TODO: Should we also check for `EventTypes.CanonicalAlias` + # (`m.room.canonical_alias`) as a fallback for the room name? see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 + if name_event_id is None: + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me=user.to_string() + ) + # Fetch the `required_state` for the room # # No `required_state` for invite/knock rooms (just `stripped_state`) @@ -1253,13 +1390,11 @@ class SlidingSyncHandler: # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 # # Calculate the `StateFilter` based on the `required_state` for the room - room_state: Optional[StateMap[EventBase]] = None - required_room_state: Optional[StateMap[EventBase]] = None + required_state_filter = StateFilter.none() if room_membership_for_user_at_to_token.membership not in ( Membership.INVITE, Membership.KNOCK, ): - required_state_filter = StateFilter.none() # If we have a double wildcard ("*", "*") in the `required_state`, we need # to fetch all state for the room # @@ -1325,86 +1460,65 @@ class SlidingSyncHandler: required_state_filter = StateFilter.from_types(required_state_types) - # We need this base set of info for the response so let's just fetch it along - # with the `required_state` for the room - META_ROOM_STATE = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + # We need this base set of info for the response so let's just fetch it along + # with the `required_state` for the room + meta_room_state = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + [ + (EventTypes.Member, hero_user_id) for hero_user_id in hero_user_ids + ] + state_filter = StateFilter.all() + if required_state_filter != StateFilter.all(): state_filter = StateFilter( types=StateFilter.from_types( - chain(META_ROOM_STATE, required_state_filter.to_types()) + chain(meta_room_state, required_state_filter.to_types()) ).types, include_others=required_state_filter.include_others, ) - # We can return all of the state that was requested if this was the first - # time we've sent the room down this connection. 
- if initial: - # People shouldn't see past their leave/ban event - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - room_state = await self.storage_controllers.state.get_state_at( - room_id, - stream_position=to_token.copy_and_replace( - StreamKeyType.ROOM, - room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), - ), - state_filter=state_filter, - # Partially-stated rooms should have all state events except for - # remote membership events. Since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able to - # retrieve everything requested. When we're lazy-loading, if there - # are some remote senders in the timeline, we should also have their - # membership event because we had to auth that timeline event. Plus - # we don't want to block the whole sync waiting for this one room. - await_full_state=False, - ) - # Otherwise, we can get the latest current state in the room - else: - room_state = await self.storage_controllers.state.get_current_state( - room_id, - state_filter, - # Partially-stated rooms should have all state events except for - # remote membership events. Since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able to - # retrieve everything requested. When we're lazy-loading, if there - # are some remote senders in the timeline, we should also have their - # membership event because we had to auth that timeline event. Plus - # we don't want to block the whole sync waiting for this one room. - await_full_state=False, - ) - # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` - else: - # TODO: Once we can figure out if we've sent a room down this connection before, - # we can return updates instead of the full required state. - raise NotImplementedError() + # We can return all of the state that was requested if this was the first + # time we've sent the room down this connection. + room_state: StateMap[EventBase] = {} + if initial: + room_state = await self.get_current_state_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=state_filter, + to_token=to_token, + ) + else: + # TODO: Once we can figure out if we've sent a room down this connection before, + # we can return updates instead of the full required state. + raise NotImplementedError() - if required_state_filter != StateFilter.none(): - required_room_state = required_state_filter.filter_state(room_state) + required_room_state: StateMap[EventBase] = {} + if required_state_filter != StateFilter.none(): + required_room_state = required_state_filter.filter_state(room_state) # Find the room name and avatar from the state room_name: Optional[str] = None + # TODO: Should we also check for `EventTypes.CanonicalAlias` + # (`m.room.canonical_alias`) as a fallback for the room name? 
see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 + name_event = room_state.get((EventTypes.Name, "")) + if name_event is not None: + room_name = name_event.content.get("name") + room_avatar: Optional[str] = None - if room_state is not None: - name_event = room_state.get((EventTypes.Name, "")) - if name_event is not None: - room_name = name_event.content.get("name") + avatar_event = room_state.get((EventTypes.RoomAvatar, "")) + if avatar_event is not None: + room_avatar = avatar_event.content.get("url") - avatar_event = room_state.get((EventTypes.RoomAvatar, "")) - if avatar_event is not None: - room_avatar = avatar_event.content.get("url") - elif stripped_state is not None: - for event in stripped_state: - if event["type"] == EventTypes.Name: - room_name = event.get("content", {}).get("name") - elif event["type"] == EventTypes.RoomAvatar: - room_avatar = event.get("content", {}).get("url") - - # Found everything so we can stop looking - if room_name is not None and room_avatar is not None: - break + # Assemble heroes: extract the info from the state we just fetched + heroes: List[SlidingSyncResult.RoomResult.StrippedHero] = [] + for hero_user_id in hero_user_ids: + member_event = room_state.get((EventTypes.Member, hero_user_id)) + if member_event is not None: + heroes.append( + SlidingSyncResult.RoomResult.StrippedHero( + user_id=hero_user_id, + display_name=member_event.content.get("displayname"), + avatar_url=member_event.content.get("avatar_url"), + ) + ) # Figure out the last bump event in the room last_bump_event_result = ( @@ -1423,14 +1537,11 @@ class SlidingSyncHandler: return SlidingSyncResult.RoomResult( name=room_name, avatar=room_avatar, - # TODO: Dummy value - heroes=None, + heroes=heroes, # TODO: Dummy value is_dm=False, initial=initial, - required_state=( - list(required_room_state.values()) if required_room_state else None - ), + required_state=list(required_room_state.values()), timeline_events=timeline_events, bundled_aggregations=bundled_aggregations, stripped_state=stripped_state, @@ -1438,9 +1549,12 @@ class SlidingSyncHandler: limited=limited, num_live=num_live, bump_stamp=bump_stamp, - # TODO: Dummy values - joined_count=0, - invited_count=0, + joined_count=room_membership_summary.get( + Membership.JOIN, empty_membership_summary + ).count, + invited_count=room_membership_summary.get( + Membership.INVITE, empty_membership_summary + ).count, # TODO: These are just dummy values. We could potentially just remove these # since notifications can only really be done correctly on the client anyway # (encrypted rooms). 
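Taken together, the new fields give a client everything it needs to render a room header: `name`/`avatar` when state provides them, otherwise `heroes` plus the `joined_count`/`invited_count` membership counts. As a rough illustration only (this helper is hypothetical, not part of this patch, and only loosely follows the room-summary naming rules from the Matrix client-server spec), a client-side calculation over the serialized hero shape could look like:

from typing import Dict, List, Optional


def calculate_room_display_name(
    name: Optional[str],
    heroes: List[Dict[str, str]],
    joined_count: int,
    invited_count: int,
) -> str:
    # If the server found an `m.room.name`, prefer it outright
    if name is not None:
        return name

    # Prefer each hero's `displayname`, falling back to their `user_id`
    hero_names = [hero.get("displayname", hero["user_id"]) for hero in heroes]
    if not hero_names:
        return "Empty Room"

    # Members that are neither heroes nor the syncing user themselves
    remainder = joined_count + invited_count - len(hero_names) - 1
    if remainder > 0:
        return f"{', '.join(hero_names)} and {remainder} others"
    if len(hero_names) == 1:
        return hero_names[0]
    return f"{', '.join(hero_names[:-1])} and {hero_names[-1]}"

The serialization of each hero into `user_id`/`displayname`/`avatar_url` that this sketch assumes is what the servlet change below produces.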
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 94d5faf9f..1d8cbfdf0 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -997,8 +997,21 @@ class SlidingSyncRestServlet(RestServlet): if room_result.avatar: serialized_rooms[room_id]["avatar"] = room_result.avatar - if room_result.heroes: - serialized_rooms[room_id]["heroes"] = room_result.heroes + if room_result.heroes is not None and len(room_result.heroes) > 0: + serialized_heroes = [] + for hero in room_result.heroes: + serialized_hero = { + "user_id": hero.user_id, + } + if hero.display_name is not None: + # Not a typo, just how "displayname" is spelled in the spec + serialized_hero["displayname"] = hero.display_name + + if hero.avatar_url is not None: + serialized_hero["avatar_url"] = hero.avatar_url + + serialized_heroes.append(serialized_hero) + serialized_rooms[room_id]["heroes"] = serialized_heroes # We should only include the `initial` key if it's `True` to save bandwidth. # The absense of this flag means `False`. @@ -1006,7 +1019,10 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["initial"] = room_result.initial # This will be omitted for invite/knock rooms with `stripped_state` - if room_result.required_state is not None: + if ( + room_result.required_state is not None + and len(room_result.required_state) > 0 + ): serialized_required_state = ( await self.event_serializer.serialize_events( room_result.required_state, @@ -1017,7 +1033,10 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["required_state"] = serialized_required_state # This will be omitted for invite/knock rooms with `stripped_state` - if room_result.timeline_events is not None: + if ( + room_result.timeline_events is not None + and len(room_result.timeline_events) > 0 + ): serialized_timeline = await self.event_serializer.serialize_events( room_result.timeline_events, time_now, @@ -1045,7 +1064,10 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["is_dm"] = room_result.is_dm # Stripped state only applies to invite/knock rooms - if room_result.stripped_state is not None: + if ( + room_result.stripped_state is not None + and len(room_result.stripped_state) > 0 + ): # TODO: `knocked_state` but that isn't specced yet. # # TODO: Instead of adding `knocked_state`, it would be good to rename diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index a8a3a8f24..409120470 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -200,18 +200,24 @@ class SlidingSyncResult: flag set. 
(same as sync v2) """ + @attr.s(slots=True, frozen=True, auto_attribs=True) + class StrippedHero: + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] + name: Optional[str] avatar: Optional[str] - heroes: Optional[List[EventBase]] + heroes: Optional[List[StrippedHero]] is_dm: bool initial: bool - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - required_state: Optional[List[EventBase]] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - timeline_events: Optional[List[EventBase]] + # Should be empty for invite/knock rooms with `stripped_state` + required_state: List[EventBase] + # Should be empty for invite/knock rooms with `stripped_state` + timeline_events: List[EventBase] bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] # Optional because it's only relevant to invite/knock rooms - stripped_state: Optional[List[JsonDict]] + stripped_state: List[JsonDict] # Only optional because it won't be included for invite/knock rooms with `stripped_state` prev_batch: Optional[StreamToken] # Only optional because it won't be included for invite/knock rooms with `stripped_state` diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 1e8fe76c9..dbe37bc71 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -200,9 +200,6 @@ class SlidingSyncBody(RequestBodyModel): } timeline_limit: The maximum number of timeline events to return per response. - include_heroes: Return a stripped variant of membership events (containing - `user_id` and optionally `avatar_url` and `displayname`) for the users used - to calculate the room name. filters: Filters to apply to the list before sorting. """ @@ -270,7 +267,6 @@ class SlidingSyncBody(RequestBodyModel): else: ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type] slow_get_all_rooms: Optional[StrictBool] = False - include_heroes: Optional[StrictBool] = False filters: Optional[Filters] = None class RoomSubscription(CommonRoomParameters): diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 304c0d4d3..0d0bea538 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -1813,8 +1813,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): def test_rooms_meta_when_joined(self) -> None: """ - Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included - in the response when the user is joined to the room. + Test that the `rooms` `name` and `avatar` are included in the response and + reflect the current state of the room when the user is joined to the room. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1866,11 +1866,19 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "mxc://DUMMY_MEDIA_ID", channel.json_body["rooms"][room_id1], ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 2, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 0, + ) def test_rooms_meta_when_invited(self) -> None: """ - Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included - in the response when the user is invited to the room. + Test that the `rooms` `name` and `avatar` are included in the response and + reflect the current state of the room when the user is invited to the room. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1892,7 +1900,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): tok=user2_tok, ) - self.helper.join(room_id1, user1_id, tok=user1_tok) + # User1 is invited to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) # Update the room name after user1 has left self.helper.send_state( @@ -1938,11 +1947,19 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "mxc://UPDATED_DUMMY_MEDIA_ID", channel.json_body["rooms"][room_id1], ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 1, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 1, + ) def test_rooms_meta_when_banned(self) -> None: """ - Test that the `rooms` `name` and `avatar` (soon to test `heroes`) reflect the - state of the room when the user was banned (do not leak current state). + Test that the `rooms` `name` and `avatar` reflect the state of the room when the + user was banned (do not leak current state). """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -2010,6 +2027,273 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "mxc://DUMMY_MEDIA_ID", channel.json_body["rooms"][room_id1], ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + # FIXME: The actual number should be "1" (user2) but we currently don't + # support this for rooms where the user has left/been banned. + 0, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 0, + ) + + def test_rooms_meta_heroes(self) -> None: + """ + Test that the `rooms` `heroes` are included in the response when the room + doesn't have a room name set. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + self.helper.join(room_id1, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) + + room_id2 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room2", + }, + ) + self.helper.join(room_id2, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Room1 has a name so we shouldn't see any `heroes` which the client would use + # the calculate the room name themselves. 
+ self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super room", + channel.json_body["rooms"][room_id1], + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("heroes")) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 2, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 1, + ) + + # Room2 doesn't have a name so we should see `heroes` populated + self.assertIsNone(channel.json_body["rooms"][room_id2].get("name")) + self.assertCountEqual( + [ + hero["user_id"] + for hero in channel.json_body["rooms"][room_id2].get("heroes", []) + ], + # Heroes shouldn't include the user themselves (we shouldn't see user1) + [user2_id, user3_id], + ) + self.assertEqual( + channel.json_body["rooms"][room_id2]["joined_count"], + 2, + ) + self.assertEqual( + channel.json_body["rooms"][room_id2]["invited_count"], + 1, + ) + + # We didn't request any state so we shouldn't see any `required_state` + self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(channel.json_body["rooms"][room_id2].get("required_state")) + + def test_rooms_meta_heroes_max(self) -> None: + """ + Test that the `rooms` `heroes` only includes the first 5 users (not including + yourself). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + user6_id = self.register_user("user6", "pass") + user6_tok = self.login(user6_id, "pass") + user7_id = self.register_user("user7", "pass") + user7_tok = self.login(user7_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room", + }, + ) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + self.helper.join(room_id1, user5_id, tok=user5_tok) + self.helper.join(room_id1, user6_id, tok=user6_tok) + self.helper.join(room_id1, user7_id, tok=user7_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Room2 doesn't have a name so we should see `heroes` populated + self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) + # FIXME: Remove this basic assertion and uncomment the better assertion below + # after https://github.com/element-hq/synapse/pull/17435 merges + self.assertEqual(len(channel.json_body["rooms"][room_id1].get("heroes", [])), 5) + # self.assertCountEqual( + # [ + # hero["user_id"] + # for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + # ], + # # Heroes should be the first 5 users in the room (excluding the user + # # themselves, we shouldn't see `user1`) + # [user2_id, user3_id, user4_id, user5_id, user6_id], + # ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 7, + ) + self.assertEqual( + 
channel.json_body["rooms"][room_id1]["invited_count"], + 0, + ) + + # We didn't request any state so we shouldn't see any `required_state` + self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state")) + + def test_rooms_meta_heroes_when_banned(self) -> None: + """ + Test that the `rooms` `heroes` are included in the response when the room + doesn't have a room name set but doesn't leak information past their ban. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + _user5_tok = self.login(user5_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room", + }, + ) + # User1 joins the room + self.helper.join(room_id1, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) + + # User1 is banned from the room + self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # User4 joins the room after user1 is banned + self.helper.join(room_id1, user4_id, tok=user4_tok) + # User5 is invited after user1 is banned + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Room2 doesn't have a name so we should see `heroes` populated + self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) + self.assertCountEqual( + [ + hero["user_id"] + for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + ], + # Heroes shouldn't include the user themselves (we shouldn't see user1). We + # also shouldn't see user4 since they joined after user1 was banned. + # + # FIXME: The actual result should be `[user2_id, user3_id]` but we currently + # don't support this for rooms where the user has left/been banned. + [], + ) + + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + # FIXME: The actual number should be "1" (user2) but we currently don't + # support this for rooms where the user has left/been banned. + 0, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + # We shouldn't see user5 since they were invited after user1 was banned. + # + # FIXME: The actual number should be "1" (user3) but we currently don't + # support this for rooms where the user has left/been banned. 
+ 0, + ) def test_rooms_limited_initial_sync(self) -> None: """ @@ -3081,11 +3365,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, channel.json_body) # Nothing to see for this banned user in the room in the token range - self.assertEqual( - channel.json_body["rooms"][room_id1]["timeline"], - [], - channel.json_body["rooms"][room_id1]["timeline"], - ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("timeline")) # No events returned in the timeline so nothing is "live" self.assertEqual( channel.json_body["rooms"][room_id1]["num_live"], From fb66e938b26e96384af5a72c71ed7d9dec12f1a2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 11 Jul 2024 18:19:26 -0500 Subject: [PATCH 10/43] Add `is_dm` room field to Sliding Sync `/sync` (#17429) Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17429.feature | 1 + synapse/handlers/sliding_sync.py | 75 ++++++++++++++++++++------------ tests/rest/client/test_sync.py | 23 ++++++++++ 3 files changed, 70 insertions(+), 29 deletions(-) create mode 100644 changelog.d/17429.feature diff --git a/changelog.d/17429.feature b/changelog.d/17429.feature new file mode 100644 index 000000000..608b75d63 --- /dev/null +++ b/changelog.d/17429.feature @@ -0,0 +1 @@ +Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index e3230d28e..904787ced 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -291,6 +291,7 @@ class _RoomMembershipForUser: sender: The person who sent the membership event newly_joined: Whether the user newly joined the room during the given token range + is_dm: Whether this user considers this room as a direct-message (DM) room """ room_id: str @@ -299,6 +300,7 @@ class _RoomMembershipForUser: membership: str sender: Optional[str] newly_joined: bool + is_dm: bool def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": return attr.evolve(self, **kwds) @@ -613,6 +615,7 @@ class SlidingSyncHandler: membership=room_for_user.membership, sender=room_for_user.sender, newly_joined=False, + is_dm=False, ) for room_for_user in room_for_user_list } @@ -652,6 +655,7 @@ class SlidingSyncHandler: # - 1c) Update room membership events to the point in time of the `to_token` # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`) # - 3) Figure out which rooms are `newly_joined` + # - 4) Figure out which rooms are DM's # 1) ----------------------------------------------------- @@ -714,6 +718,7 @@ class SlidingSyncHandler: membership=first_membership_change_after_to_token.prev_membership, sender=first_membership_change_after_to_token.prev_sender, newly_joined=False, + is_dm=False, ) else: # If we can't find the previous membership event, we shouldn't @@ -809,6 +814,7 @@ class SlidingSyncHandler: membership=last_membership_change_in_from_to_range.membership, sender=last_membership_change_in_from_to_range.sender, newly_joined=False, + is_dm=False, ) # 3) Figure out `newly_joined` @@ -846,6 +852,35 @@ class SlidingSyncHandler: room_id ].copy_and_replace(newly_joined=True) + # 4) Figure out which rooms the user considers to be direct-message (DM) rooms + # + # We're using global account data (`m.direct`) instead of checking for + # `is_direct` on membership events because that property only appears for + # the invitee 
membership event (doesn't show up for the inviter). + # + # We're unable to take `to_token` into account for global account data since + # we only keep track of the latest account data for the user. + dm_map = await self.store.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.DIRECT + ) + + # Flatten out the map. Account data is set by the client so it needs to be + # scrutinized. + dm_room_id_set = set() + if isinstance(dm_map, dict): + for room_ids in dm_map.values(): + # Account data should be a list of room IDs. Ignore anything else + if isinstance(room_ids, list): + for room_id in room_ids: + if isinstance(room_id, str): + dm_room_id_set.add(room_id) + + # 4) Fixup + for room_id in filtered_sync_room_id_set: + filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ + room_id + ].copy_and_replace(is_dm=room_id in dm_room_id_set) + return filtered_sync_room_id_set async def filter_rooms( @@ -869,41 +904,24 @@ class SlidingSyncHandler: A filtered dictionary of room IDs along with membership information in the room at the time of `to_token`. """ - user_id = user.to_string() - - # TODO: Apply filters - filtered_room_id_set = set(sync_room_map.keys()) # Filter for Direct-Message (DM) rooms if filters.is_dm is not None: - # We're using global account data (`m.direct`) instead of checking for - # `is_direct` on membership events because that property only appears for - # the invitee membership event (doesn't show up for the inviter). Account - # data is set by the client so it needs to be scrutinized. - # - # We're unable to take `to_token` into account for global account data since - # we only keep track of the latest account data for the user. - dm_map = await self.store.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.DIRECT - ) - - # Flatten out the map - dm_room_id_set = set() - if isinstance(dm_map, dict): - for room_ids in dm_map.values(): - # Account data should be a list of room IDs. 
Ignore anything else - if isinstance(room_ids, list): - for room_id in room_ids: - if isinstance(room_id, str): - dm_room_id_set.add(room_id) - if filters.is_dm: # Only DM rooms please - filtered_room_id_set = filtered_room_id_set.intersection(dm_room_id_set) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if sync_room_map[room_id].is_dm + } else: # Only non-DM rooms please - filtered_room_id_set = filtered_room_id_set.difference(dm_room_id_set) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if not sync_room_map[room_id].is_dm + } if filters.spaces: raise NotImplementedError() @@ -1538,8 +1556,7 @@ class SlidingSyncHandler: name=room_name, avatar=room_avatar, heroes=heroes, - # TODO: Dummy value - is_dm=False, + is_dm=room_membership_for_user_at_to_token.is_dm, initial=initial, required_state=list(required_room_state.values()), timeline_events=timeline_events, diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 0d0bea538..4236812db 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -1662,6 +1662,20 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): list(channel.json_body["lists"]["room-invites"]), ) + # Ensure DM's are correctly marked + self.assertDictEqual( + { + room_id: room.get("is_dm") + for room_id, room in channel.json_body["rooms"].items() + }, + { + invite_room_id: None, + room_id: None, + invited_dm_room_id: True, + joined_dm_room_id: True, + }, + ) + def test_sort_list(self) -> None: """ Test that the `lists` are sorted by `stream_ordering` @@ -1874,6 +1888,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1]["invited_count"], 0, ) + self.assertIsNone( + channel.json_body["rooms"][room_id1].get("is_dm"), + ) def test_rooms_meta_when_invited(self) -> None: """ @@ -1955,6 +1972,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1]["invited_count"], 1, ) + self.assertIsNone( + channel.json_body["rooms"][room_id1].get("is_dm"), + ) def test_rooms_meta_when_banned(self) -> None: """ @@ -2037,6 +2057,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1]["invited_count"], 0, ) + self.assertIsNone( + channel.json_body["rooms"][room_id1].get("is_dm"), + ) def test_rooms_meta_heroes(self) -> None: """ From ab62aa09da4a3c4444d80a9d3a899c685f1bb798 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 Jul 2024 04:37:10 -0500 Subject: [PATCH 11/43] Add room subscriptions to Sliding Sync `/sync` (#17432) Add room subscriptions to Sliding Sync `/sync` Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync Currently, you can only subscribe to rooms you have had *any* membership in before. In the future, we will allow `world_readable` rooms to be subscribed to without joining. 
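A room subscription reuses the same per-room config shape as a list entry (`required_state` plus `timeline_limit`, via `CommonRoomParameters`), keyed by the room ID the client wants. A minimal sketch of such a request in the style of the test cases in this series (the room ID is a placeholder; `self.sync_endpoint` and `user1_tok` follow the existing test conventions):

# Subscribe to a specific room by ID instead of (or in addition to) `lists`.
channel = self.make_request(
    "POST",
    self.sync_endpoint,
    {
        "room_subscriptions": {
            "!sub1:test": {  # placeholder room ID
                "required_state": [["m.room.name", ""]],
                "timeline_limit": 1,
            }
        }
    },
    access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)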
---
 changelog.d/17432.feature | 1 +
 synapse/handlers/sliding_sync.py | 403 ++++++++---
 tests/handlers/test_sliding_sync.py | 1045 ++++++++++++++++++++++-----
 tests/rest/client/test_sync.py | 347 +++++++--
 tests/unittest.py | 51 ++
 5 files changed, 1489 insertions(+), 358 deletions(-)
 create mode 100644 changelog.d/17432.feature
diff --git a/changelog.d/17432.feature b/changelog.d/17432.feature
new file mode 100644
index 000000000..c86f04c11
--- /dev/null
+++ b/changelog.d/17432.feature
@@ -0,0 +1 @@
+Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 904787ced..be98b379e 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -62,32 +62,79 @@ DEFAULT_BUMP_EVENT_TYPES = {
 }
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _RoomMembershipForUser:
+    """
+    Attributes:
+        room_id: The room ID of the membership event
+        event_id: The event ID of the membership event
+        event_pos: The stream position of the membership event
+        membership: The membership state of the user in the room
+        sender: The person who sent the membership event
+        newly_joined: Whether the user newly joined the room during the given token
+            range and is still joined to the room at the end of this range.
+        newly_left: Whether the user newly left (or was kicked from) the room during
+            the given token range and is still "leave" at the end of this range.
+        is_dm: Whether this user considers this room as a direct-message (DM) room
+    """
+
+    room_id: str
+    # Optional because state resets can affect room membership without a corresponding event.
+    event_id: Optional[str]
+    # Even during a state reset which removes the user from the room, we expect this to
+    # be set because `current_state_delta_stream` will note the position that the reset
+    # happened.
+    event_pos: PersistedEventPosition
+    # Even during a state reset which removes the user from the room, we expect this to
+    # be set to `LEAVE` because we can make that assumption based on the situation (see
+    # `get_current_state_delta_membership_changes_for_user(...)`)
+    membership: str
+    # Optional because state resets can affect room membership without a corresponding event.
+    sender: Optional[str]
+    newly_joined: bool
+    newly_left: bool
+    is_dm: bool
+
+    def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser":
+        return attr.evolve(self, **kwds)
+
+
 def filter_membership_for_sync(
-    *, membership: str, user_id: str, sender: Optional[str]
+    *, user_id: str, room_membership_for_user: _RoomMembershipForUser
 ) -> bool:
     """
     Returns True if the membership event should be included in the sync response,
     otherwise False.
 
     Attributes:
-        membership: The membership state of the user in the room.
         user_id: The user ID that the membership applies to
-        sender: The person who sent the membership event
+        room_membership_for_user: Membership information for the user in the room
     """
 
-    # Everything except `Membership.LEAVE` because we want everything that's *still*
-    # relevant to the user. There are few more things to include in the sync response
-    # (newly_left) but those are handled separately.
+ membership = room_membership_for_user.membership + sender = room_membership_for_user.sender + newly_left = room_membership_for_user.newly_left + + # We want to allow everything except rooms the user has left unless `newly_left` + # because we want everything that's *still* relevant to the user. We include + # `newly_left` rooms because the last event that the user should see is their own + # leave event. # - # This logic includes kicks (leave events where the sender is not the same user) and - # can be read as "anything that isn't a leave or a leave with a different sender". + # A leave != kick. This logic includes kicks (leave events where the sender is not + # the same user). # - # When `sender=None` and `membership=Membership.LEAVE`, it means that a state reset - # happened that removed the user from the room, or the user was the last person - # locally to leave the room which caused the server to leave the room. In both - # cases, we can just remove the rooms since they are no longer relevant to the user. - # They could still be added back later if they are `newly_left`. - return membership != Membership.LEAVE or sender not in (user_id, None) + # When `sender=None`, it means that a state reset happened that removed the user + # from the room without a corresponding leave event. We can just remove the rooms + # since they are no longer relevant to the user but will still appear if they are + # `newly_left`. + return ( + # Anything except leave events + membership != Membership.LEAVE + # Unless... + or newly_left + # Allow kicks + or (membership == Membership.LEAVE and sender not in (user_id, None)) + ) # We can't freeze this class because we want to update it in place with the @@ -281,31 +328,6 @@ class StateValues: LAZY: Final = "$LAZY" -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _RoomMembershipForUser: - """ - Attributes: - event_id: The event ID of the membership event - event_pos: The stream position of the membership event - membership: The membership state of the user in the room - sender: The person who sent the membership event - newly_joined: Whether the user newly joined the room during the given token - range - is_dm: Whether this user considers this room as a direct-message (DM) room - """ - - room_id: str - event_id: Optional[str] - event_pos: PersistedEventPosition - membership: str - sender: Optional[str] - newly_joined: bool - is_dm: bool - - def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": - return attr.evolve(self, **kwds) - - class SlidingSyncHandler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @@ -424,18 +446,31 @@ class SlidingSyncHandler: # See https://github.com/matrix-org/matrix-doc/issues/1144 raise NotImplementedError() + # Get all of the room IDs that the user should be able to see in the sync + # response + has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 + has_room_subscriptions = ( + sync_config.room_subscriptions is not None + and len(sync_config.room_subscriptions) > 0 + ) + if has_lists or has_room_subscriptions: + room_membership_for_user_map = ( + await self.get_room_membership_for_user_at_to_token( + user=sync_config.user, + to_token=to_token, + from_token=from_token, + ) + ) + # Assemble sliding window lists lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} # Keep track of the rooms that we're going to display and need to fetch more # info about relevant_room_map: Dict[str, RoomSyncConfig] = {} - if sync_config.lists: - # Get all of the room IDs that the user 
should be able to see in the sync - # response - sync_room_map = await self.get_sync_room_ids_for_user( - sync_config.user, - from_token=from_token, - to_token=to_token, + if has_lists and sync_config.lists is not None: + sync_room_map = await self.filter_rooms_relevant_for_sync( + user=sync_config.user, + room_membership_for_user_map=room_membership_for_user_map, ) for list_key, list_config in sync_config.lists.items(): @@ -524,7 +559,35 @@ class SlidingSyncHandler: ops=ops, ) - # TODO: if (sync_config.room_subscriptions): + # Handle room subscriptions + if has_room_subscriptions and sync_config.room_subscriptions is not None: + for room_id, room_subscription in sync_config.room_subscriptions.items(): + room_membership_for_user_at_to_token = ( + await self.check_room_subscription_allowed_for_user( + room_id=room_id, + room_membership_for_user_map=room_membership_for_user_map, + to_token=to_token, + ) + ) + + # Skip this room if the user isn't allowed to see it + if not room_membership_for_user_at_to_token: + continue + + room_membership_for_user_map[room_id] = ( + room_membership_for_user_at_to_token + ) + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. + room_sync_config = RoomSyncConfig.from_room_config(room_subscription) + existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + existing_room_sync_config.combine_room_sync_config(room_sync_config) + else: + relevant_room_map[room_id] = room_sync_config # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} @@ -533,7 +596,9 @@ class SlidingSyncHandler: user=sync_config.user, room_id=room_id, room_sync_config=room_sync_config, - room_membership_for_user_at_to_token=sync_room_map[room_id], + room_membership_for_user_at_to_token=room_membership_for_user_map[ + room_id + ], from_token=from_token, to_token=to_token, ) @@ -551,28 +616,23 @@ class SlidingSyncHandler: extensions=extensions, ) - async def get_sync_room_ids_for_user( + async def get_room_membership_for_user_at_to_token( self, user: UserID, to_token: StreamToken, - from_token: Optional[StreamToken] = None, + from_token: Optional[StreamToken], ) -> Dict[str, _RoomMembershipForUser]: """ - Fetch room IDs that should be listed for this user in the sync response (the - full room list that will be filtered, sorted, and sliced). + Fetch room IDs that the user has had membership in (the full room list including + long-lost left rooms that will be filtered, sorted, and sliced). - We're looking for rooms where the user has the following state in the token - range (> `from_token` and <= `to_token`): + We're looking for rooms where the user has had any sort of membership in the + token range (> `from_token` and <= `to_token`) - - `invite`, `join`, `knock`, `ban` membership events - - Kicks (`leave` membership events where `sender` is different from the - `user_id`/`state_key`) - - `newly_left` (rooms that were left during the given token range) - - In order for bans/kicks to not show up in sync, you need to `/forget` those - rooms. This doesn't modify the event itself though and only adds the - `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way - to tell when a room was forgotten at the moment so we can't factor it into the - from/to range. + In order for bans/kicks to not show up, you need to `/forget` those rooms. 
This + doesn't modify the event itself though and only adds the `forgotten` flag to the + `room_memberships` table in Synapse. There isn't a way to tell when a room was + forgotten at the moment so we can't factor it into the token range. Args: user: User to fetch rooms for @@ -580,8 +640,8 @@ class SlidingSyncHandler: from_token: The point in the stream to sync from. Returns: - A dictionary of room IDs that should be listed in the sync response along - with membership information in that room at the time of `to_token`. + A dictionary of room IDs that the user has had membership in along with + membership information in that room at the time of `to_token`. """ user_id = user.to_string() @@ -592,9 +652,6 @@ class SlidingSyncHandler: # We want to fetch any kind of membership (joined and left rooms) in order # to get the `event_pos` of the latest room membership event for the # user. - # - # We will filter out the rooms that don't belong below (see - # `filter_membership_for_sync`) membership_list=Membership.LIST, excluded_rooms=self.rooms_to_exclude_globally, ) @@ -614,7 +671,9 @@ class SlidingSyncHandler: event_pos=room_for_user.event_pos, membership=room_for_user.membership, sender=room_for_user.sender, + # We will update these fields below to be accurate newly_joined=False, + newly_left=False, is_dm=False, ) for room_for_user in room_for_user_list @@ -653,12 +712,10 @@ class SlidingSyncHandler: # - 1a) Remove rooms that the user joined after the `to_token` # - 1b) Add back rooms that the user left after the `to_token` # - 1c) Update room membership events to the point in time of the `to_token` - # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`) - # - 3) Figure out which rooms are `newly_joined` + # - 2) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`) + # - 3) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`) # - 4) Figure out which rooms are DM's - # 1) ----------------------------------------------------- - # 1) Fetch membership changes that fall in the range from `to_token` up to # `membership_snapshot_token` # @@ -717,7 +774,9 @@ class SlidingSyncHandler: event_pos=first_membership_change_after_to_token.prev_event_pos, membership=first_membership_change_after_to_token.prev_membership, sender=first_membership_change_after_to_token.prev_sender, + # We will update these fields below to be accurate newly_joined=False, + newly_left=False, is_dm=False, ) else: @@ -726,22 +785,6 @@ class SlidingSyncHandler: # exact membership state and shouldn't rely on the current snapshot. 
sync_room_id_set.pop(room_id, None)
 
-        # Filter the rooms that that we have updated room membership events to the point
-        # in time of the `to_token` (from the "1)" fixups)
-        filtered_sync_room_id_set = {
-            room_id: room_membership_for_user
-            for room_id, room_membership_for_user in sync_room_id_set.items()
-            if filter_membership_for_sync(
-                membership=room_membership_for_user.membership,
-                user_id=user_id,
-                sender=room_membership_for_user.sender,
-            )
-        }
-
-        # 2) -----------------------------------------------------
-        # We fix-up newly_left rooms after the first fixup because it may have removed
-        # some left rooms that we can figure out are newly_left in the following code
-
         # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
         current_state_delta_membership_changes_in_from_to_range = []
         if from_token:
@@ -803,19 +846,40 @@
             if last_membership_change_in_from_to_range.membership == Membership.JOIN:
                 possibly_newly_joined_room_ids.add(room_id)
 
-            # 2) Add back newly_left rooms (> `from_token` and <= `to_token`). We
-            # include newly_left rooms because the last event that the user should see
-            # is their own leave event
+            # 2) Figure out newly_left rooms (> `from_token` and <= `to_token`).
             if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
-                filtered_sync_room_id_set[room_id] = _RoomMembershipForUser(
-                    room_id=room_id,
-                    event_id=last_membership_change_in_from_to_range.event_id,
-                    event_pos=last_membership_change_in_from_to_range.event_pos,
-                    membership=last_membership_change_in_from_to_range.membership,
-                    sender=last_membership_change_in_from_to_range.sender,
-                    newly_joined=False,
-                    is_dm=False,
-                )
+                # 2) Mark this room as `newly_left`
+
+                # If we're seeing a membership change here, we should expect to already
+                # have it in our snapshot but if a state reset happens, it wouldn't have
+                # shown up in our snapshot but would appear as a change here.
+                existing_sync_entry = sync_room_id_set.get(room_id)
+                if existing_sync_entry is not None:
+                    # Normal expected case
+                    sync_room_id_set[room_id] = existing_sync_entry.copy_and_replace(
+                        newly_left=True
+                    )
+                else:
+                    # State reset!
+                    logger.warn(
+                        "State reset detected for room_id %s with %s who is no longer in the room",
+                        room_id,
+                        user_id,
+                    )
+                    # Even though a state reset happened which removed the person from
+                    # the room, we still add it to the list so the user knows they left the
+                    # room. Downstream code can check for a state reset by looking for
+                    # `event_id=None and membership is not None`.
+                    sync_room_id_set[room_id] = _RoomMembershipForUser(
+                        room_id=room_id,
+                        event_id=last_membership_change_in_from_to_range.event_id,
+                        event_pos=last_membership_change_in_from_to_range.event_pos,
+                        membership=last_membership_change_in_from_to_range.membership,
+                        sender=last_membership_change_in_from_to_range.sender,
+                        newly_joined=False,
+                        newly_left=True,
+                        is_dm=False,
+                    )
 
         # 3) Figure out `newly_joined`
         for room_id in possibly_newly_joined_room_ids:
@@ -826,9 +890,9 @@
             # also some non-join in the range, we know they `newly_joined`.
if has_non_join_in_from_to_range: # We found a `newly_joined` room (we left and joined within the token range) - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ - room_id - ].copy_and_replace(newly_joined=True) + sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( + newly_joined=True + ) else: prev_event_id = first_membership_change_by_room_id_in_from_to_range[ room_id @@ -840,7 +904,7 @@ class SlidingSyncHandler: if prev_event_id is None: # We found a `newly_joined` room (we are joining the room for the # first time within the token range) - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ + sync_room_id_set[room_id] = sync_room_id_set[ room_id ].copy_and_replace(newly_joined=True) # Last resort, we need to step back to the previous membership event @@ -848,7 +912,7 @@ class SlidingSyncHandler: elif prev_membership != Membership.JOIN: # We found a `newly_joined` room (we left before the token range # and joined within the token range) - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ + sync_room_id_set[room_id] = sync_room_id_set[ room_id ].copy_and_replace(newly_joined=True) @@ -876,12 +940,122 @@ class SlidingSyncHandler: dm_room_id_set.add(room_id) # 4) Fixup - for room_id in filtered_sync_room_id_set: - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ - room_id - ].copy_and_replace(is_dm=room_id in dm_room_id_set) + for room_id in sync_room_id_set: + sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( + is_dm=room_id in dm_room_id_set + ) - return filtered_sync_room_id_set + return sync_room_id_set + + async def filter_rooms_relevant_for_sync( + self, + user: UserID, + room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Filter room IDs that should/can be listed for this user in the sync response (the + full room list that will be further filtered, sorted, and sliced). + + We're looking for rooms where the user has the following state in the token + range (> `from_token` and <= `to_token`): + + - `invite`, `join`, `knock`, `ban` membership events + - Kicks (`leave` membership events where `sender` is different from the + `user_id`/`state_key`) + - `newly_left` (rooms that were left during the given token range) + - In order for bans/kicks to not show up in sync, you need to `/forget` those + rooms. This doesn't modify the event itself though and only adds the + `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way + to tell when a room was forgotten at the moment so we can't factor it into the + from/to range. + + Args: + user: User that is syncing + room_membership_for_user_map: Room membership for the user + + Returns: + A dictionary of room IDs that should be listed in the sync response along + with membership information in that room at the time of `to_token`. 
+ """ + user_id = user.to_string() + + # Filter rooms to only what we're interested to sync with + filtered_sync_room_map = { + room_id: room_membership_for_user + for room_id, room_membership_for_user in room_membership_for_user_map.items() + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_membership_for_user, + ) + } + + return filtered_sync_room_map + + async def check_room_subscription_allowed_for_user( + self, + room_id: str, + room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + to_token: StreamToken, + ) -> Optional[_RoomMembershipForUser]: + """ + Check whether the user is allowed to see the room based on whether they have + ever had membership in the room or if the room is `world_readable`. + + Similar to `check_user_in_room_or_world_readable(...)` + + Args: + room_id: Room to check + room_membership_for_user_map: Room membership for the user at the time of + the `to_token` (<= `to_token`). + to_token: The token to fetch rooms up to. + + Returns: + The room membership for the user if they are allowed to subscribe to the + room else `None`. + """ + + # We can first check if they are already allowed to see the room based + # on our previous work to assemble the `room_membership_for_user_map`. + # + # If they have had any membership in the room over time (up to the `to_token`), + # let them subscribe and see what they can. + existing_membership_for_user = room_membership_for_user_map.get(room_id) + if existing_membership_for_user is not None: + return existing_membership_for_user + + # TODO: Handle `world_readable` rooms + return None + + # If the room is `world_readable`, it doesn't matter whether they can join, + # everyone can see the room. + # not_in_room_membership_for_user = _RoomMembershipForUser( + # room_id=room_id, + # event_id=None, + # event_pos=None, + # membership=None, + # sender=None, + # newly_joined=False, + # newly_left=False, + # is_dm=False, + # ) + # room_state = await self.get_current_state_at( + # room_id=room_id, + # room_membership_for_user_at_to_token=not_in_room_membership_for_user, + # state_filter=StateFilter.from_types( + # [(EventTypes.RoomHistoryVisibility, "")] + # ), + # to_token=to_token, + # ) + + # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, "")) + # if ( + # visibility_event is not None + # and visibility_event.content.get("history_visibility") + # == HistoryVisibility.WORLD_READABLE + # ): + # return not_in_room_membership_for_user + + # return None async def filter_rooms( self, @@ -1081,7 +1255,6 @@ class SlidingSyncHandler: in the room at the time of `to_token`. to_token: The point in the stream to sync up to. """ - room_state_ids: StateMap[str] # People shouldn't see past their leave/ban event if room_membership_for_user_at_to_token.membership in ( @@ -1349,10 +1522,10 @@ class SlidingSyncHandler: stripped_state.append(strip_event(invite_or_knock_event)) # TODO: Handle state resets. For example, if we see - # `room_membership_for_user_at_to_token.membership = Membership.LEAVE` but - # `required_state` doesn't include it, we should indicate to the client that a - # state reset happened. Perhaps we should indicate this by setting `initial: - # True` and empty `required_state`. + # `room_membership_for_user_at_to_token.event_id=None and + # room_membership_for_user_at_to_token.membership is not None`, we should + # indicate to the client that a state reset happened. Perhaps we should indicate + # this by setting `initial: True` and empty `required_state`. 
# TODO: Since we can't determine whether we've already sent a room down this # Sliding Sync connection before (we plan to add this optimization in the diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index eb4b0a05c..a7aa9bb8a 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -19,7 +19,7 @@ # import logging from copy import deepcopy -from typing import Optional +from typing import Dict, Optional from unittest.mock import patch from parameterized import parameterized @@ -37,12 +37,16 @@ from synapse.api.constants import ( from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext -from synapse.handlers.sliding_sync import RoomSyncConfig, StateValues +from synapse.handlers.sliding_sync import ( + RoomSyncConfig, + StateValues, + _RoomMembershipForUser, +) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, StreamToken, UserID from synapse.types.handlers import SlidingSyncConfig from synapse.util import Clock @@ -581,9 +585,9 @@ class RoomSyncConfigTestCase(TestCase): self._assert_room_config_equal(room_sync_config_b, expected, "A into B") -class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): +class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): """ - Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it returns + Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it returns the correct list of rooms IDs. 
""" @@ -616,7 +620,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=now_token, to_token=now_token, @@ -643,7 +647,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -657,9 +661,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id].event_id, join_response["event_id"], ) + self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) # We should be considered `newly_joined` because we joined during the token # range self.assertEqual(room_id_results[room_id].newly_joined, True) + self.assertEqual(room_id_results[room_id].newly_left, False) def test_get_already_joined_room(self) -> None: """ @@ -676,7 +682,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room_token, to_token=after_room_token, @@ -690,8 +696,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id].event_id, join_response["event_id"], ) + self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id].newly_joined, False) + self.assertEqual(room_id_results[room_id].newly_left, False) def test_get_invited_banned_knocked_room(self) -> None: """ @@ -748,7 +756,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -770,19 +778,25 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[invited_room_id].event_id, invite_response["event_id"], ) + self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) + self.assertEqual(room_id_results[invited_room_id].newly_joined, False) + self.assertEqual(room_id_results[invited_room_id].newly_left, False) + self.assertEqual( room_id_results[ban_room_id].event_id, ban_response["event_id"], ) + self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) + self.assertEqual(room_id_results[ban_room_id].newly_joined, False) + self.assertEqual(room_id_results[ban_room_id].newly_left, False) + self.assertEqual( room_id_results[knock_room_id].event_id, knock_room_membership_state_event.event_id, ) - # We should *NOT* be `newly_joined` because we were not joined at the the time - # of the `to_token`. 
-        self.assertEqual(room_id_results[invited_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[ban_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK)
         self.assertEqual(room_id_results[knock_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[knock_room_id].newly_left, False)

     def test_get_kicked_room(self) -> None:
         """
@@ -814,7 +828,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         after_kick_token = self.event_sources.get_current_token()

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_kick_token,
                 to_token=after_kick_token,
@@ -828,9 +842,12 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[kick_room_id].event_id,
             kick_response["event_id"],
         )
+        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
         # We should *NOT* be `newly_joined` because we were not joined at the time
         # of the `to_token`.
         self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[kick_room_id].newly_left, False)

     def test_forgotten_rooms(self) -> None:
         """
@@ -904,7 +921,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         self.assertEqual(channel.code, 200, channel.result)

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room_forgets,
                 to_token=before_room_forgets,
@@ -914,52 +931,58 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # We shouldn't see the room because it was forgotten
         self.assertEqual(room_id_results.keys(), set())

-    def test_only_newly_left_rooms_show_up(self) -> None:
+    def test_newly_left_rooms(self) -> None:
         """
-        Test that newly_left rooms still show up in the sync response but rooms that
-        were left before the `from_token` don't show up. See condition "2)" comments in
-        the `get_sync_room_ids_for_user` method.
+        Test that `newly_left` rooms are marked properly
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")

         # Leave before we calculate the `from_token`
         room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)

         after_room1_token = self.event_sources.get_current_token()

         # Leave during the from_token/to_token range (newly_left)
         room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+        leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)

         after_room2_token = self.event_sources.get_current_token()

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room2_token,
             )
         )

-        # Only the newly_left room should show up
-        self.assertEqual(room_id_results.keys(), {room_id2})
-        # It should be pointing to the latest membership event in the from/to range but
-        # the `event_id` is `None` because we left the room causing the server to leave
-        # the room because no other local users are in it (quirk of the
-        # `current_state_delta_stream` table that we source things from)
+        self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
+
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_response1["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined` or `newly_left` because the leave happened
+        # before the from/to range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].newly_left, False)
+
         self.assertEqual(
             room_id_results[room_id2].event_id,
-            None,  # _leave_response2["event_id"],
+            leave_response2["event_id"],
         )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
         # We should *NOT* be `newly_joined` because we are instead `newly_left`
         self.assertEqual(room_id_results[room_id2].newly_joined, False)
+        self.assertEqual(room_id_results[room_id2].newly_left, True)

     def test_no_joins_after_to_token(self) -> None:
         """
         Rooms we join after the `to_token` should *not* show up. See condition "1b)"
-        comments in the `get_sync_room_ids_for_user()` method.
+        comments in the `get_room_membership_for_user_at_to_token()` method.
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -978,7 +1001,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id2, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -991,14 +1014,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id1].event_id, join_response1["event_id"], ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_during_range_and_left_room_after_to_token(self) -> None: """ Room still shows up if we left the room but were joined during the from_token/to_token. See condition "1a)" comments in the - `get_sync_room_ids_for_user()` method. + `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1016,7 +1041,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1038,14 +1063,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_before_range_and_left_room_after_to_token(self) -> None: """ Room still shows up if we left the room but were joined before the `from_token` so it should show up. See condition "1a)" comments in the - `get_sync_room_ids_for_user()` method. + `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1061,7 +1088,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1082,14 +1109,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_kicked_before_range_and_left_after_to_token(self) -> None: """ Room still shows up if we left the room but were kicked before the `from_token` so it should show up. See condition "1a)" comments in the - `get_sync_room_ids_for_user()` method. + `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1123,7 +1152,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, @@ -1146,14 +1175,17 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE) + self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were kicked self.assertEqual(room_id_results[kick_room_id].newly_joined, False) + self.assertEqual(room_id_results[kick_room_id].newly_left, False) def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None: """ Newly left room should show up. But we're also testing that joining and leaving after the `to_token` doesn't mess with the results. See condition "2)" and "1a)" - comments in the `get_sync_room_ids_for_user()` method. + comments in the `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1176,7 +1208,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1199,14 +1231,17 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) - # We should *NOT* be `newly_joined` because we left during the token range + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we are actually `newly_left` during + # the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, True) def test_newly_left_during_range_and_join_after_to_token(self) -> None: """ Newly left room should show up. But we're also testing that joining after the `to_token` doesn't mess with the results. See condition "2)" and "1b)" comments - in the `get_sync_room_ids_for_user()` method. + in the `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1228,7 +1263,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1250,16 +1285,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) - # We should *NOT* be `newly_joined` because we left during the token range + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we are actually `newly_left` during + # the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, True) def test_no_from_token(self) -> None: """ - Test that if we don't provide a `from_token`, we get all the rooms that we we're - joined up to the `to_token`. + Test that if we don't provide a `from_token`, we get all the rooms that we had + membership in up to the `to_token`. - Providing `from_token` only really has the effect that it adds `newly_left` - rooms to the response. + Providing `from_token` only really has the effect that it marks rooms as + `newly_left` in the response. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1276,7 +1314,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # Join and leave the room2 before the `to_token` self.helper.join(room_id2, user1_id, tok=user1_tok) - self.helper.leave(room_id2, user1_id, tok=user1_tok) + leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1284,7 +1322,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id2, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=None, to_token=after_room1_token, @@ -1292,15 +1330,31 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) # Only rooms we were joined to before the `to_token` should show up - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + + # Room1 # It should be pointing to the latest membership event in the from/to range self.assertEqual( room_id_results[room_id1].event_id, join_response1["event_id"], ) - # We should *NOT* be `newly_joined` because there is no `from_token` to - # define a "live" range to compare against + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + # We should *NOT* be `newly_joined`/`newly_left` because there is no + # `from_token` to define a "live" range to compare against self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) + + # Room2 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id2].event_id, + leave_response2["event_id"], + ) + self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because there is no + # `from_token` to define a "live" range to compare 
against + self.assertEqual(room_id_results[room_id2].newly_joined, False) + self.assertEqual(room_id_results[room_id2].newly_left, False) def test_from_token_ahead_of_to_token(self) -> None: """ @@ -1319,28 +1373,28 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) - # Join room1 before `before_room_token` - join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) + # Join room1 before `to_token` + join_room1_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) - # Join and leave the room2 before `before_room_token` - self.helper.join(room_id2, user1_id, tok=user1_tok) - self.helper.leave(room_id2, user1_id, tok=user1_tok) + # Join and leave the room2 before `to_token` + _join_room2_response1 = self.helper.join(room_id2, user1_id, tok=user1_tok) + leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok) # Note: These are purposely swapped. The `from_token` should come after # the `to_token` in this test to_token = self.event_sources.get_current_token() - # Join room2 after `before_room_token` - self.helper.join(room_id2, user1_id, tok=user1_tok) + # Join room2 after `to_token` + _join_room2_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) # -------- - # Join room3 after `before_room_token` - self.helper.join(room_id3, user1_id, tok=user1_tok) + # Join room3 after `to_token` + _join_room3_response1 = self.helper.join(room_id3, user1_id, tok=user1_tok) - # Join and leave the room4 after `before_room_token` - self.helper.join(room_id4, user1_id, tok=user1_tok) - self.helper.leave(room_id4, user1_id, tok=user1_tok) + # Join and leave the room4 after `to_token` + _join_room4_response1 = self.helper.join(room_id4, user1_id, tok=user1_tok) + _leave_room4_response1 = self.helper.leave(room_id4, user1_id, tok=user1_tok) # Note: These are purposely swapped. The `from_token` should come after the # `to_token` in this test @@ -1350,31 +1404,59 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id4, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=from_token, to_token=to_token, ) ) - # Only rooms we were joined to before the `to_token` should show up - # - # There won't be any newly_left rooms because the `from_token` is ahead of the - # `to_token` and that range will give no membership changes to check. - self.assertEqual(room_id_results.keys(), {room_id1}) + # In the "current" state snapshot, we're joined to all of the rooms but in the + # from/to token range... 
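+        # ...nothing can be `newly_joined`/`newly_left` here: the `from_token` is
+        # ahead of the `to_token`, so that inverted range yields no membership changes
+        # to check.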
+        self.assertIncludes(
+            room_id_results.keys(),
+            {
+                # Included because we were joined before both tokens
+                room_id1,
+                # Included because we had membership before the `to_token`
+                room_id2,
+                # Excluded because we joined after the `to_token`
+                # room_id3,
+                # Excluded because we joined and left after the `to_token`
+                # room_id4,
+            },
+            exact=True,
+        )
+
+        # Room1
         # It should be pointing to the latest membership event in the from/to range
         self.assertEqual(
             room_id_results[room_id1].event_id,
-            join_response1["event_id"],
+            join_room1_response1["event_id"],
         )
-        # We should *NOT* be `newly_joined` because we joined `room1` before either of the tokens
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+        # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1`
+        # before either of the tokens
         self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].newly_left, False)
+
+        # Room2
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            leave_room2_response1["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+        # `room2` before either of the tokens
+        self.assertEqual(room_id_results[room_id2].newly_joined, False)
+        self.assertEqual(room_id_results[room_id2].newly_left, False)

     def test_leave_before_range_and_join_leave_after_to_token(self) -> None:
         """
-        Old left room shouldn't show up. But we're also testing that joining and leaving
-        after the `to_token` doesn't mess with the results. See condition "1a)" comments
-        in the `get_sync_room_ids_for_user()` method.
+        Test that old left rooms show up with their leave membership. But we're also
+        testing that joining and leaving after the `to_token` doesn't mess with the
+        results. See condition "1a)" comments in the
+        `get_room_membership_for_user_at_to_token()` method.
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1386,7 +1468,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) # Join and leave the room before the from/to range self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1395,21 +1477,30 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, ) ) - # Room shouldn't show up because it was left before the `from_token` - self.assertEqual(room_id_results.keys(), set()) + self.assertEqual(room_id_results.keys(), {room_id1}) + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id1].event_id, + leave_response["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we joined and left + # `room1` before either of the tokens + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_leave_before_range_and_join_after_to_token(self) -> None: """ - Old left room shouldn't show up. But we're also testing that joining after the - `to_token` doesn't mess with the results. See condition "1b)" comments in the - `get_sync_room_ids_for_user()` method. + Test old left room. But we're also testing that joining after the `to_token` + doesn't mess with the results. See condition "1b)" comments in the + `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1421,7 +1512,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) # Join and leave the room before the from/to range self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1429,24 +1520,32 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, ) ) - # Room shouldn't show up because it was left before the `from_token` - self.assertEqual(room_id_results.keys(), set()) + self.assertEqual(room_id_results.keys(), {room_id1}) + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id1].event_id, + leave_response["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we joined and left + # `room1` before either of the tokens + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_leave_multiple_times_during_range_and_after_to_token( self, ) -> None: """ Join and leave multiple times shouldn't affect rooms from showing up. It just - matters that we were joined or newly_left in the from/to range. But we're also - testing that joining and leaving after the `to_token` doesn't mess with the - results. + matters that we had membership in the from/to range. But we're also testing that + joining and leaving after the `to_token` doesn't mess with the results. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1458,7 +1557,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # We create the room with user2 so the room isn't left with no members when we # leave and can still re-join. 
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
-        # Join, leave, join back to the room before the from/to range
+        # Join, leave, join back to the room during the from/to range
         join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
         join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
@@ -1471,7 +1570,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1496,15 +1595,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
         self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        # We should *NOT* be `newly_left` because we joined during the token range and
+        # were still joined at the end of the range
+        self.assertEqual(room_id_results[room_id1].newly_left, False)

     def test_join_leave_multiple_times_before_range_and_after_to_token(
         self,
     ) -> None:
         """
         Joining and leaving multiple times before the from/to range shouldn't stop rooms
-        from showing up. It just matters that we were joined or newly_left in the
+        from showing up. It just matters that we had membership in the
         from/to range. But we're also testing that joining and leaving after the
         `to_token` doesn't mess with the results.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1529,7 +1632,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
@@ -1554,8 +1657,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should *NOT* be `newly_joined` because we joined before the token range
         self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].newly_left, False)

     def test_invite_before_range_and_join_leave_after_to_token(
         self,
     ) -> None:
         """
         Make it look like we joined after the token range but we were invited before the
         from/to range so the room should still show up. See condition "1a)" comments in
-        the `get_sync_room_ids_for_user()` method.
+        the `get_room_membership_for_user_at_to_token()` method.
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1586,7 +1691,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1608,9 +1713,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE) # We should *NOT* be `newly_joined` because we were only invited before the # token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_and_display_name_changes_in_token_range( self, @@ -1658,7 +1765,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1684,8 +1791,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_in_token_range( self, @@ -1721,7 +1830,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_change1_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_change1_token, @@ -1744,8 +1853,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_before_and_after_token_range( self, @@ -1791,7 +1902,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1817,8 +1928,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_leave_after_token_range( self, @@ -1828,7 +1941,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): if there are multiple `join` membership events in a row indicating `displayname`/`avatar_url` updates and we leave after the `to_token`. - See condition "1a)" comments in the `get_sync_room_ids_for_user()` method. 
+        See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1871,7 +1984,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         self.helper.leave(room_id1, user1_id, tok=user1_tok)

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1897,8 +2010,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
         self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertEqual(room_id_results[room_id1].newly_left, False)

     def test_display_name_changes_join_after_token_range(
         self,
     ) -> None:
         """
         Test that multiple `join` membership events in a row (after the token range)
         indicating `displayname`/`avatar_url` updates don't affect the results (we
         joined after the token range so the room shouldn't show up)

-        See condition "1b)" comments in the `get_sync_room_ids_for_user()` method.
+        See condition "1b)" comments in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1937,7 +2052,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1973,7 +2088,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         after_more_changes_token = self.event_sources.get_current_token()

         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_more_changes_token,
@@ -1987,9 +2102,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[room_id1].event_id,
             join_response2["event_id"],
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be considered `newly_joined` because there is some non-join event
         # in between our join events.
self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_newly_joined_only_joins_during_token_range( self, @@ -2036,7 +2153,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room1_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -2062,8 +2179,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we first joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_multiple_rooms_are_not_confused( self, @@ -2086,16 +2205,18 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # Invited and left the room before the token self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) # Invited to room2 - self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok) + invite_room2_response = self.helper.invite( + room_id2, src=user2_id, targ=user1_id, tok=user2_tok + ) before_room3_token = self.event_sources.get_current_token() # Invited and left room3 during the from/to range room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) self.helper.invite(room_id3, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.leave(room_id3, user1_id, tok=user1_tok) + leave_room3_response = self.helper.leave(room_id3, user1_id, tok=user1_tok) after_room3_token = self.event_sources.get_current_token() @@ -2108,7 +2229,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.leave(room_id3, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room3_token, to_token=after_room3_token, @@ -2118,19 +2239,158 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.assertEqual( room_id_results.keys(), { - # `room_id1` shouldn't show up because we left before the from/to range - # - # Room should show up because we were invited before the from/to range + # Left before the from/to range + room_id1, + # Invited before the from/to range room_id2, - # Room should show up because it was newly_left during the from/to range + # `newly_left` during the from/to range room_id3, }, ) + # Room1 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id1].event_id, + leave_room1_response["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left + # before the token range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) -class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): + # Room2 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + 
            room_id_results[room_id2].event_id,
+            invite_room2_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we were invited before
+        # the token range
+        self.assertEqual(room_id_results[room_id2].newly_joined, False)
+        self.assertEqual(room_id_results[room_id2].newly_left, False)
+
+        # Room3
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id3].event_id,
+            leave_room3_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE)
+        # We should be `newly_left` because we were invited and left during
+        # the token range
+        self.assertEqual(room_id_results[room_id3].newly_joined, False)
+        self.assertEqual(room_id_results[room_id3].newly_left, True)
+
+    def test_state_reset(self) -> None:
+        """
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # The room where the state reset will happen
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Join another room so we don't hit the short-circuit and return early if they
+        # have no room membership
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Send another state event to make a position for the state reset to happen at
+        dummy_state_response = self.helper.send_state(
+            room_id1,
+            event_type="foobarbaz",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        dummy_state_pos = self.get_success(
+            self.store.get_position_for_event(dummy_state_response["event_id"])
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="current_state_events",
+                keyvalues={
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                },
+                desc="state reset user in current_state_events",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="local_current_membership",
+                keyvalues={
+                    "room_id": room_id1,
+                    "user_id": user1_id,
+                },
+                desc="state reset user in local_current_membership",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="current_state_delta_stream",
+                values={
+                    "stream_id": dummy_state_pos.stream,
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                    "event_id": None,
+                    "prev_event_id": join_response1["event_id"],
+                    "instance_name": dummy_state_pos.instance_name,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+
+        # Manually bust the cache since we're just manually messing with the database
+        # and not causing an actual state reset.
+ self.store._membership_stream_cache.entity_has_changed( + user1_id, dummy_state_pos.stream + ) + + after_reset_token = self.event_sources.get_current_token() + + # The function under test + room_id_results = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + UserID.from_string(user1_id), + from_token=before_reset_token, + to_token=after_reset_token, + ) + ) + + # Room1 should show up because it was `newly_left` via state reset during the from/to range + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + # It should be pointing to no event because we were removed from the room + # without a corresponding leave event + self.assertEqual( + room_id_results[room_id1].event_id, + None, + ) + # State reset caused us to leave the room and there is no corresponding leave event + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we joined before the token range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + # We should be `newly_left` because we were removed via state reset during the from/to range + self.assertEqual(room_id_results[room_id1].newly_left, True) + + +class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase): """ - Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it works with + Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it works with sharded event stream_writers enabled """ @@ -2189,7 +2449,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): We then send some events to advance the stream positions of worker1 and worker3 but worker2 is lagging behind because it's stuck. We are specifically testing - that `get_sync_room_ids_for_user(from_token=xxx, to_token=xxx)` should work + that `get_room_membership_for_user_at_to_token(from_token=xxx, to_token=xxx)` should work correctly in these adverse conditions. """ user1_id = self.register_user("user1", "pass") @@ -2228,7 +2488,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) # Leave room2 - self.helper.leave(room_id2, user1_id, tok=user1_tok) + leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok) join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok) # Leave room3 self.helper.leave(room_id3, user1_id, tok=user1_tok) @@ -2265,7 +2525,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): # For room_id1/worker1: leave and join the room to advance the stream position # and generate membership changes. self.helper.leave(room_id1, user1_id, tok=user1_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) + join_room1_response = self.helper.join(room_id1, user1_id, tok=user1_tok) # For room_id2/worker2: which is currently stuck, join the room. 
         join_on_worker2_response = self.helper.join(room_id2, user1_id, tok=user1_tok)

         # For room_id3/worker3: leave and join the room to advance the stream position
@@ -2319,7 +2579,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):

         # The function under test
         room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_stuck_activity_token,
                 to_token=stuck_activity_token,
@@ -2330,18 +2590,411 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
             room_id_results.keys(),
             {
                 room_id1,
-                # room_id2 shouldn't show up because we left before the from/to range
-                # and the join event during the range happened while worker2 was stuck.
-                # This means that from the perspective of the master, where the
-                # `stuck_activity_token` is generated, the stream position for worker2
-                # wasn't advanced to the join yet. Looking at the `instance_map`, the
-                # join technically comes after `stuck_activity_token``.
                #
-                # room_id2,
+                room_id2,
                 room_id3,
             },
         )

+        # Room1
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_room1_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertEqual(room_id_results[room_id1].newly_left, False)
+
+        # Room2
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            leave_room2_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+        # room_id2 should *NOT* be considered `newly_left` because we left before the
+        # from/to range and the join event during the range happened while worker2 was
+        # stuck. This means that from the perspective of the master, where the
+        # `stuck_activity_token` is generated, the stream position for worker2 wasn't
+        # advanced to the join yet. Looking at the `instance_map`, the join technically
+        # comes after `stuck_activity_token`.
+        self.assertEqual(room_id_results[room_id2].newly_joined, False)
+        self.assertEqual(room_id_results[room_id2].newly_left, False)
+
+        # Room3
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id3].event_id,
+            join_on_worker3_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN)
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id3].newly_joined, True)
+        self.assertEqual(room_id_results[room_id3].newly_left, False)
+
+
+class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+    """
+    Tests Sliding Sync handler `filter_rooms_relevant_for_sync()` to make sure it returns
+    the correct list of room IDs.
+ """ + + servlets = [ + admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sliding_sync_handler = self.hs.get_sliding_sync_handler() + self.store = self.hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.storage_controllers = hs.get_storage_controllers() + + def _get_sync_room_ids_for_user( + self, + user: UserID, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Get the rooms the user should be syncing with + """ + room_membership_for_user_map = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + user=user, + from_token=from_token, + to_token=to_token, + ) + ) + filtered_sync_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms_relevant_for_sync( + user=user, + room_membership_for_user_map=room_membership_for_user_map, + ) + ) + + return filtered_sync_room_map + + def test_no_rooms(self) -> None: + """ + Test when the user has never joined any rooms before + """ + user1_id = self.register_user("user1", "pass") + # user1_tok = self.login(user1_id, "pass") + + now_token = self.event_sources.get_current_token() + + room_id_results = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=now_token, + to_token=now_token, + ) + + self.assertEqual(room_id_results.keys(), set()) + + def test_basic_rooms(self) -> None: + """ + Test that rooms that the user is joined to, invited to, banned from, and knocked + on show up. 
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room_token = self.event_sources.get_current_token()
+
+        join_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(join_room_id, user1_id, tok=user1_tok)
+
+        # Setup the invited room (user2 invites user1 to the room)
+        invited_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        invite_response = self.helper.invite(
+            invited_room_id, targ=user1_id, tok=user2_tok
+        )
+
+        # Setup the ban room (user2 bans user1 from the room)
+        ban_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, is_public=True
+        )
+        self.helper.join(ban_room_id, user1_id, tok=user1_tok)
+        ban_response = self.helper.ban(
+            ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        # Setup the knock room (user1 knocks on the room)
+        knock_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, room_version=RoomVersions.V7.identifier
+        )
+        self.helper.send_state(
+            knock_room_id,
+            EventTypes.JoinRules,
+            {"join_rule": JoinRules.KNOCK},
+            tok=user2_tok,
+        )
+        # User1 knocks on the room
+        knock_channel = self.make_request(
+            "POST",
+            "/_matrix/client/r0/knock/%s" % (knock_room_id,),
+            b"{}",
+            user1_tok,
+        )
+        self.assertEqual(knock_channel.code, 200, knock_channel.result)
+        knock_room_membership_state_event = self.get_success(
+            self.storage_controllers.state.get_current_state_event(
+                knock_room_id, EventTypes.Member, user1_id
+            )
+        )
+        assert knock_room_membership_state_event is not None
+
+        after_room_token = self.event_sources.get_current_token()
+
+        room_id_results = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=before_room_token,
+            to_token=after_room_token,
+        )
+
+        # Ensure that the joined, invited, ban, and knock rooms show up
+        self.assertEqual(
+            room_id_results.keys(),
+            {
+                join_room_id,
+                invited_room_id,
+                ban_room_id,
+                knock_room_id,
+            },
+        )
+        # It should be pointing to the respective membership event (latest
+        # membership event in the from/to range)
+        self.assertEqual(
+            room_id_results[join_room_id].event_id,
+            join_response["event_id"],
+        )
+        self.assertEqual(room_id_results[join_room_id].membership, Membership.JOIN)
+        self.assertEqual(room_id_results[join_room_id].newly_joined, True)
+        self.assertEqual(room_id_results[join_room_id].newly_left, False)
+
+        self.assertEqual(
+            room_id_results[invited_room_id].event_id,
+            invite_response["event_id"],
+        )
+        self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE)
+        self.assertEqual(room_id_results[invited_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[invited_room_id].newly_left, False)
+
+        self.assertEqual(
+            room_id_results[ban_room_id].event_id,
+            ban_response["event_id"],
+        )
+        self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN)
+        self.assertEqual(room_id_results[ban_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[ban_room_id].newly_left, False)
+
+        self.assertEqual(
+            room_id_results[knock_room_id].event_id,
+            knock_room_membership_state_event.event_id,
+        )
+        self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK)
+        self.assertEqual(room_id_results[knock_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[knock_room_id].newly_left, False)
+
+    def test_only_newly_left_rooms_show_up(self) -> None:
+        """
+        Test that `newly_left` rooms
still show up in the sync response but rooms that
+        were left before the `from_token` don't show up. See condition "2)" comments in
+        the `get_room_membership_for_user_at_to_token()` method.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Leave before we calculate the `from_token`
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Leave during the from_token/to_token range (newly_left)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+        after_room2_token = self.event_sources.get_current_token()
+
+        room_id_results = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=after_room1_token,
+            to_token=after_room2_token,
+        )
+
+        # Only the `newly_left` room should show up
+        self.assertEqual(room_id_results.keys(), {room_id2})
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            _leave_response2["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because we are instead `newly_left`
+        self.assertEqual(room_id_results[room_id2].newly_joined, False)
+        self.assertEqual(room_id_results[room_id2].newly_left, True)
+
+    def test_get_kicked_room(self) -> None:
+        """
+        Test that a room that the user was kicked from still shows up. When the user
+        comes back to their client, they should see that they were kicked.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Setup the kick room (user2 kicks user1 from the room)
+        kick_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, is_public=True
+        )
+        self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+        # Kick user1 from the room
+        kick_response = self.helper.change_membership(
+            room=kick_room_id,
+            src=user2_id,
+            targ=user1_id,
+            tok=user2_tok,
+            membership=Membership.LEAVE,
+            extra_data={
+                "reason": "Bad manners",
+            },
+        )
+
+        after_kick_token = self.event_sources.get_current_token()
+
+        room_id_results = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=after_kick_token,
+            to_token=after_kick_token,
+        )
+
+        # The kicked room should show up
+        self.assertEqual(room_id_results.keys(), {kick_room_id})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[kick_room_id].event_id,
+            kick_response["event_id"],
+        )
+        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
+        # We should *NOT* be `newly_joined` because we were not joined at the time
+        # of the `to_token`.
+        self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[kick_room_id].newly_left, False)
+
+    def test_state_reset(self) -> None:
+        """
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # The room where the state reset will happen
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Join another room so we don't hit the short-circuit and return early if they
+        # have no room membership
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Send another state event to make a position for the state reset to happen at
+        dummy_state_response = self.helper.send_state(
+            room_id1,
+            event_type="foobarbaz",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        dummy_state_pos = self.get_success(
+            self.store.get_position_for_event(dummy_state_response["event_id"])
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="current_state_events",
+                keyvalues={
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                },
+                desc="state reset user in current_state_events",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="local_current_membership",
+                keyvalues={
+                    "room_id": room_id1,
+                    "user_id": user1_id,
+                },
+                desc="state reset user in local_current_membership",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="current_state_delta_stream",
+                values={
+                    "stream_id": dummy_state_pos.stream,
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                    "event_id": None,
+                    "prev_event_id": join_response1["event_id"],
+                    "instance_name": dummy_state_pos.instance_name,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+
+        # Manually bust the cache since we're just manually messing with the database
+        # and not causing an actual state reset.
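+        # (`entity_has_changed` records that user1's membership changed at
+        # `dummy_state_pos`, mirroring what a real state reset would have done.)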
+ self.store._membership_stream_cache.entity_has_changed( + user1_id, dummy_state_pos.stream + ) + + after_reset_token = self.event_sources.get_current_token() + + # The function under test + room_id_results = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=before_reset_token, + to_token=after_reset_token, + ) + + # Room1 should show up because it was `newly_left` via state reset during the from/to range + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + # It should be pointing to no event because we were removed from the room + # without a corresponding leave event + self.assertEqual( + room_id_results[room_id1].event_id, + None, + ) + # State reset caused us to leave the room and there is no corresponding leave event + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we joined before the token range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + # We should be `newly_left` because we were removed via state reset during the from/to range + self.assertEqual(room_id_results[room_id1].newly_left, True) + class FilterRoomsTestCase(HomeserverTestCase): """ @@ -2367,6 +3020,31 @@ class FilterRoomsTestCase(HomeserverTestCase): self.store = self.hs.get_datastores().main self.event_sources = hs.get_event_sources() + def _get_sync_room_ids_for_user( + self, + user: UserID, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Get the rooms the user should be syncing with + """ + room_membership_for_user_map = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + user=user, + from_token=from_token, + to_token=to_token, + ) + ) + filtered_sync_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms_relevant_for_sync( + user=user, + room_membership_for_user_map=room_membership_for_user_map, + ) + ) + + return filtered_sync_room_map + def _create_dm_room( self, inviter_user_id: str, @@ -2438,12 +3116,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try with `is_dm=True` @@ -2496,12 +3172,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try with `is_encrypted=True` @@ -2552,12 +3226,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + 
to_token=after_rooms_token, ) # Try with `is_invite=True` @@ -2621,12 +3293,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try finding only normal rooms @@ -2714,12 +3384,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try finding *NOT* normal rooms @@ -2838,12 +3506,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) filtered_room_map = self.get_success( @@ -2884,6 +3550,31 @@ class SortRoomsTestCase(HomeserverTestCase): self.store = self.hs.get_datastores().main self.event_sources = hs.get_event_sources() + def _get_sync_room_ids_for_user( + self, + user: UserID, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Get the rooms the user should be syncing with + """ + room_membership_for_user_map = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + user=user, + from_token=from_token, + to_token=to_token, + ) + ) + filtered_sync_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms_relevant_for_sync( + user=user, + room_membership_for_user_map=room_membership_for_user_map, + ) + ) + + return filtered_sync_room_map + def test_sort_activity_basic(self) -> None: """ Rooms with newer activity are sorted first. 
@@ -2903,12 +3594,10 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Sort the rooms (what we're testing) @@ -2986,12 +3675,10 @@ class SortRoomsTestCase(HomeserverTestCase): self.helper.send(room_id3, "activity in room3", tok=user2_tok) # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=before_rooms_token, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=before_rooms_token, + to_token=after_rooms_token, ) # Sort the rooms (what we're testing) @@ -3052,12 +3739,10 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Sort the rooms (what we're testing) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 4236812db..f5d57e689 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -20,7 +20,8 @@ # import json import logging -from typing import AbstractSet, Any, Dict, Iterable, List, Optional +from http import HTTPStatus +from typing import Any, Dict, Iterable, List from parameterized import parameterized, parameterized_class @@ -1259,7 +1260,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): exact: bool = False, ) -> None: """ - Wrapper around `_assertIncludes` to give slightly better looking diff error + Wrapper around `assertIncludes` to give slightly better looking diff error messages that include some context "$event_id (type, state_key)". Args: @@ -1275,7 +1276,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): for event in actual_required_state: assert isinstance(event, dict) - self._assertIncludes( + self.assertIncludes( { f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")' for event in actual_required_state @@ -1289,56 +1290,6 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): message=str(actual_required_state), ) - def _assertIncludes( - self, - actual_items: AbstractSet[str], - expected_items: AbstractSet[str], - exact: bool = False, - message: Optional[str] = None, - ) -> None: - """ - Assert that all of the `expected_items` are included in the `actual_items`. - - This assert could also be called `assertContains`, `assertItemsInSet` - - Args: - actual_items: The container - expected_items: The items to check for in the container - exact: Whether the actual state should be exactly equal to the expected - state (no extras). - message: Optional message to include in the failure message. 
- """ - # Check that each set has the same items - if exact and actual_items == expected_items: - return - # Check for a superset - elif not exact and actual_items >= expected_items: - return - - expected_lines: List[str] = [] - for expected_item in expected_items: - is_expected_in_actual = expected_item in actual_items - expected_lines.append( - "{} {}".format(" " if is_expected_in_actual else "?", expected_item) - ) - - actual_lines: List[str] = [] - for actual_item in actual_items: - is_actual_in_expected = actual_item in expected_items - actual_lines.append( - "{} {}".format("+" if is_actual_in_expected else " ", actual_item) - ) - - newline = "\n" - expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}" - actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}" - first_message = ( - "Items must match exactly" if exact else "Some expected items are missing." - ) - diff_message = f"{first_message}\n{expected_string}\n{actual_string}" - - self.fail(f"{diff_message}\n{message}") - def _add_new_dm_to_global_account_data( self, source_user_id: str, target_user_id: str, target_room_id: str ) -> None: @@ -3868,6 +3819,13 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): body={"foo": "bar"}, tok=user2_tok, ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "qux"}, + tok=user2_tok, + ) # Make the Sliding Sync request with wildcards for the `state_key` channel = self.make_request( @@ -3891,16 +3849,13 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ], "timeline_limit": 0, }, - } - # TODO: Room subscription should also combine with the `required_state` - # "room_subscriptions": { - # room_id1: { - # "required_state": [ - # ["org.matrix.bar_state", ""] - # ], - # "timeline_limit": 0, - # } - # } + }, + "room_subscriptions": { + room_id1: { + "required_state": [["org.matrix.bar_state", ""]], + "timeline_limit": 0, + } + }, }, access_token=user1_tok, ) @@ -3917,6 +3872,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): state_map[(EventTypes.Member, user1_id)], state_map[(EventTypes.Member, user2_id)], state_map[("org.matrix.foo_state", "")], + state_map[("org.matrix.bar_state", "")], }, exact=True, ) @@ -4009,6 +3965,271 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["lists"]["foo-list"], ) + def test_room_subscriptions_with_join_membership(self) -> None: + """ + Test `room_subscriptions` with a joined room should give us timeline and current + state events. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # We should see some state + self._assertRequiredStateIncludes( + channel.json_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + + # We should see some events + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id1]["timeline"] + ], + [ + join_response["event_id"], + ], + channel.json_body["rooms"][room_id1]["timeline"], + ) + # No "live" events in an initial sync (no `from_token` to define the "live" + # range) + self.assertEqual( + channel.json_body["rooms"][room_id1]["num_live"], + 0, + channel.json_body["rooms"][room_id1], + ) + # There are more events to paginate to + self.assertEqual( + channel.json_body["rooms"][room_id1]["limited"], + True, + channel.json_body["rooms"][room_id1], + ) + + def test_room_subscriptions_with_leave_membership(self) -> None: + """ + Test `room_subscriptions` with a leave room should give us timeline and state + events up to the leave event. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Send some events after user1 leaves + self.helper.send(room_id1, "activity after leave", tok=user2_tok) + # Update state after user1 leaves + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "qux"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 2, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the state at the time of the leave + self._assertRequiredStateIncludes( + channel.json_body["rooms"][room_id1]["required_state"], + { + state_map[("org.matrix.foo_state", "")], + }, + exact=True, + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + + # We should see some before we left (nothing after) + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id1]["timeline"] + ], + [ + join_response["event_id"], + leave_response["event_id"], + ], + channel.json_body["rooms"][room_id1]["timeline"], + ) + # No "live" events in an initial sync (no `from_token` to define the "live" + # range) + self.assertEqual( + channel.json_body["rooms"][room_id1]["num_live"], + 0, + channel.json_body["rooms"][room_id1], + ) + # There are more events to paginate to + self.assertEqual( + channel.json_body["rooms"][room_id1]["limited"], + True, + channel.json_body["rooms"][room_id1], + ) + + def test_room_subscriptions_no_leak_private_room(self) -> None: + """ + Test `room_subscriptions` with a private room we have never been in should not + leak any data to the user. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False) + + # We should not be able to join the private room + self.helper.join( + room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN + ) + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should not see the room at all (we're not in it) + self.assertIsNone( + channel.json_body["rooms"].get(room_id1), channel.json_body["rooms"] + ) + + def test_room_subscriptions_world_readable(self) -> None: + """ + Test `room_subscriptions` with a room that has `world_readable` history visibility + + FIXME: We should be able to see the room timeline and state + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room with `world_readable` history visibility + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "preset": "public_chat", + "initial_state": [ + { + "content": { + "history_visibility": HistoryVisibility.WORLD_READABLE + }, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + # Ensure we're testing with a room with `world_readable` history visibility + # which means events are visible to anyone even without membership. + history_visibility_response = self.helper.get_state( + room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok + ) + self.assertEqual( + history_visibility_response.get("history_visibility"), + HistoryVisibility.WORLD_READABLE, + ) + + # Note: We never join the room + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # FIXME: In the future, we should be able to see the room because it's + # `world_readable` but currently we don't support this. + self.assertIsNone( + channel.json_body["rooms"].get(room_id1), channel.json_body["rooms"] + ) + class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): """Tests for the to-device sliding sync extension""" diff --git a/tests/unittest.py b/tests/unittest.py index a7c20556a..4aa7f5610 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -28,6 +28,7 @@ import logging import secrets import time from typing import ( + AbstractSet, Any, Awaitable, Callable, @@ -269,6 +270,56 @@ class TestCase(unittest.TestCase): required[key], actual[key], msg="%s mismatch. %s" % (key, actual) ) + def assertIncludes( + self, + actual_items: AbstractSet[str], + expected_items: AbstractSet[str], + exact: bool = False, + message: Optional[str] = None, + ) -> None: + """ + Assert that all of the `expected_items` are included in the `actual_items`. 
+ + This assert could also be called `assertContains`, `assertItemsInSet` + + Args: + actual_items: The container + expected_items: The items to check for in the container + exact: Whether the actual state should be exactly equal to the expected + state (no extras). + message: Optional message to include in the failure message. + """ + # Check that each set has the same items + if exact and actual_items == expected_items: + return + # Check for a superset + elif not exact and actual_items >= expected_items: + return + + expected_lines: List[str] = [] + for expected_item in expected_items: + is_expected_in_actual = expected_item in actual_items + expected_lines.append( + "{} {}".format(" " if is_expected_in_actual else "?", expected_item) + ) + + actual_lines: List[str] = [] + for actual_item in actual_items: + is_actual_in_expected = actual_item in expected_items + actual_lines.append( + "{} {}".format("+" if is_actual_in_expected else " ", actual_item) + ) + + newline = "\n" + expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}" + actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}" + first_message = ( + "Items must match exactly" if exact else "Some expected items are missing." + ) + diff_message = f"{first_message}\n{expected_string}\n{actual_string}" + + self.fail(f"{diff_message}\n{message}") + def DEBUG(target: TV) -> TV: """A decorator to set the .loglevel attribute to logging.DEBUG. From 4f6194492a4f16c37e55d15d0434d5357266fed0 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 15 Jul 2024 11:42:59 +0200 Subject: [PATCH 12/43] Make sure we use the right logic for enabling the media repo. (#17424) This removes the `enable_media_repo` attribute on the server config in favour of always using the `can_load_media_repo` in the media config. This should avoid issues like in #17420 in the future --- changelog.d/17424.misc | 1 + synapse/app/homeserver.py | 2 +- synapse/config/repository.py | 2 +- synapse/config/server.py | 6 ------ 4 files changed, 3 insertions(+), 8 deletions(-) create mode 100644 changelog.d/17424.misc diff --git a/changelog.d/17424.misc b/changelog.d/17424.misc new file mode 100644 index 000000000..d4a81c137 --- /dev/null +++ b/changelog.d/17424.misc @@ -0,0 +1 @@ +Make sure we always use the right logic for enabling the media repo. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 2b111847b..e114ab7ec 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -217,7 +217,7 @@ class SynapseHomeServer(HomeServer): ) if name in ["media", "federation", "client"]: - if self.config.server.enable_media_repo: + if self.config.media.can_load_media_repo: media_repo = self.get_media_repository_resource() resources.update( { diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 164547049..dc0e93ffa 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config): # Only enable the media repo if either the media repo is enabled or the # current worker app is the media repo. 
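+        # Note: `enable_media_repo` is read from the raw config dict here because
+        # the `synapse/config/server.py` change below removes the corresponding
+        # attribute from the server config.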
if ( - self.root.server.enable_media_repo is False + config.get("enable_media_repo", True) is False and config.get("worker_app") != "synapse.app.media_repository" ): self.can_load_media_repo = False diff --git a/synapse/config/server.py b/synapse/config/server.py index a2b230577..8bb97df17 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -395,12 +395,6 @@ class ServerConfig(Config): self.presence_router_config, ) = load_module(presence_router_config, ("presence", "presence_router")) - # whether to enable the media repository endpoints. This should be set - # to false if the media repository is running as a separate endpoint; - # doing so ensures that we will not run cache cleanup jobs on the - # master, potentially causing inconsistency. - self.enable_media_repo = config.get("enable_media_repo", True) - # Whether to require authentication to retrieve profile data (avatars, # display names) of other users through the client API. self.require_auth_for_profile_requests = config.get( From 4c44020838ddf49dba3e22029d25ff880b260a69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:57:13 +0100 Subject: [PATCH 13/43] Bump twine from 5.1.0 to 5.1.1 (#17443) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index f578a33ed..3f3a3e38d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2704,19 +2704,19 @@ docs = ["sphinx (<7.0.0)"] [[package]] name = "twine" -version = "5.1.0" +version = "5.1.1" description = "Collection of utilities for publishing packages on PyPI" optional = false python-versions = ">=3.8" files = [ - {file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"}, - {file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"}, + {file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"}, + {file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"}, ] [package.dependencies] importlib-metadata = ">=3.6" keyring = ">=15.1" -pkginfo = ">=1.8.1" +pkginfo = ">=1.8.1,<1.11" readme-renderer = ">=35.0" requests = ">=2.20" requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" From 0de0689ae8fc4b2311aea0e007f8144dfbde370b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:57:30 +0100 Subject: [PATCH 14/43] Bump jsonschema from 4.22.0 to 4.23.0 (#17444) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3f3a3e38d..0cff51faf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -934,13 +934,13 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.22.0" +version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, - {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = 
"sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, ] [package.dependencies] @@ -953,7 +953,7 @@ rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-specifications" From 2af729a193389cc71e61232b67a62574e166128c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:57:45 +0100 Subject: [PATCH 15/43] Bump ulid from 1.1.2 to 1.1.3 (#17442) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a8bf7a49..cc2c59d6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -597,9 +597,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ulid" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259" +checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289" dependencies = [ "getrandom", "rand", From 14f2b1eb00986b7ca06739abe01fc88533885991 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:58:12 +0100 Subject: [PATCH 16/43] Bump bytes from 1.6.0 to 1.6.1 (#17441) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc2c59d6f..e9adfcbdc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" [[package]] name = "cfg-if" From d88ba45db9f5d344542a04a06c29ea26ebb8fa21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:58:28 +0100 Subject: [PATCH 17/43] Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712 (#17446) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0cff51faf..805089e79 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2851,13 +2851,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.22.0.20240610" +version = "4.23.0.20240712" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"}, - {file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"}, + {file = "types-jsonschema-4.23.0.20240712.tar.gz", hash = "sha256:b20db728dcf7ea3e80e9bdeb55e8b8420c6c040cda14e8cf284465adee71d217"}, + {file = "types_jsonschema-4.23.0.20240712-py3-none-any.whl", hash = 
"sha256:8c33177ce95336241c1d61ccb56a9964d4361b99d5f1cd81a1ab4909b0dd7cf4"}, ] [package.dependencies] From df11af14dbd2faad916924cab96e75bd7c95a66a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2024 16:13:04 +0100 Subject: [PATCH 18/43] Fix bug where sync could get stuck when using workers (#17438) This is because we serialized the token wrong if the instance map contained entries from before the minimum token. --- changelog.d/17438.bugfix | 1 + synapse/handlers/sliding_sync.py | 11 ++++- synapse/types/__init__.py | 65 +++++++++++++++++++++++++---- tests/test_types.py | 71 ++++++++++++++++++++++++++++++++ 4 files changed, 138 insertions(+), 10 deletions(-) create mode 100644 changelog.d/17438.bugfix diff --git a/changelog.d/17438.bugfix b/changelog.d/17438.bugfix new file mode 100644 index 000000000..cff6eecd4 --- /dev/null +++ b/changelog.d/17438.bugfix @@ -0,0 +1 @@ +Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index be98b379e..1b5262d66 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -699,10 +699,17 @@ class SlidingSyncHandler: instance_to_max_stream_ordering_map[instance_name] = stream_ordering # Then assemble the `RoomStreamToken` + min_stream_pos = min(instance_to_max_stream_ordering_map.values()) membership_snapshot_token = RoomStreamToken( # Minimum position in the `instance_map` - stream=min(instance_to_max_stream_ordering_map.values()), - instance_map=immutabledict(instance_to_max_stream_ordering_map), + stream=min_stream_pos, + instance_map=immutabledict( + { + instance_name: stream_pos + for instance_name, stream_pos in instance_to_max_stream_ordering_map.items() + if stream_pos > min_stream_pos + } + ), ) # Since we fetched the users room list at some point in time after the from/to diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index b22a13ef0..3962ecc99 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -20,6 +20,7 @@ # # import abc +import logging import re import string from enum import Enum @@ -74,6 +75,9 @@ if TYPE_CHECKING: from synapse.storage.databases.main import DataStore, PurgeEventsStore from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore + +logger = logging.getLogger(__name__) + # Define a state map type from type/state_key to T (usually an event ID or # event) T = TypeVar("T") @@ -454,6 +458,8 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): represented by a default `stream` attribute and a map of instance name to stream position of any writers that are ahead of the default stream position. + + The values in `instance_map` must be greater than the `stream` attribute. """ stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True) @@ -468,6 +474,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): kw_only=True, ) + def __attrs_post_init__(self) -> None: + # Enforce that all instances have a value greater than the min stream + # position. + for i, v in self.instance_map.items(): + if v <= self.stream: + raise ValueError( + f"'instance_map' includes a stream position before the main 'stream' attribute. 
Instance: {i}" + ) + @classmethod @abc.abstractmethod async def parse(cls, store: "DataStore", string: str) -> "Self": @@ -494,6 +509,9 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): for instance in set(self.instance_map).union(other.instance_map) } + # Filter out any redundant entries. + instance_map = {i: s for i, s in instance_map.items() if s > max_stream} + return attr.evolve( self, stream=max_stream, instance_map=immutabledict(instance_map) ) @@ -539,10 +557,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): def bound_stream_token(self, max_stream: int) -> "Self": """Bound the stream positions to a maximum value""" + min_pos = min(self.stream, max_stream) return type(self)( - stream=min(self.stream, max_stream), + stream=min_pos, instance_map=immutabledict( - {k: min(s, max_stream) for k, s in self.instance_map.items()} + { + k: min(s, max_stream) + for k, s in self.instance_map.items() + if min(s, max_stream) > min_pos + } ), ) @@ -637,6 +660,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): "Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'." ) + super().__attrs_post_init__() + @classmethod async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken": try: @@ -651,6 +676,11 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): instance_map = {} for part in parts[1:]: + if not part: + # Handle tokens of the form `m5~`, which were created by + # a bug + continue + key, value = part.split(".") instance_id = int(key) pos = int(value) @@ -666,7 +696,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): except CancelledError: raise except Exception: - pass + # We log an exception here as even though this *might* be a client + # handing a bad token, its more likely that Synapse returned a bad + # token (and we really want to catch those!). + logger.exception("Failed to parse stream token: %r", string) raise SynapseError(400, "Invalid room stream token %r" % (string,)) @classmethod @@ -713,6 +746,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return self.instance_map.get(instance_name, self.stream) async def to_string(self, store: "DataStore") -> str: + """See class level docstring for information about the format.""" + if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) elif self.instance_map: @@ -727,8 +762,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): instance_id = await store.get_id_for_instance(name) entries.append(f"{instance_id}.{pos}") - encoded_map = "~".join(entries) - return f"m{self.stream}~{encoded_map}" + if entries: + encoded_map = "~".join(entries) + return f"m{self.stream}~{encoded_map}" + return f"s{self.stream}" else: return "s%d" % (self.stream,) @@ -756,6 +793,11 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): instance_map = {} for part in parts[1:]: + if not part: + # Handle tokens of the form `m5~`, which were created by + # a bug + continue + key, value = part.split(".") instance_id = int(key) pos = int(value) @@ -770,10 +812,15 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): except CancelledError: raise except Exception: - pass + # We log an exception here as even though this *might* be a client + # handing a bad token, its more likely that Synapse returned a bad + # token (and we really want to catch those!). 
+ logger.exception("Failed to parse stream token: %r", string) raise SynapseError(400, "Invalid stream token %r" % (string,)) async def to_string(self, store: "DataStore") -> str: + """See class level docstring for information about the format.""" + if self.instance_map: entries = [] for name, pos in self.instance_map.items(): @@ -786,8 +833,10 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): instance_id = await store.get_id_for_instance(name) entries.append(f"{instance_id}.{pos}") - encoded_map = "~".join(entries) - return f"m{self.stream}~{encoded_map}" + if entries: + encoded_map = "~".join(entries) + return f"m{self.stream}~{encoded_map}" + return str(self.stream) else: return str(self.stream) diff --git a/tests/test_types.py b/tests/test_types.py index 944aa784f..00adc65a5 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -19,9 +19,18 @@ # # +from typing import Type +from unittest import skipUnless + +from immutabledict import immutabledict +from parameterized import parameterized_class + from synapse.api.errors import SynapseError from synapse.types import ( + AbstractMultiWriterStreamToken, + MultiWriterStreamToken, RoomAlias, + RoomStreamToken, UserID, get_domain_from_id, get_localpart_from_id, @@ -29,6 +38,7 @@ from synapse.types import ( ) from tests import unittest +from tests.utils import USE_POSTGRES_FOR_TESTS class IsMineIDTests(unittest.HomeserverTestCase): @@ -127,3 +137,64 @@ class MapUsernameTestCase(unittest.TestCase): # this should work with either a unicode or a bytes self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast") self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast") + + +@parameterized_class( + ("token_type",), + [ + (MultiWriterStreamToken,), + (RoomStreamToken,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}", +) +class MultiWriterTokenTestCase(unittest.HomeserverTestCase): + """Tests for the different types of multi writer tokens.""" + + token_type: Type[AbstractMultiWriterStreamToken] + + def test_basic_token(self) -> None: + """Test that a simple stream token can be serialized and unserialized""" + store = self.hs.get_datastores().main + + token = self.token_type(stream=5) + + string_token = self.get_success(token.to_string(store)) + + if isinstance(token, RoomStreamToken): + self.assertEqual(string_token, "s5") + else: + self.assertEqual(string_token, "5") + + parsed_token = self.get_success(self.token_type.parse(store, string_token)) + self.assertEqual(parsed_token, token) + + @skipUnless(USE_POSTGRES_FOR_TESTS, "Requires Postgres") + def test_instance_map(self) -> None: + """Test for stream token with instance map""" + store = self.hs.get_datastores().main + + token = self.token_type(stream=5, instance_map=immutabledict({"foo": 6})) + + string_token = self.get_success(token.to_string(store)) + self.assertEqual(string_token, "m5~1.6") + + parsed_token = self.get_success(self.token_type.parse(store, string_token)) + self.assertEqual(parsed_token, token) + + def test_instance_map_assertion(self) -> None: + """Test that we assert values in the instance map are greater than the + min stream position""" + + with self.assertRaises(ValueError): + self.token_type(stream=5, instance_map=immutabledict({"foo": 4})) + + with self.assertRaises(ValueError): + self.token_type(stream=5, instance_map=immutabledict({"foo": 5})) + + def test_parse_bad_token(self) -> None: + """Test that we can parse tokens produced by a bug in Synapse of the + 
form `m5~`""" + store = self.hs.get_datastores().main + + parsed_token = self.get_success(self.token_type.parse(store, "m5~")) + self.assertEqual(parsed_token, self.token_type(stream=5)) From 899d33f2baf42727fb9b4c118a931d872690a81c Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 16 Jul 2024 01:52:39 -0700 Subject: [PATCH 19/43] Remove unnecessary call to resume producing in fake channel (#17449) This fell out of the authenticated media work - this bit of code masked a bug but does not break anything when removed, so probably should be removed. --- changelog.d/17449.bugfix | 1 + tests/server.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/17449.bugfix diff --git a/changelog.d/17449.bugfix b/changelog.d/17449.bugfix new file mode 100644 index 000000000..cd847a3d1 --- /dev/null +++ b/changelog.d/17449.bugfix @@ -0,0 +1 @@ +Remove unnecessary call to resume producing in fake channel. \ No newline at end of file diff --git a/tests/server.py b/tests/server.py index f1cd0f76b..85602e695 100644 --- a/tests/server.py +++ b/tests/server.py @@ -289,10 +289,6 @@ class FakeChannel: self._reactor.run() while not self.is_finished(): - # If there's a producer, tell it to resume producing so we get content - if self._producer: - self._producer.resumeProducing() - if self._reactor.seconds() > end_time: raise TimedOutException("Timed out waiting for request to finish.") From 9e1acea0512bc6dd6ceddcc44619c754e16c2f97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 10:06:05 +0100 Subject: [PATCH 20/43] Bump setuptools from 67.6.0 to 70.0.0 (#17448) --- poetry.lock | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index 805089e79..4afedfbe0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2504,19 +2504,18 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] [[package]] name = "setuptools" -version = "67.6.0" +version = "70.0.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"}, - {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"}, + {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, + {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", 
"jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "setuptools-rust" From 429ecb7564640b4d04588173e9e54bf53524aadf Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 16 Jul 2024 03:13:55 -0700 Subject: [PATCH 21/43] Handle remote download responses with `UNKNOWN_LENGTH` more gracefully (#17439) Prior to this PR, remote downloads which did not provide a `content-length` were decremented from the remote download ratelimiter at the max allowable size, leading to excessive ratelimiting - see https://github.com/element-hq/synapse/issues/17394. This PR adds a linearizer to limit concurrent remote downloads to 6 per IP address, and decrements remote downloads without a `content-length` from the ratelimiter *after* the download is complete and the response length is known. Also adds logic to ensure that responses with a known length respect the `max_download_size`. --- changelog.d/17439.bugfix | 1 + synapse/http/matrixfederationclient.py | 126 ++++++++++++++++--------- tests/media/test_media_storage.py | 49 ++++++++-- tests/rest/client/test_media.py | 50 +++++++++- 4 files changed, 166 insertions(+), 60 deletions(-) create mode 100644 changelog.d/17439.bugfix diff --git a/changelog.d/17439.bugfix b/changelog.d/17439.bugfix new file mode 100644 index 000000000..f36c3ec25 --- /dev/null +++ b/changelog.d/17439.bugfix @@ -0,0 +1 @@ +Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. 
\ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 749b01dd0..6fd75fd38 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -90,7 +90,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.types import JsonDict from synapse.util import json_decoder -from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred +from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred from synapse.util.metrics import Measure from synapse.util.stringutils import parse_and_validate_server_name @@ -475,6 +475,8 @@ class MatrixFederationHttpClient: use_proxy=True, ) + self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6) + def wake_destination(self, destination: str) -> None: """Called when the remote server may have come back online.""" @@ -1486,35 +1488,44 @@ class MatrixFederationHttpClient: ) headers = dict(response.headers.getAllRawHeaders()) - expected_size = response.length - # if we don't get an expected length then use the max length + if expected_size == UNKNOWN_LENGTH: expected_size = max_size - logger.debug( - f"File size unknown, assuming file is max allowable size: {max_size}" - ) + else: + if int(expected_size) > max_size: + msg = "Requested file is too large > %r bytes" % (max_size,) + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE) - read_body, _ = await download_ratelimiter.can_do_action( - requester=None, - key=ip_address, - n_actions=expected_size, - ) - if not read_body: - msg = "Requested file size exceeds ratelimits" - logger.warning( - "{%s} [%s] %s", - request.txn_id, - request.destination, - msg, + read_body, _ = await download_ratelimiter.can_do_action( + requester=None, + key=ip_address, + n_actions=expected_size, ) - raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED) + if not read_body: + msg = "Requested file size exceeds ratelimits" + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError( + HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED + ) try: - # add a byte of headroom to max size as function errs at >= - d = read_body_with_max_size(response, output_stream, expected_size + 1) - d.addTimeout(self.default_timeout_seconds, self.reactor) - length = await make_deferred_yieldable(d) + async with self.remote_download_linearizer.queue(ip_address): + # add a byte of headroom to max size as function errs at >= + d = read_body_with_max_size(response, output_stream, expected_size + 1) + d.addTimeout(self.default_timeout_seconds, self.reactor) + length = await make_deferred_yieldable(d) except BodyExceededMaxSize: msg = "Requested file is too large > %r bytes" % (expected_size,) logger.warning( @@ -1560,6 +1571,13 @@ class MatrixFederationHttpClient: request.method, request.uri.decode("ascii"), ) + + # if we didn't know the length upfront, decrement the actual size from ratelimiter + if response.length == UNKNOWN_LENGTH: + download_ratelimiter.record_action( + requester=None, key=ip_address, n_actions=length + ) + return length, headers async def federation_get_file( @@ -1630,29 +1648,37 @@ class MatrixFederationHttpClient: ) headers = dict(response.headers.getAllRawHeaders()) - expected_size = 
response.length - # if we don't get an expected length then use the max length + if expected_size == UNKNOWN_LENGTH: expected_size = max_size - logger.debug( - f"File size unknown, assuming file is max allowable size: {max_size}" - ) + else: + if int(expected_size) > max_size: + msg = "Requested file is too large > %r bytes" % (max_size,) + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE) - read_body, _ = await download_ratelimiter.can_do_action( - requester=None, - key=ip_address, - n_actions=expected_size, - ) - if not read_body: - msg = "Requested file size exceeds ratelimits" - logger.warning( - "{%s} [%s] %s", - request.txn_id, - request.destination, - msg, + read_body, _ = await download_ratelimiter.can_do_action( + requester=None, + key=ip_address, + n_actions=expected_size, ) - raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED) + if not read_body: + msg = "Requested file size exceeds ratelimits" + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError( + HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED + ) # this should be a multipart/mixed response with the boundary string in the header try: @@ -1672,11 +1698,12 @@ class MatrixFederationHttpClient: raise SynapseError(HTTPStatus.BAD_GATEWAY, msg) try: - # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >= - deferred = read_multipart_response( - response, output_stream, boundary, expected_size + 1 - ) - deferred.addTimeout(self.default_timeout_seconds, self.reactor) + async with self.remote_download_linearizer.queue(ip_address): + # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >= + deferred = read_multipart_response( + response, output_stream, boundary, expected_size + 1 + ) + deferred.addTimeout(self.default_timeout_seconds, self.reactor) except BodyExceededMaxSize: msg = "Requested file is too large > %r bytes" % (expected_size,) logger.warning( @@ -1743,6 +1770,13 @@ class MatrixFederationHttpClient: request.method, request.uri.decode("ascii"), ) + + # if we didn't know the length upfront, decrement the actual size from ratelimiter + if response.length == UNKNOWN_LENGTH: + download_ratelimiter.record_action( + requester=None, key=ip_address, n_actions=length + ) + return length, headers, multipart_response.json diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 70912e22f..e55001fb4 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -1057,13 +1057,15 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel.code == 200 + @override_config({"remote_media_download_burst_count": "87M"}) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, ) - def test_download_ratelimit_max_size_sub(self) -> None: + def test_download_ratelimit_unknown_length(self) -> None: """ - Test that if no content-length is provided, the default max size is applied instead + Test that if no content-length is provided, ratelimit will still be applied after + download once length is known """ # mock out actually sending the request @@ -1077,19 +1079,48 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): self.client._send_request = _send_request # type: ignore - # ten requests should go through using the max size (500MB/50MB) - for i in 
range(10):
-            channel2 = self.make_request(
+        # 3 requests should go through (note: the 3rd technically violates the ratelimit,
+        # but the limit is applied *after* the download - the next one will be ratelimited)
+        for i in range(3):
+            channel = self.make_request(
                 "GET",
                 f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}",
                 shorthand=False,
             )
-            assert channel2.code == 200
+            assert channel.code == 200
 
-        # eleventh will hit ratelimit
-        channel3 = self.make_request(
+        # 4th will hit ratelimit
+        channel2 = self.make_request(
             "GET",
             "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx",
             shorthand=False,
         )
-        assert channel3.code == 429
+        assert channel2.code == 429
+
+    @override_config({"max_upload_size": "29M"})
+    @patch(
+        "synapse.http.matrixfederationclient.read_body_with_max_size",
+        read_body_with_max_size_30MiB,
+    )
+    def test_max_download_respected(self) -> None:
+        """
+        Test that the max download size is enforced - note that the max download size
+        is determined by `max_upload_size`
+        """
+
+        # mock out actually sending the request
+        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+            resp = MagicMock(spec=IResponse)
+            resp.code = 200
+            resp.length = 31457280
+            resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
+            resp.phrase = b"OK"
+            return resp
+
+        self.client._send_request = _send_request  # type: ignore
+
+        channel = self.make_request(
+            "GET", "/_matrix/media/v3/download/remote.org/abcd", shorthand=False
+        )
+        assert channel.code == 502
+        assert channel.json_body["errcode"] == "M_TOO_LARGE"
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index 7f2caed7d..466c5a0b7 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -1809,13 +1809,19 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
         )
         assert channel.code == 200
 
+    @override_config(
+        {
+            "remote_media_download_burst_count": "87M",
+        }
+    )
     @patch(
         "synapse.http.matrixfederationclient.read_multipart_response",
         read_multipart_response_30MiB,
     )
-    def test_download_ratelimit_max_size_sub(self) -> None:
+    def test_download_ratelimit_unknown_length(self) -> None:
         """
-        Test that if no content-length is provided, the default max size is applied instead
+        Test that if no content-length is provided, ratelimiting is still applied after
+        media is downloaded and length is known
         """
 
         # mock out actually sending the request
@@ -1831,8 +1837,9 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
 
         self.client._send_request = _send_request  # type: ignore
 
-        # ten requests should go through using the max size (500MB/50MB)
-        for i in range(10):
+        # the first 3 requests go through (the 3rd technically violates the rate limit,
+        # but ratelimiting is applied *after* the download, so the next one fails)
+        for i in range(3):
             channel2 = self.make_request(
                 "GET",
                 f"/_matrix/client/v1/media/download/remote.org/abc{i}",
                 shorthand=False,
                 access_token=self.tok,
             )
             assert channel2.code == 200
 
-        # eleventh will hit ratelimit
+        # 4th will hit ratelimit
         channel3 = self.make_request(
             "GET",
             "/_matrix/client/v1/media/download/remote.org/abcd",
             shorthand=False,
             access_token=self.tok,
         )
         assert channel3.code == 429
 
+    @override_config({"max_upload_size": "29M"})
+    @patch(
+        "synapse.http.matrixfederationclient.read_multipart_response",
+        read_multipart_response_30MiB,
+    )
+    def test_max_download_respected(self)
-> None: + """ + Test that the max download size is enforced - note that max download size is determined + by the max_upload_size + """ + + # mock out actually sending the request, returns a 30MiB response + async def _send_request(*args: Any, **kwargs: Any) -> IResponse: + resp = MagicMock(spec=IResponse) + resp.code = 200 + resp.length = 31457280 + resp.headers = Headers( + {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]} + ) + resp.phrase = b"OK" + return resp + + self.client._send_request = _send_request # type: ignore + + channel = self.make_request( + "GET", + "/_matrix/client/v1/media/download/remote.org/abcd", + shorthand=False, + access_token=self.tok, + ) + assert channel.code == 502 + assert channel.json_body["errcode"] == "M_TOO_LARGE" + def test_file_download(self) -> None: content = io.BytesIO(b"file_to_stream") content_uri = self.get_success( From 83894180b2a4345d4b1c51d9268136b6167858e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 11:35:47 +0100 Subject: [PATCH 22/43] Bump matrix-org/done-action from 2 to 3 (#17440) --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 767495101..730f8552d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -305,7 +305,7 @@ jobs: - lint-readme runs-on: ubuntu-latest steps: - - uses: matrix-org/done-action@v2 + - uses: matrix-org/done-action@v3 with: needs: ${{ toJSON(needs) }} @@ -737,7 +737,7 @@ jobs: - linting-done runs-on: ubuntu-latest steps: - - uses: matrix-org/done-action@v2 + - uses: matrix-org/done-action@v3 with: needs: ${{ toJSON(needs) }} From 79924aebefbcc2663f541efe083ae1fdbb10eab6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 12:08:06 +0100 Subject: [PATCH 23/43] Bump mypy from 1.9.0 to 1.10.1 (#17445) --- poetry.lock | 56 +++++++++++++-------------- tests/util/test_check_dependencies.py | 3 +- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4afedfbe0..4092a4188 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1389,38 +1389,38 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = 
"mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, ] [package.dependencies] diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index fb67146c6..13a4e6dda 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -21,6 +21,7 @@ from contextlib import contextmanager from os import PathLike +from pathlib 
import Path
 from typing import Generator, Optional, Union
 from unittest.mock import patch
 
@@ -41,7 +42,7 @@ class DummyDistribution(metadata.Distribution):
     def version(self) -> str:
         return self._version
 
-    def locate_file(self, path: Union[str, PathLike]) -> PathLike:
+    def locate_file(self, path: Union[str, PathLike]) -> Path:
         raise NotImplementedError()
 
     def read_text(self, filename: str) -> None:

From 3fee32ed6be7eeb5845b0ce199ba29fa49fdd41e Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 17 Jul 2024 13:10:15 -0500
Subject: [PATCH 24/43] Order `heroes` by `stream_ordering` (as spec'ed) (#17435)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The spec specifically mentions `stream_ordering` but that's a Synapse-specific
concept. In any case, the essence of the spec is basically the first 5 members
of the room, which `stream_ordering` accomplishes.

Split off from https://github.com/element-hq/synapse/pull/17419#discussion_r1671342794

## Spec compliance

> This should be the first 5 members of the room, **ordered by stream ordering**, which are joined or invited. The list must never include the client’s own user ID. When no joined or invited members are available, this should consist of the banned and left users.
>
> *-- https://spec.matrix.org/v1.10/client-server-api/#_matrixclientv3sync_roomsummary*

Related to https://github.com/matrix-org/matrix-spec/issues/1334
---
 changelog.d/17435.bugfix                     |   1 +
 synapse/storage/databases/main/roommember.py |  57 ++++-
 tests/rest/client/test_sync.py               |  21 +-
 tests/storage/test_roommember.py             | 403 ++++++++++++++++++-
 4 files changed, 456 insertions(+), 26 deletions(-)
 create mode 100644 changelog.d/17435.bugfix

diff --git a/changelog.d/17435.bugfix b/changelog.d/17435.bugfix
new file mode 100644
index 000000000..2d06a7c7f
--- /dev/null
+++ b/changelog.d/17435.bugfix
@@ -0,0 +1 @@
+Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`).
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 5d2fd0849..f62d9f705 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -279,8 +279,19 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
 
     @cached(max_entries=100000)  # type: ignore[synapse-@cached-mutable]
     async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]:
-        """Get the details of a room roughly suitable for use by the room
+        """
+        Get the details of a room roughly suitable for use by the room
         summary extension to /sync. Useful when lazy loading room members.
+
+        Returns the total count of members in the room by membership type, and a
+        truncated list of members (the heroes). This will be the first 6 members of the
+        room:
+        - We want 5 heroes plus 1, in case one of them is the
+        calling user.
+        - They are ordered by `stream_ordering`, and are members who are joined or
+        invited. When no joined or invited members are available, this also includes
+        banned and left users.
+
        Args:
            room_id: The room ID to query
        Returns:
@@ -308,23 +319,36 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
         for count, membership in txn:
             res.setdefault(membership, MemberSummary([], count))
 
-        # we order by membership and then fairly arbitrarily by event_id so
-        # heroes are consistent
-        # Note, rejected events will have a null membership field, so
-        # we we manually filter them out.
+        # Order by membership (joins -> invites -> leave (former insiders) ->
+        # everything else (outsiders like bans/knocks)), then by `stream_ordering` so
+        # that the first members in the room show up first and the sort is stable
+        # (consistent heroes).
+        #
+        # Note: rejected events will have a null membership field, so we manually
+        # filter them out.
         sql = """
             SELECT state_key, membership, event_id
             FROM current_state_events
             WHERE type = 'm.room.member' AND room_id = ?
                 AND membership IS NOT NULL
             ORDER BY
-                CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
-                event_id ASC
+                CASE membership WHEN ? THEN 1 WHEN ? THEN 2 WHEN ? THEN 3 ELSE 4 END ASC,
+                event_stream_ordering ASC
             LIMIT ?
         """
-        # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
-        txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
+        txn.execute(
+            sql,
+            (
+                room_id,
+                # Sort order
+                Membership.JOIN,
+                Membership.INVITE,
+                Membership.LEAVE,
+                # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
+                6,
+            ),
+        )
         for user_id, membership, event_id in txn:
             summary = res[membership]
             # we will always have a summary for this membership type at this
@@ -1509,10 +1533,19 @@ def extract_heroes_from_room_summary(
 ) -> List[str]:
     """Determine the users that represent a room, from the perspective of the `me` user.
 
+    This function expects `MemberSummary.members` to already be sorted by
+    `stream_ordering` like the results from `get_room_summary(...)`.
+
     The rules which say which users we select are specified in the "Room Summary"
     section of
     https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync
+
+    Args:
+        details: Mapping from membership type to member summary. We expect
+            `MemberSummary.members` to already be sorted by `stream_ordering`.
+        me: The user for whom we are determining the heroes.
+
     Returns a list (possibly empty) of heroes' mxids.
""" empty_ms = MemberSummary([], 0) @@ -1527,11 +1560,11 @@ def extract_heroes_from_room_summary( r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me] - # FIXME: order by stream ordering rather than as returned by SQL + # We expect `MemberSummary.members` to already be sorted by `stream_ordering` if joined_user_ids or invited_user_ids: - return sorted(joined_user_ids + invited_user_ids)[0:5] + return (joined_user_ids + invited_user_ids)[0:5] else: - return sorted(gone_user_ids)[0:5] + return gone_user_ids[0:5] @attr.s(slots=True, auto_attribs=True) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index f5d57e689..a008ee465 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -2160,18 +2160,15 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # Room2 doesn't have a name so we should see `heroes` populated self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) - # FIXME: Remove this basic assertion and uncomment the better assertion below - # after https://github.com/element-hq/synapse/pull/17435 merges - self.assertEqual(len(channel.json_body["rooms"][room_id1].get("heroes", [])), 5) - # self.assertCountEqual( - # [ - # hero["user_id"] - # for hero in channel.json_body["rooms"][room_id1].get("heroes", []) - # ], - # # Heroes should be the first 5 users in the room (excluding the user - # # themselves, we shouldn't see `user1`) - # [user2_id, user3_id, user4_id, user5_id, user6_id], - # ) + self.assertCountEqual( + [ + hero["user_id"] + for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + ], + # Heroes should be the first 5 users in the room (excluding the user + # themselves, we shouldn't see `user1`) + [user2_id, user3_id, user4_id, user5_id, user6_id], + ) self.assertEqual( channel.json_body["rooms"][room_id1]["joined_count"], 7, diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 882f3bbbd..418b55610 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -19,20 +19,28 @@ # [This file includes modifications made by New Vector Limited] # # +import logging from typing import List, Optional, Tuple, cast from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import Membership +from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.room_versions import RoomVersions +from synapse.rest import admin from synapse.rest.admin import register_servlets_for_client_rest_resource -from synapse.rest.client import login, room +from synapse.rest.client import knock, login, room from synapse.server import HomeServer +from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary +from synapse.storage.roommember import MemberSummary from synapse.types import UserID, create_requester from synapse.util import Clock from tests import unittest from tests.server import TestHomeServer from tests.test_utils import event_injection +from tests.unittest import skip_unless + +logger = logging.getLogger(__name__) class RoomMemberStoreTestCase(unittest.HomeserverTestCase): @@ -240,6 +248,397 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ) +class RoomSummaryTestCase(unittest.HomeserverTestCase): + """ + Test `/sync` room summary related logic like `get_room_summary(...)` and + `extract_heroes_from_room_summary(...)` + """ + + servlets = [ + 
admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sliding_sync_handler = self.hs.get_sliding_sync_handler() + self.store = self.hs.get_datastores().main + + def _assert_member_summary( + self, + actual_member_summary: MemberSummary, + expected_member_list: List[str], + *, + expected_member_count: Optional[int] = None, + ) -> None: + """ + Assert that the `MemberSummary` object has the expected members. + """ + self.assertListEqual( + [ + user_id + for user_id, _membership_event_id in actual_member_summary.members + ], + expected_member_list, + ) + self.assertEqual( + actual_member_summary.count, + ( + expected_member_count + if expected_member_count is not None + else len(expected_member_list) + ), + ) + + def test_get_room_summary_membership(self) -> None: + """ + Test that `get_room_summary(...)` gets every kind of membership when there + aren't that many members in the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + + # Setup a room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # User2 is banned + self.helper.join(room_id, user2_id, tok=user2_tok) + self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok) + + # User3 is invited by user1 + self.helper.invite(room_id, targ=user3_id, tok=user1_tok) + + # User4 leaves + self.helper.join(room_id, user4_id, tok=user4_tok) + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # User5 joins + self.helper.join(room_id, user5_id, tok=user5_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + empty_ms = MemberSummary([], 0) + + self._assert_member_summary( + room_membership_summary.get(Membership.JOIN, empty_ms), + [user1_id, user5_id], + ) + self._assert_member_summary( + room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.BAN, empty_ms), [user2_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.KNOCK, empty_ms), + [ + # No one knocked + ], + ) + + def test_get_room_summary_membership_order(self) -> None: + """ + Test that `get_room_summary(...)` stacks our limit of 6 in this order: joins -> + invites -> leave -> everything else (bans/knocks) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + user6_id = self.register_user("user6", "pass") + user6_tok = self.login(user6_id, "pass") + user7_id = self.register_user("user7", 
"pass") + user7_tok = self.login(user7_id, "pass") + + # Setup the room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # We expect the order to be joins -> invites -> leave -> bans so setup the users + # *NOT* in that same order to make sure we're actually sorting them. + + # User2 is banned + self.helper.join(room_id, user2_id, tok=user2_tok) + self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok) + + # User3 is invited by user1 + self.helper.invite(room_id, targ=user3_id, tok=user1_tok) + + # User4 leaves + self.helper.join(room_id, user4_id, tok=user4_tok) + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # User5, User6, User7 joins + self.helper.join(room_id, user5_id, tok=user5_tok) + self.helper.join(room_id, user6_id, tok=user6_tok) + self.helper.join(room_id, user7_id, tok=user7_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + empty_ms = MemberSummary([], 0) + + self._assert_member_summary( + room_membership_summary.get(Membership.JOIN, empty_ms), + [user1_id, user5_id, user6_id, user7_id], + ) + self._assert_member_summary( + room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.BAN, empty_ms), + [ + # The banned user is not in the summary because the summary can only fit + # 6 members and prefers everything else before bans + # + # user2_id + ], + # But we still see the count of banned users + expected_member_count=1, + ) + self._assert_member_summary( + room_membership_summary.get(Membership.KNOCK, empty_ms), + [ + # No one knocked + ], + ) + + def test_extract_heroes_from_room_summary_excludes_self(self) -> None: + """ + Test that `extract_heroes_from_room_summary(...)` does not include the user + itself. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Setup the room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # User2 joins + self.helper.join(room_id, user2_id, tok=user2_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + + # We first ask from the perspective of a random fake user + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me="@fakeuser" + ) + + # Make sure user1 is in the room (ensure our test setup is correct) + self.assertListEqual(hero_user_ids, [user1_id, user2_id]) + + # Now, we ask for the room summary from the perspective of user1 + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me=user1_id + ) + + # User1 should not be included in the list of heroes because they are the one + # asking + self.assertListEqual(hero_user_ids, [user2_id]) + + def test_extract_heroes_from_room_summary_first_five_joins(self) -> None: + """ + Test that `extract_heroes_from_room_summary(...)` returns the first 5 joins. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + user6_id = self.register_user("user6", "pass") + user6_tok = self.login(user6_id, "pass") + user7_id = self.register_user("user7", "pass") + user7_tok = self.login(user7_id, "pass") + + # Setup the room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # User2 -> User7 joins + self.helper.join(room_id, user2_id, tok=user2_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + self.helper.join(room_id, user4_id, tok=user4_tok) + self.helper.join(room_id, user5_id, tok=user5_tok) + self.helper.join(room_id, user6_id, tok=user6_tok) + self.helper.join(room_id, user7_id, tok=user7_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me="@fakuser" + ) + + # First 5 users to join the room + self.assertListEqual( + hero_user_ids, [user1_id, user2_id, user3_id, user4_id, user5_id] + ) + + def test_extract_heroes_from_room_summary_membership_order(self) -> None: + """ + Test that `extract_heroes_from_room_summary(...)` prefers joins/invites over + everything else. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + + # Setup the room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # We expect the order to be joins -> invites -> leave -> bans so setup the users + # *NOT* in that same order to make sure we're actually sorting them. 
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 is invited by user1
+        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
+
+        # User4 leaves
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+        # User5 joins
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Prefer joins -> invites, over everything else
+        self.assertListEqual(
+            hero_user_ids,
+            [
+                # The joins
+                user1_id,
+                user5_id,
+                # The invites
+                user3_id,
+            ],
+        )
+
+    @skip_unless(
+        False,
+        "Test is not possible because when everyone leaves the room, "
+        + "the server is `no_longer_in_room` and we don't have any `current_state_events` to query",
+    )
+    def test_extract_heroes_from_room_summary_fallback_leave_ban(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` falls back to leave/ban if
+        there aren't any joins/invites.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 leaves
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+        self.helper.leave(room_id, user3_id, tok=user3_tok)
+
+        # User1 leaves (we're doing this last because they're the room creator)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Fallback to people who left -> banned
+        self.assertListEqual(
+            hero_user_ids,
+            [user3_id, user1_id, user2_id],
+        )
+
+    def test_extract_heroes_from_room_summary_excludes_knocks(self) -> None:
+        """
+        People who knock on the room have (potentially) never been in the room before
+        and are total outsiders. Plus the spec doesn't mention them at all for heroes.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Setup the knock room (user1 is the creator and is joined to the room) + knock_room_id = self.helper.create_room_as( + user1_id, tok=user1_tok, room_version=RoomVersions.V7.identifier + ) + self.helper.send_state( + knock_room_id, + EventTypes.JoinRules, + {"join_rule": JoinRules.KNOCK}, + tok=user1_tok, + ) + + # User2 knocks on the room + knock_channel = self.make_request( + "POST", + "/_matrix/client/r0/knock/%s" % (knock_room_id,), + b"{}", + user2_tok, + ) + self.assertEqual(knock_channel.code, 200, knock_channel.result) + + room_membership_summary = self.get_success( + self.store.get_room_summary(knock_room_id) + ) + + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me="@fakeuser" + ) + + # user1 is the creator and is joined to the room (should show up as a hero) + # user2 is knocking on the room (should not show up as a hero) + self.assertListEqual( + hero_user_ids, + [user1_id], + ) + + class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main From a574de006286352e150b397f34fecc0d83e68148 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 Jul 2024 06:49:53 -0500 Subject: [PATCH 25/43] Add `m.room.create` to default bump event types (#17453) Add `m.room.create` to default bump event types This probably helps when no messages have been sent in the room and it was just created. --- changelog.d/17453.misc | 1 + synapse/handlers/sliding_sync.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/17453.misc diff --git a/changelog.d/17453.misc b/changelog.d/17453.misc new file mode 100644 index 000000000..2978a5247 --- /dev/null +++ b/changelog.d/17453.misc @@ -0,0 +1 @@ +Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 1b5262d66..a23a6b9dd 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -53,6 +53,7 @@ logger = logging.getLogger(__name__) # The event types that clients should consider as new activity. 
DEFAULT_BUMP_EVENT_TYPES = { + EventTypes.Create, EventTypes.Message, EventTypes.Encrypted, EventTypes.Sticker, From f583f1dce409210c08cbdd7fc581b1c8e47dffb9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jul 2024 12:59:53 +0100 Subject: [PATCH 26/43] Revert "Bump setuptools from 67.6.0 to 70.0.0" (#17455) Reverts element-hq/synapse#17448 We hit a bug when deploying with synctl: ``` Traceback (most recent call last): File "/home/synapse/env-python311/bin/synctl", line 33, in sys.exit(load_entry_point('matrix-synapse', 'console_scripts', 'synctl')()) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/synapse/env-python311/bin/synctl", line 25, in importlib_load_entry_point return next(matches).load() ^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/importlib/metadata/__init__.py", line 202, in load module = import_module(match.group('module')) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "", line 1204, in _gcd_import File "", line 1176, in _find_and_load File "", line 1147, in _find_and_load_unlocked File "", line 690, in _load_unlocked File "", line 940, in exec_module File "", line 241, in _call_with_frames_removed File "/home/synapse/src/synapse/_scripts/synctl.py", line 37, in from synapse.config import find_config_files File "/home/synapse/src/synapse/config/__init__.py", line 22, in from ._base import ConfigError, find_config_files File "/home/synapse/src/synapse/config/_base.py", line 49, in import pkg_resources File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3282, in @_call_aside ^^^^^^^^^^^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3266, in _call_aside f(*args, **kwargs) File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3295, in _initialize_master_working_set working_set = _declare_state('object', 'working_set', WorkingSet._build_master()) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 589, in _build_master ws.require(__requires__) File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 926, in require needed = self.resolve(parse_requirements(requirements)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 787, in resolve dist = self._resolve_dist( ^^^^^^^^^^^^^^^^^^^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 816, in _resolve_dist env = Environment(self.entries) ^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 1014, in __init__ self.scan(search_path) File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 1046, in scan for dist in find_distributions(item): File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2091, in find_on_path yield from factory(fullpath) ^^^^^^^^^^^^^^^^^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2183, in resolve_egg_link return next(dist_groups, ()) ^^^^^^^^^^^^^^^^^^^^^ File 
"/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2179, in resolved_paths = ( ^ File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2167, in non_empty_lines for line in _read_utf8_with_fallback(path).splitlines(): ^^^^^^^^^^^^^^^^^^^^^^^^ NameError: name '_read_utf8_with_fallback' is not defined ``` --- poetry.lock | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4092a4188..b3e45972f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2504,18 +2504,19 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] [[package]] name = "setuptools" -version = "70.0.0" +version = "67.6.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, - {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, + {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"}, + {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "setuptools-rust" From 6a01af59e14b67960e290c26131249fb8c833293 Mon Sep 17 00:00:00 2001 From: Ben Banfield-Zanin Date: Thu, 18 Jul 2024 13:32:32 +0100 Subject: [PATCH 27/43] Improve default_power_level_content_override documentation (#17451) Co-authored-by: Andrew Morgan 
<1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17451.doc | 1 + .../configuration/config_documentation.md | 32 +++++++++++++++++++ synapse/handlers/room.py | 2 ++ 3 files changed, 35 insertions(+) create mode 100644 changelog.d/17451.doc diff --git a/changelog.d/17451.doc b/changelog.d/17451.doc new file mode 100644 index 000000000..357ac2c90 --- /dev/null +++ b/changelog.d/17451.doc @@ -0,0 +1 @@ +Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 65b03ad0f..38b24b504 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4134,6 +4134,38 @@ default_power_level_content_override: trusted_private_chat: null public_chat: null ``` + +The default power levels for each preset are: +```yaml +"m.room.name": 50 +"m.room.power_levels": 100 +"m.room.history_visibility": 100 +"m.room.canonical_alias": 50 +"m.room.avatar": 50 +"m.room.tombstone": 100 +"m.room.server_acl": 100 +"m.room.encryption": 100 +``` + +So a complete example where the default power-levels for a preset are maintained +but the power level for a new key is set is: +```yaml +default_power_level_content_override: + private_chat: + events: + "com.example.foo": 0 + "m.room.name": 50 + "m.room.power_levels": 100 + "m.room.history_visibility": 100 + "m.room.canonical_alias": 50 + "m.room.avatar": 50 + "m.room.tombstone": 100 + "m.room.server_acl": 100 + "m.room.encryption": 100 + trusted_private_chat: null + public_chat: null +``` + --- ### `forget_rooms_on_leave` diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2302d283a..262d9f404 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1188,6 +1188,8 @@ class RoomCreationHandler: ) events_to_send.append((power_event, power_context)) else: + # Please update the docs for `default_power_level_content_override` when + # updating the `events` dict below power_level_content: JsonDict = { "users": {creator_id: 100}, "users_default": 0, From 71d83477cb494db7f8384eedf127e59c8c8cd479 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:02:38 +0100 Subject: [PATCH 28/43] Bump sentry-sdk from 2.6.0 to 2.8.0 (#17456) --- poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index b3e45972f..2bfcb59cf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2430,13 +2430,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.6.0" +version = "2.8.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"}, - {file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"}, + {file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"}, + {file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"}, ] [package.dependencies] @@ -2466,7 +2466,7 @@ langchain = ["langchain (>=0.0.210)"] loguru 
= ["loguru (>=0.5)"] openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] -opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] +opentelemetry-experimental = ["opentelemetry-instrumentation-aio-pika (==0.46b0)", "opentelemetry-instrumentation-aiohttp-client (==0.46b0)", "opentelemetry-instrumentation-aiopg (==0.46b0)", "opentelemetry-instrumentation-asgi (==0.46b0)", "opentelemetry-instrumentation-asyncio (==0.46b0)", "opentelemetry-instrumentation-asyncpg (==0.46b0)", "opentelemetry-instrumentation-aws-lambda (==0.46b0)", "opentelemetry-instrumentation-boto (==0.46b0)", "opentelemetry-instrumentation-boto3sqs (==0.46b0)", "opentelemetry-instrumentation-botocore (==0.46b0)", "opentelemetry-instrumentation-cassandra (==0.46b0)", "opentelemetry-instrumentation-celery (==0.46b0)", "opentelemetry-instrumentation-confluent-kafka (==0.46b0)", "opentelemetry-instrumentation-dbapi (==0.46b0)", "opentelemetry-instrumentation-django (==0.46b0)", "opentelemetry-instrumentation-elasticsearch (==0.46b0)", "opentelemetry-instrumentation-falcon (==0.46b0)", "opentelemetry-instrumentation-fastapi (==0.46b0)", "opentelemetry-instrumentation-flask (==0.46b0)", "opentelemetry-instrumentation-grpc (==0.46b0)", "opentelemetry-instrumentation-httpx (==0.46b0)", "opentelemetry-instrumentation-jinja2 (==0.46b0)", "opentelemetry-instrumentation-kafka-python (==0.46b0)", "opentelemetry-instrumentation-logging (==0.46b0)", "opentelemetry-instrumentation-mysql (==0.46b0)", "opentelemetry-instrumentation-mysqlclient (==0.46b0)", "opentelemetry-instrumentation-pika (==0.46b0)", "opentelemetry-instrumentation-psycopg (==0.46b0)", "opentelemetry-instrumentation-psycopg2 (==0.46b0)", "opentelemetry-instrumentation-pymemcache (==0.46b0)", "opentelemetry-instrumentation-pymongo (==0.46b0)", "opentelemetry-instrumentation-pymysql (==0.46b0)", "opentelemetry-instrumentation-pyramid (==0.46b0)", "opentelemetry-instrumentation-redis (==0.46b0)", "opentelemetry-instrumentation-remoulade (==0.46b0)", "opentelemetry-instrumentation-requests (==0.46b0)", "opentelemetry-instrumentation-sklearn (==0.46b0)", "opentelemetry-instrumentation-sqlalchemy (==0.46b0)", "opentelemetry-instrumentation-sqlite3 (==0.46b0)", "opentelemetry-instrumentation-starlette (==0.46b0)", "opentelemetry-instrumentation-system-metrics (==0.46b0)", "opentelemetry-instrumentation-threading (==0.46b0)", "opentelemetry-instrumentation-tornado (==0.46b0)", "opentelemetry-instrumentation-tortoiseorm (==0.46b0)", "opentelemetry-instrumentation-urllib (==0.46b0)", "opentelemetry-instrumentation-urllib3 (==0.46b0)", "opentelemetry-instrumentation-wsgi (==0.46b0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] @@ -2476,7 +2476,7 @@ sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] starlette = ["starlette (>=0.19.1)"] starlite = ["starlite (>=1.48)"] -tornado = ["tornado (>=5)"] +tornado = ["tornado (>=6)"] [[package]] name = "service-identity" From 43c865f7c98a1e31d6eb8d3979d1e199fadcb950 Mon Sep 17 00:00:00 2001 From: Erik 
Johnston Date: Fri, 19 Jul 2024 12:09:39 +0100 Subject: [PATCH 29/43] Generate room sync data concurrently (#17458) This is also what we do for standard `/sync`. --- changelog.d/17458.misc | 1 + synapse/handlers/sliding_sync.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17458.misc diff --git a/changelog.d/17458.misc b/changelog.d/17458.misc new file mode 100644 index 000000000..09cce15d0 --- /dev/null +++ b/changelog.d/17458.misc @@ -0,0 +1 @@ +Speed up generating sliding sync responses. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index a23a6b9dd..423f0329d 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -28,6 +28,7 @@ from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membe from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations +from synapse.logging.opentracing import start_active_span, tag_args, trace from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary @@ -43,6 +44,7 @@ from synapse.types import ( ) from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult from synapse.types.state import StateFilter +from synapse.util.async_helpers import concurrently_execute from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -592,11 +594,14 @@ class SlidingSyncHandler: # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} - for room_id, room_sync_config in relevant_room_map.items(): + + @trace + @tag_args + async def handle_room(room_id: str) -> None: room_sync_result = await self.get_room_sync_data( user=sync_config.user, room_id=room_id, - room_sync_config=room_sync_config, + room_sync_config=relevant_room_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ room_id ], @@ -606,6 +611,9 @@ class SlidingSyncHandler: rooms[room_id] = room_sync_result + with start_active_span("sliding_sync.generate_room_entries"): + await concurrently_execute(handle_room, relevant_room_map, 10) + extensions = await self.get_extensions_response( sync_config=sync_config, to_token=to_token ) From d3f9afd8d9db8c80b342177b9ab162c79357c431 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Jul 2024 16:19:15 +0100 Subject: [PATCH 30/43] Add a cache on `get_rooms_for_local_user_where_membership_is` (#17460) As it gets used in sliding sync. We basically invalidate it in all the same places as `get_rooms_for_user`. Most of the changes are due to needing the arguments you pass in to be hashable (which lists aren't) --- changelog.d/17460.misc | 1 + synapse/api/constants.py | 2 +- synapse/storage/_base.py | 6 +++++ synapse/storage/databases/main/cache.py | 6 +++++ synapse/storage/databases/main/roommember.py | 26 +++++++++++++++++--- tests/handlers/test_sync.py | 1 + 6 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17460.misc diff --git a/changelog.d/17460.misc b/changelog.d/17460.misc new file mode 100644 index 000000000..fd99da5a9 --- /dev/null +++ b/changelog.d/17460.misc @@ -0,0 +1 @@ +Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. 
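
The reason the patch converts the membership list before caching is that Synapse's `@cached` decorator keys cache entries on the call arguments, so every argument must be hashable - a `list` is not, while a `frozenset` is. Below is a minimal illustrative sketch of that idea, using a plain dict in place of the real `@cached` machinery; the helper names are hypothetical, not Synapse APIs:

```python
from typing import Dict, FrozenSet, List, Tuple

# Toy stand-in for a results cache keyed on the call arguments; every part of
# the key must be hashable for it to be usable as a dict key.
_cache: Dict[Tuple[str, FrozenSet[str]], List[str]] = {}


def _lookup_rooms(user_id: str, memberships: FrozenSet[str]) -> List[str]:
    # Hypothetical stand-in for the expensive database query.
    return [f"!{membership}-room:example.org" for membership in sorted(memberships)]


def get_rooms_cached(user_id: str, memberships: FrozenSet[str]) -> List[str]:
    # Passing a list here would raise `TypeError: unhashable type: 'list'`.
    key = (user_id, memberships)
    if key not in _cache:
        _cache[key] = _lookup_rooms(user_id, memberships)
    return _cache[key]


# Both calls share one cache entry: frozenset also discards ordering, matching
# the patch's comment that the order of the membership list doesn't matter.
print(get_rooms_cached("@alice:example.org", frozenset(("join", "invite"))))
print(get_rooms_cached("@alice:example.org", frozenset(("invite", "join"))))
```
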
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 12d18137e..85001d967 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -50,7 +50,7 @@ class Membership: KNOCK: Final = "knock" LEAVE: Final = "leave" BAN: Final = "ban" - LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN} + LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN)) class PresenceState: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 881888fa9..066f3d08a 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -120,6 +120,9 @@ class SQLBaseStore(metaclass=ABCMeta): "get_user_in_room_with_profile", (room_id, user_id) ) self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,)) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", (user_id,) + ) # Purge other caches based on room state. self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) @@ -146,6 +149,9 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None) self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", None + ) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) def _attempt_to_invalidate_cache( diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 2d6b75e47..26b8e1a17 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -331,6 +331,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_invited_rooms_for_local_user", (state_key,) ) self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,)) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", (state_key,) + ) self._attempt_to_invalidate_cache( "did_forget", @@ -393,6 +396,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None) self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", None + ) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("get_references_for_event", None) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index f62d9f705..640ab123f 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -445,9 +445,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): if not membership_list: return [] - rooms = await self.db_pool.runInteraction( - "get_rooms_for_local_user_where_membership_is", - self._get_rooms_for_local_user_where_membership_is_txn, + # Convert membership list to frozen set as a) it needs to be hashable, + # and b) we don't care about the order. 
+ membership_list = frozenset(membership_list) + + rooms = await self._get_rooms_for_local_user_where_membership_is_inner( user_id, membership_list, ) @@ -466,6 +468,24 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return [room for room in rooms if room.room_id not in rooms_to_exclude] + @cached(max_entries=1000, tree=True) + async def _get_rooms_for_local_user_where_membership_is_inner( + self, + user_id: str, + membership_list: Collection[str], + ) -> Sequence[RoomsForUser]: + if not membership_list: + return [] + + rooms = await self.db_pool.runInteraction( + "get_rooms_for_local_user_where_membership_is", + self._get_rooms_for_local_user_where_membership_is_txn, + user_id, + membership_list, + ) + + return rooms + def _get_rooms_for_local_user_where_membership_is_txn( self, txn: LoggingTransaction, diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 77aafa492..fa55f7691 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -211,6 +211,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user.invalidate_all() + self.store._get_rooms_for_local_user_where_membership_is_inner.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() From dc8ddc6472ba19905b3fd0c4f4da4088223e03b0 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 22 Jul 2024 02:33:17 -0700 Subject: [PATCH 31/43] Prepare for authenticated media freeze (#17433) As part of the rollout of [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) this PR adds support for designating authenticated media and ensuring that authenticated media is not served over unauthenticated endpoints. --- changelog.d/17433.feature | 1 + .../configuration/config_documentation.md | 12 + synapse/_scripts/synapse_port_db.py | 5 +- synapse/config/repository.py | 4 + synapse/media/media_repository.py | 39 +++- synapse/media/thumbnailer.py | 48 +++- synapse/rest/media/download_resource.py | 3 +- synapse/rest/media/thumbnail_resource.py | 5 +- .../databases/main/media_repository.py | 28 ++- synapse/storage/schema/__init__.py | 5 +- .../main/delta/86/01_authenticate_media.sql | 15 ++ tests/rest/client/test_media.py | 209 ++++++++++++++++++ 12 files changed, 362 insertions(+), 12 deletions(-) create mode 100644 changelog.d/17433.feature create mode 100644 synapse/storage/schema/main/delta/86/01_authenticate_media.sql diff --git a/changelog.d/17433.feature b/changelog.d/17433.feature new file mode 100644 index 000000000..ac9b5dee6 --- /dev/null +++ b/changelog.d/17433.feature @@ -0,0 +1 @@ +Prepare for authenticated media freeze. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 38b24b504..e8bc2df79 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1863,6 +1863,18 @@ federation_rr_transactions_per_room_per_second: 40 ## Media Store Config options related to Synapse's media store. 
+---
+### `enable_authenticated_media`
+
+When set to true, all subsequent media uploads will be marked as authenticated and will not be available over the
+legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and
+`/_matrix/media/(r0|v3|v1)/thumbnail`); requests for authenticated media over these endpoints will result in a 404.
+All media, including authenticated media, will be available over the authenticated media endpoints
+`_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded before this option is set
+to true will still be available over the legacy endpoints. Note that if the setting is later switched back to false,
+media marked as authenticated will again be available over the legacy endpoints. Defaults to false, but this will
+change to true in a future Synapse release.
+
+Example configuration:
+```yaml
+enable_authenticated_media: true
+```
 ---
 ### `enable_media_repo`
 
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 3bb4a3493..5c6db8118 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -119,18 +119,19 @@ BOOLEAN_COLUMNS = {
     "e2e_room_keys": ["is_verified"],
     "event_edges": ["is_state"],
     "events": ["processed", "outlier", "contains_url"],
-    "local_media_repository": ["safe_from_quarantine"],
+    "local_media_repository": ["safe_from_quarantine", "authenticated"],
+    "per_user_experimental_features": ["enabled"],
     "presence_list": ["accepted"],
     "presence_stream": ["currently_active"],
     "public_room_list_stream": ["visibility"],
     "pushers": ["enabled"],
     "redactions": ["have_censored"],
+    "remote_media_cache": ["authenticated"],
     "room_stats_state": ["is_federatable"],
     "rooms": ["is_public", "has_auth_chain_index"],
     "users": ["shadow_banned", "approved", "locked", "suspended"],
     "un_partial_stated_event_stream": ["rejection_status_changed"],
     "users_who_share_rooms": ["share_private"],
-    "per_user_experimental_features": ["enabled"],
 }
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index dc0e93ffa..97ce6de52 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
             remote_media_lifetime
         )
 
+        self.enable_authenticated_media = config.get(
+            "enable_authenticated_media", False
+        )
+
     def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
         assert data_dir_path is not None
         media_store = os.path.join(data_dir_path, "media_store")
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 87c929eb2..8bc92305f 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -430,6 +430,7 @@ class MediaRepository:
         media_id: str,
         name: Optional[str],
         max_timeout_ms: int,
+        allow_authenticated: bool = True,
         federation: bool = False,
     ) -> None:
         """Responds to requests for local media, if exists, or returns 404.
@@ -442,6 +443,7 @@
                 the filename in the Content-Disposition header of the response.
             max_timeout_ms: the maximum number of milliseconds to wait for the media
                 to be uploaded.
+            allow_authenticated: whether media marked as authenticated may be served to this request
             federation: whether the local media being fetched is for a federation request
 
         Returns:
@@ -451,6 +453,10 @@
         if not media_info:
             return
 
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         self.mark_recently_accessed(None, media_id)
 
         media_type = media_info.media_type
@@ -481,6 +487,7 @@
         max_timeout_ms: int,
         ip_address: str,
         use_federation_endpoint: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         """Respond to requests for remote media.
 
@@ -495,6 +502,8 @@
             ip_address: the IP address of the requester
             use_federation_endpoint: whether to request the remote media over the new
                 federation `/download` endpoint
+            allow_authenticated: whether media marked as authenticated may be served to this
+                request
 
         Returns:
             Resolves once a response has successfully been written to request
@@ -526,6 +535,7 @@
             self.download_ratelimiter,
             ip_address,
             use_federation_endpoint,
+            allow_authenticated,
         )
 
         # We deliberately stream the file outside the lock
@@ -548,6 +558,7 @@
         max_timeout_ms: int,
         ip_address: str,
         use_federation: bool,
+        allow_authenticated: bool,
     ) -> RemoteMedia:
         """Gets the media info associated with the remote file, downloading
         if necessary.
 
@@ -560,6 +571,8 @@
             ip_address: IP address of the requester
             use_federation: if a download is necessary, whether to request the remote file
                 over the federation `/download` endpoint
+            allow_authenticated: whether media marked as authenticated may be served to this
+                request
 
         Returns:
             The media info of the file
@@ -581,6 +594,7 @@
             self.download_ratelimiter,
             ip_address,
             use_federation,
+            allow_authenticated,
         )
 
         # Ensure we actually use the responder so that it releases resources
@@ -598,6 +612,7 @@
         download_ratelimiter: Ratelimiter,
         ip_address: str,
         use_federation_endpoint: bool,
+        allow_authenticated: bool,
     ) -> Tuple[Optional[Responder], RemoteMedia]:
         """Looks for media in local cache, if not there then attempt to
        download from remote server.
 
@@ -619,6 +634,11 @@
         """
         media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            # If the media isn't in the cache, don't fetch it; if it is cached but
+            # marked authenticated, don't serve it to this request.
+            if not media_info or media_info.authenticated:
+                raise NotFoundError()
+
         # file_id is the ID we use to track the file locally. If we've already
         # seen the file then reuse the existing ID, otherwise generate a new
         # one.
@@ -792,6 +812,11 @@ class MediaRepository: logger.info("Stored remote media in file %r", fname) + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + return RemoteMedia( media_origin=server_name, media_id=media_id, @@ -802,6 +827,7 @@ class MediaRepository: filesystem_id=file_id, last_access_ts=time_now_ms, quarantined_by=None, + authenticated=authenticated, ) async def _federation_download_remote_file( @@ -915,6 +941,11 @@ class MediaRepository: logger.debug("Stored remote media in file %r", fname) + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + return RemoteMedia( media_origin=server_name, media_id=media_id, @@ -925,6 +956,7 @@ class MediaRepository: filesystem_id=file_id, last_access_ts=time_now_ms, quarantined_by=None, + authenticated=authenticated, ) def _get_thumbnail_requirements( @@ -1030,7 +1062,12 @@ class MediaRepository: t_len = os.path.getsize(output_path) await self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, t_len + media_id, + t_width, + t_height, + t_type, + t_method, + t_len, ) return output_path diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 413a720e4..ef6aa8ccf 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -26,7 +26,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Type from PIL import Image -from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP from synapse.http.server import respond_with_json from synapse.http.site import SynapseRequest @@ -274,6 +274,7 @@ class ThumbnailProvider: m_type: str, max_timeout_ms: int, for_federation: bool, + allow_authenticated: bool = True, ) -> None: media_info = await self.media_repo.get_local_media_info( request, media_id, max_timeout_ms @@ -281,6 +282,12 @@ class ThumbnailProvider: if not media_info: return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) await self._select_and_respond_with_thumbnail( request, @@ -307,14 +314,20 @@ class ThumbnailProvider: desired_type: str, max_timeout_ms: int, for_federation: bool, + allow_authenticated: bool = True, ) -> None: media_info = await self.media_repo.get_local_media_info( request, media_id, max_timeout_ms ) - if not media_info: return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) for info in thumbnail_infos: t_w = info.width == desired_width @@ -381,14 +394,27 @@ class ThumbnailProvider: max_timeout_ms: int, ip_address: str, use_federation: bool, + allow_authenticated: bool = True, ) -> None: media_info = await self.media_repo.get_remote_media_info( - server_name, media_id, max_timeout_ms, ip_address, use_federation + server_name, + media_id, + max_timeout_ms, + ip_address, + use_federation, + allow_authenticated, ) if not 
media_info: respond_404(request) return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + respond_404(request) + return + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -446,16 +472,28 @@ class ThumbnailProvider: max_timeout_ms: int, ip_address: str, use_federation: bool, + allow_authenticated: bool = True, ) -> None: # TODO: Don't download the whole remote file # We should proxy the thumbnail from the remote server instead of # downloading the remote file and generating our own thumbnails. media_info = await self.media_repo.get_remote_media_info( - server_name, media_id, max_timeout_ms, ip_address, use_federation + server_name, + media_id, + max_timeout_ms, + ip_address, + use_federation, + allow_authenticated, ) if not media_info: return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -485,8 +523,8 @@ class ThumbnailProvider: file_id: str, url_cache: bool, for_federation: bool, - server_name: Optional[str] = None, media_info: Optional[LocalMedia] = None, + server_name: Optional[str] = None, ) -> None: """ Respond to a request with an appropriate thumbnail from the previously generated thumbnails. diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py index c32c62690..3c3f70366 100644 --- a/synapse/rest/media/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -84,7 +84,7 @@ class DownloadResource(RestServlet): if self._is_mine_server_name(server_name): await self.media_repo.get_local_media( - request, media_id, file_name, max_timeout_ms + request, media_id, file_name, max_timeout_ms, allow_authenticated=False ) else: allow_remote = parse_boolean(request, "allow_remote", default=True) @@ -106,4 +106,5 @@ class DownloadResource(RestServlet): max_timeout_ms, ip_address, False, + allow_authenticated=False, ) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index 70354aa43..536fea4c3 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -96,6 +96,7 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, False, + allow_authenticated=False, ) else: await self.thumbnail_provider.respond_local_thumbnail( @@ -107,6 +108,7 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, False, + allow_authenticated=False, ) self.media_repo.mark_recently_accessed(None, media_id) else: @@ -134,6 +136,7 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, ip_address, - False, + use_federation=False, + allow_authenticated=False, ) self.media_repo.mark_recently_accessed(server_name, media_id) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 6128332af..7617fd3ad 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -64,6 +64,7 @@ class LocalMedia: quarantined_by: Optional[str] safe_from_quarantine: bool user_id: Optional[str] + authenticated: 
Optional[bool] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -77,6 +78,7 @@ class RemoteMedia: created_ts: int last_access_ts: int quarantined_by: Optional[str] + authenticated: Optional[bool] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -218,6 +220,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "last_access_ts", "safe_from_quarantine", "user_id", + "authenticated", ), allow_none=True, desc="get_local_media", @@ -235,6 +238,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): last_access_ts=row[6], safe_from_quarantine=row[7], user_id=row[8], + authenticated=row[9], ) async def get_local_media_by_user_paginate( @@ -290,7 +294,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): last_access_ts, quarantined_by, safe_from_quarantine, - user_id + user_id, + authenticated FROM local_media_repository WHERE user_id = ? ORDER BY {order_by_column} {order}, media_id ASC @@ -314,6 +319,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): quarantined_by=row[7], safe_from_quarantine=bool(row[8]), user_id=row[9], + authenticated=row[10], ) for row in txn ] @@ -417,12 +423,18 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): time_now_ms: int, user_id: UserID, ) -> None: + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + await self.db_pool.simple_insert( "local_media_repository", { "media_id": media_id, "created_ts": time_now_ms, "user_id": user_id.to_string(), + "authenticated": authenticated, }, desc="store_local_media_id", ) @@ -438,6 +450,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): user_id: UserID, url_cache: Optional[str] = None, ) -> None: + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + await self.db_pool.simple_insert( "local_media_repository", { @@ -448,6 +465,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "media_length": media_length, "user_id": user_id.to_string(), "url_cache": url_cache, + "authenticated": authenticated, }, desc="store_local_media", ) @@ -638,6 +656,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "filesystem_id", "last_access_ts", "quarantined_by", + "authenticated", ), allow_none=True, desc="get_cached_remote_media", @@ -654,6 +673,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): filesystem_id=row[4], last_access_ts=row[5], quarantined_by=row[6], + authenticated=row[7], ) async def store_cached_remote_media( @@ -666,6 +686,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): upload_name: Optional[str], filesystem_id: str, ) -> None: + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + await self.db_pool.simple_insert( "remote_media_cache", { @@ -677,6 +702,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "upload_name": upload_name, "filesystem_id": filesystem_id, "last_access_ts": time_now_ms, + "authenticated": authenticated, }, desc="store_cached_remote_media", ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 0dc5d2424..581d00346 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 85 # remember to update the list below when updating +SCHEMA_VERSION = 86 # remember to update the list below when updating """Represents the 
expectations made by the codebase about the database schema

This should be incremented whenever the codebase changes its requirements on the
@@ -139,6 +139,9 @@ Changes in SCHEMA_VERSION = 84
 
 Changes in SCHEMA_VERSION = 85
     - Add a column `suspended` to the `users` table
+
+Changes in SCHEMA_VERSION = 86
+    - Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache`
 """
diff --git a/synapse/storage/schema/main/delta/86/01_authenticate_media.sql b/synapse/storage/schema/main/delta/86/01_authenticate_media.sql
new file mode 100644
index 000000000..c1ac01ae9
--- /dev/null
+++ b/synapse/storage/schema/main/delta/86/01_authenticate_media.sql
@@ -0,0 +1,15 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+ALTER TABLE remote_media_cache ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;
+ALTER TABLE local_media_repository ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index 466c5a0b7..30b6d31d0 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -43,6 +43,7 @@ from twisted.python.failure import Failure
 from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
 from twisted.web.http_headers import Headers
 from twisted.web.iweb import UNKNOWN_LENGTH, IResponse
+from twisted.web.resource import Resource
 
 from synapse.api.errors import HttpResponseException
 from synapse.api.ratelimiting import Ratelimiter
@@ -2466,3 +2467,211 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase):
                 server_name=None,
             )
         )
+
+
+configs = [
+    {"extra_config": {"dynamic_thumbnails": True}},
+    {"extra_config": {"dynamic_thumbnails": False}},
+]
+
+
+@parameterized_class(configs)
+class AuthenticatedMediaTestCase(unittest.HomeserverTestCase):
+    extra_config: Dict[str, Any]
+    servlets = [
+        media.register_servlets,
+        login.register_servlets,
+        admin.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        config = self.default_config()
+
+        self.clock = clock
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+        config["media_store_path"] = self.media_store_path
+        config["enable_authenticated_media"] = True
+
+        provider_config = {
+            "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+
+        config["media_storage_providers"] = [provider_config]
+        config.update(self.extra_config)
+
+        return self.setup_test_homeserver(config=config)
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.repo = hs.get_media_repository()
+        self.client = hs.get_federation_http_client()
+        self.store = hs.get_datastores().main
+        self.user = self.register_user("user", "pass")
+        self.tok = self.login("user", "pass")
+
+    def create_resource_dict(self) -> Dict[str, Resource]:
+        resources = super().create_resource_dict()
+
resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + + def test_authenticated_media(self) -> None: + # upload some local media with authentication on + channel = self.make_request( + "POST", + "_matrix/media/v3/upload?filename=test_png_upload", + SMALL_PNG, + self.tok, + shorthand=False, + content_type=b"image/png", + custom_headers=[("Content-Length", str(67))], + ) + self.assertEqual(channel.code, 200) + res = channel.json_body.get("content_uri") + assert res is not None + uri = res.split("mxc://")[1] + + # request media over authenticated endpoint, should be found + channel2 = self.make_request( + "GET", + f"_matrix/client/v1/media/download/{uri}", + access_token=self.tok, + shorthand=False, + ) + self.assertEqual(channel2.code, 200) + + # request same media over unauthenticated media, should raise 404 not found + channel3 = self.make_request( + "GET", f"_matrix/media/v3/download/{uri}", shorthand=False + ) + self.assertEqual(channel3.code, 404) + + # check thumbnails as well + params = "?width=32&height=32&method=crop" + channel4 = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/{uri}{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel4.code, 200) + + params = "?width=32&height=32&method=crop" + channel5 = self.make_request( + "GET", + f"/_matrix/media/r0/thumbnail/{uri}{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel5.code, 404) + + # Inject a piece of remote media. + file_id = "abcdefg12345" + file_info = FileInfo(server_name="lonelyIsland", file_id=file_id) + + media_storage = self.hs.get_media_repository().media_storage + + ctx = media_storage.store_into_file(file_info) + (f, fname) = self.get_success(ctx.__aenter__()) + f.write(SMALL_PNG) + self.get_success(ctx.__aexit__(None, None, None)) + + # we write the authenticated status when storing media, so this should pick up + # config and authenticate the media + self.get_success( + self.store.store_cached_remote_media( + origin="lonelyIsland", + media_id="52", + media_type="image/png", + media_length=1, + time_now_ms=self.clock.time_msec(), + upload_name="remote_test.png", + filesystem_id=file_id, + ) + ) + + # ensure we have thumbnails for the non-dynamic code path + if self.extra_config == {"dynamic_thumbnails": False}: + self.get_success( + self.repo._generate_thumbnails( + "lonelyIsland", "52", file_id, "image/png" + ) + ) + + channel6 = self.make_request( + "GET", + "_matrix/client/v1/media/download/lonelyIsland/52", + access_token=self.tok, + shorthand=False, + ) + self.assertEqual(channel6.code, 200) + + channel7 = self.make_request( + "GET", f"_matrix/media/v3/download/{uri}", shorthand=False + ) + self.assertEqual(channel7.code, 404) + + params = "?width=32&height=32&method=crop" + channel8 = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/lonelyIsland/52{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel8.code, 200) + + channel9 = self.make_request( + "GET", + f"/_matrix/media/r0/thumbnail/lonelyIsland/52{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel9.code, 404) + + # Inject a piece of local media that isn't authenticated + file_id = "abcdefg123456" + file_info = FileInfo(None, file_id=file_id) + + ctx = media_storage.store_into_file(file_info) + (f, fname) = self.get_success(ctx.__aenter__()) + f.write(SMALL_PNG) + self.get_success(ctx.__aexit__(None, None, None)) + + self.get_success( + 
self.store.db_pool.simple_insert( + "local_media_repository", + { + "media_id": "abcdefg123456", + "media_type": "image/png", + "created_ts": self.clock.time_msec(), + "upload_name": "test_local", + "media_length": 1, + "user_id": "someone", + "url_cache": None, + "authenticated": False, + }, + desc="store_local_media", + ) + ) + + # check that unauthenticated media is still available over both endpoints + channel9 = self.make_request( + "GET", + "/_matrix/client/v1/media/download/test/abcdefg123456", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel9.code, 200) + + channel10 = self.make_request( + "GET", + "/_matrix/media/r0/download/test/abcdefg123456", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel10.code, 200) From 1648337775e8004749a29cdb4d163cafc3edd076 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:28:54 +0100 Subject: [PATCH 32/43] Bump sentry-sdk from 2.8.0 to 2.10.0 (#17467) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2bfcb59cf..68d10e7c0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2430,13 +2430,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.8.0" +version = "2.10.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"}, - {file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"}, + {file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"}, + {file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"}, ] [package.dependencies] From 73529d373246b09fd3d83b494500dc944f208867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:29:06 +0100 Subject: [PATCH 33/43] Bump ruff from 0.5.0 to 0.5.4 (#17466) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 68d10e7c0..282219e19 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2373,29 +2373,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.0" +version = "0.5.4" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"}, - {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"}, - {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"}, - {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"}, - {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"}, - {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"}, - {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"}, + {file = "ruff-0.5.4-py3-none-linux_armv6l.whl", hash = "sha256:82acef724fc639699b4d3177ed5cc14c2a5aacd92edd578a9e846d5b5ec18ddf"}, + {file = "ruff-0.5.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:da62e87637c8838b325e65beee485f71eb36202ce8e3cdbc24b9fcb8b99a37be"}, + {file = "ruff-0.5.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e98ad088edfe2f3b85a925ee96da652028f093d6b9b56b76fc242d8abb8e2059"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c55efbecc3152d614cfe6c2247a3054cfe358cefbf794f8c79c8575456efe19"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9b85eaa1f653abd0a70603b8b7008d9e00c9fa1bbd0bf40dad3f0c0bdd06793"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0cf497a47751be8c883059c4613ba2f50dd06ec672692de2811f039432875278"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:09c14ed6a72af9ccc8d2e313d7acf7037f0faff43cde4b507e66f14e812e37f7"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:628f6b8f97b8bad2490240aa84f3e68f390e13fabc9af5c0d3b96b485921cd60"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3520a00c0563d7a7a7c324ad7e2cde2355733dafa9592c671fb2e9e3cd8194c1"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93789f14ca2244fb91ed481456f6d0bb8af1f75a330e133b67d08f06ad85b516"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:029454e2824eafa25b9df46882f7f7844d36fd8ce51c1b7f6d97e2615a57bbcc"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9492320eed573a13a0bc09a2957f17aa733fff9ce5bf00e66e6d4a88ec33813f"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6e1f62a92c645e2919b65c02e79d1f61e78a58eddaebca6c23659e7c7cb4ac7"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:768fa9208df2bec4b2ce61dbc7c2ddd6b1be9fb48f1f8d3b78b3332c7d71c1ff"}, + {file = "ruff-0.5.4-py3-none-win32.whl", hash = "sha256:e1e7393e9c56128e870b233c82ceb42164966f25b30f68acbb24ed69ce9c3a4e"}, + {file = "ruff-0.5.4-py3-none-win_amd64.whl", hash = "sha256:58b54459221fd3f661a7329f177f091eb35cf7a603f01d9eb3eb11cc348d38c4"}, + {file = "ruff-0.5.4-py3-none-win_arm64.whl", hash = "sha256:bd53da65f1085fb5b307c38fd3c0829e76acf7b2a912d8d79cadcdb4875c1eb7"}, + {file = "ruff-0.5.4.tar.gz", hash = "sha256:2795726d5f71c4f4e70653273d1c23a8182f07dd8e48c12de5d867bfb7557eed"}, ] [[package]] @@ -3230,4 +3230,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d" +content-hash = "e65fbd044230964cae8810c84289bcf0bc43b27532ea5a5ef8843eab4f6514af" diff --git a/pyproject.toml b/pyproject.toml index 0f040fc61..50dc0c3c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -322,7 +322,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.5.0" +ruff = "0.5.4" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From ed0face8ada72731eba70f9cc56a9b482b7bef1e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2024 14:51:17 +0100 Subject: [PATCH 34/43] Speed up room keys query by using read/write lock (#17461) Linaerizing all access slows things down when devices try and fetch lots of keys on login --- changelog.d/17461.misc | 1 + synapse/handlers/e2e_room_keys.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17461.misc diff --git a/changelog.d/17461.misc b/changelog.d/17461.misc new file mode 100644 index 000000000..80f7144ba --- /dev/null +++ b/changelog.d/17461.misc @@ -0,0 +1 @@ +Speed up fetching room keys from backup. 
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 99f9f6e64..f397911f2 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -34,7 +34,7 @@ from synapse.api.errors import ( from synapse.logging.opentracing import log_kv, trace from synapse.storage.databases.main.e2e_room_keys import RoomKey from synapse.types import JsonDict -from synapse.util.async_helpers import Linearizer +from synapse.util.async_helpers import ReadWriteLock if TYPE_CHECKING: from synapse.server import HomeServer @@ -58,7 +58,7 @@ class E2eRoomKeysHandler: # clients belonging to a user will receive and try to upload a new session at # roughly the same time. Also used to lock out uploads when the key is being # changed. - self._upload_linearizer = Linearizer("upload_room_keys_lock") + self._upload_lock = ReadWriteLock() @trace async def get_room_keys( @@ -89,7 +89,7 @@ class E2eRoomKeysHandler: # we deliberately take the lock to get keys so that changing the version # works atomically - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.read(user_id): # make sure the backup version exists try: await self.store.get_e2e_room_keys_version_info(user_id, version) @@ -132,7 +132,7 @@ class E2eRoomKeysHandler: """ # lock for consistency with uploading - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): # make sure the backup version exists try: version_info = await self.store.get_e2e_room_keys_version_info( @@ -193,7 +193,7 @@ class E2eRoomKeysHandler: # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): # Check that the version we're trying to upload is the current version try: version_info = await self.store.get_e2e_room_keys_version_info(user_id) @@ -355,7 +355,7 @@ class E2eRoomKeysHandler: # TODO: Validate the JSON to make sure it has the right keys. # lock everyone out until we've switched version - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): new_version = await self.store.create_e2e_room_keys_version( user_id, version_info ) @@ -382,7 +382,7 @@ class E2eRoomKeysHandler: } """ - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.read(user_id): try: res = await self.store.get_e2e_room_keys_version_info(user_id, version) except StoreError as e: @@ -407,7 +407,7 @@ class E2eRoomKeysHandler: NotFoundError: if this backup version doesn't exist """ - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): try: await self.store.delete_e2e_room_keys_version(user_id, version) except StoreError as e: @@ -437,7 +437,7 @@ class E2eRoomKeysHandler: raise SynapseError( 400, "Version in body does not match", Codes.INVALID_PARAM ) - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): try: old_info = await self.store.get_e2e_room_keys_version_info( user_id, version From d221512498f7e3267916a289dd2ef4f3e00728e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2024 17:48:09 +0100 Subject: [PATCH 35/43] SS: Implement `$ME` support (#17469) `$ME` can be used as a substitute for the requester's user ID. 
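For illustration, a Sliding Sync request that uses `$ME` to fetch only the requester's own
membership might look like the following (a hypothetical request body mirroring the test added
below; the list name and ranges are arbitrary):

```python
# Hypothetical Sliding Sync request body exercising `$ME` (illustrative only).
sync_body = {
    "lists": {
        "my-list": {
            "ranges": [[0, 10]],
            "required_state": [
                ["m.room.create", ""],
                # The server substitutes `$ME` with the requester's user ID, so
                # only the requester's own membership event is returned.
                ["m.room.member", "$ME"],
            ],
            "timeline_limit": 1,
        }
    }
}
```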
---
 changelog.d/17469.misc           |  1 +
 synapse/handlers/sliding_sync.py |  6 ++-
 tests/rest/client/test_sync.py   | 74 ++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/17469.misc

diff --git a/changelog.d/17469.misc b/changelog.d/17469.misc
new file mode 100644
index 000000000..ba0419355
--- /dev/null
+++ b/changelog.d/17469.misc
@@ -0,0 +1 @@
+Implement handling of `$ME` as a state key in sliding sync.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 423f0329d..c362afa6e 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -329,6 +329,9 @@ class StateValues:
     # `sender` in the timeline). We only give special meaning to this value when it's a
     # `state_key`.
     LAZY: Final = "$LAZY"
+    # Substitute with the requester's user ID. Typically used by clients to get
+    # the user's membership.
+    ME: Final = "$ME"
 
 
 class SlidingSyncHandler:
@@ -504,7 +507,6 @@ class SlidingSyncHandler:
             # Also see `StateFilter.must_await_full_state(...)` for comparison
             lazy_loading = (
                 membership_state_keys is not None
-                and len(membership_state_keys) == 1
                 and StateValues.LAZY in membership_state_keys
             )
 
@@ -1662,6 +1664,8 @@ class SlidingSyncHandler:
 
                             # FIXME: We probably also care about invite, ban, kick, targets, etc
                             # but the spec only mentions "senders".
+                        elif state_key == StateValues.ME:
+                            required_state_types.append((state_type, user.to_string()))
                         else:
                             required_state_types.append((state_type, state_key))
 
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index a008ee465..a88bdb5c1 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -3714,6 +3714,80 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
         )
         self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
+    def test_rooms_required_state_me(self) -> None:
+        """
+        Test `rooms.required_state` correctly handles $ME.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "1", tok=user2_tok)
+
+        # Also send normal state events with state keys of the users. First,
+        # change the power levels to allow this.
+        self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.PowerLevels,
+            body={"users": {user1_id: 50, user2_id: 100}},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo",
+            state_key=user1_id,
+            body={},
+            tok=user1_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo",
+            state_key=user2_id,
+            body={},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with a request for '$ME'.
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            [EventTypes.Member, StateValues.ME],
+                            ["org.matrix.foo", StateValues.ME],
+                        ],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Both `$ME` state keys should be substituted with user1's own user ID, so we
+        # expect user1's membership and `org.matrix.foo` event (plus the create event)
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[("org.matrix.foo", user1_id)],
+            },
+            exact=True,
+        )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
+
     @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
     def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
         """

From de05a642460fa04f6e279fa166f032e9ff94b4b0 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 22 Jul 2024 15:40:06 -0500
Subject: [PATCH 36/43] Sliding Sync: Add E2EE extension (MSC3884) (#17454)

Spec: [MSC3884](https://github.com/matrix-org/matrix-spec-proposals/pull/3884)

Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
---
 changelog.d/17454.feature             |   1 +
 synapse/handlers/device.py            |  17 +-
 synapse/handlers/sliding_sync.py      | 107 +++-
 synapse/rest/client/keys.py           |  10 +-
 synapse/rest/client/sync.py           |  32 +-
 synapse/types/__init__.py             |   7 +-
 synapse/types/handlers/__init__.py    |  48 +-
 synapse/types/rest/client/__init__.py |  10 +
 tests/rest/client/test_sync.py        | 825 +++++++++++++++++++++++++-
 9 files changed, 1023 insertions(+), 34 deletions(-)
 create mode 100644 changelog.d/17454.feature

diff --git a/changelog.d/17454.feature b/changelog.d/17454.feature
new file mode 100644
index 000000000..bb088371b
--- /dev/null
+++ b/changelog.d/17454.feature
@@ -0,0 +1 @@
+Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 0432d9710..4fc6fcd7a 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -39,6 +39,7 @@ from synapse.metrics.background_process_metrics import (
 )
 from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
 from synapse.types import (
+    DeviceListUpdates,
     JsonDict,
     JsonMapping,
     ScheduledTask,
@@ -214,7 +215,7 @@ class DeviceWorkerHandler:
     @cancellable
     async def get_user_ids_changed(
         self, user_id: str, from_token: StreamToken
-    ) -> JsonDict:
+    ) -> DeviceListUpdates:
         """Get list of users that have had the devices updated, or have newly
         joined a room, that `user_id` may be interested in.
""" @@ -341,11 +342,19 @@ class DeviceWorkerHandler: possibly_joined = set() possibly_left = set() - result = {"changed": list(possibly_joined), "left": list(possibly_left)} + device_list_updates = DeviceListUpdates( + changed=possibly_joined, + left=possibly_left, + ) - log_kv(result) + log_kv( + { + "changed": device_list_updates.changed, + "left": device_list_updates.left, + } + ) - return result + return device_list_updates async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: if not self.hs.is_mine(UserID.from_string(user_id)): diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index c362afa6e..886d7c715 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -19,7 +19,18 @@ # import logging from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, Final, List, Mapping, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Final, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, +) import attr from immutabledict import immutabledict @@ -33,6 +44,7 @@ from synapse.storage.databases.main.roommember import extract_heroes_from_room_s from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary from synapse.types import ( + DeviceListUpdates, JsonDict, PersistedEventPosition, Requester, @@ -343,6 +355,7 @@ class SlidingSyncHandler: self.notifier = hs.get_notifier() self.event_sources = hs.get_event_sources() self.relations_handler = hs.get_relations_handler() + self.device_handler = hs.get_device_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync async def wait_for_sync_for_user( @@ -371,10 +384,6 @@ class SlidingSyncHandler: # auth_blocking will occur) await self.auth_blocking.check_auth_blocking(requester=requester) - # TODO: If the To-Device extension is enabled and we have a `from_token`, delete - # any to-device messages before that token (since we now know that the device - # has received them). (see sync v2 for how to do this) - # If we're working with a user-provided token, we need to make sure to wait for # this worker to catch up with the token so we don't skip past any incoming # events or future events if the user is nefariously, manually modifying the @@ -617,7 +626,9 @@ class SlidingSyncHandler: await concurrently_execute(handle_room, relevant_room_map, 10) extensions = await self.get_extensions_response( - sync_config=sync_config, to_token=to_token + sync_config=sync_config, + from_token=from_token, + to_token=to_token, ) return SlidingSyncResult( @@ -1776,33 +1787,47 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, to_token: StreamToken, + from_token: Optional[StreamToken], ) -> SlidingSyncResult.Extensions: """Handle extension requests. Args: sync_config: Sync configuration to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
""" if sync_config.extensions is None: return SlidingSyncResult.Extensions() to_device_response = None - if sync_config.extensions.to_device: - to_device_response = await self.get_to_device_extensions_response( + if sync_config.extensions.to_device is not None: + to_device_response = await self.get_to_device_extension_response( sync_config=sync_config, to_device_request=sync_config.extensions.to_device, to_token=to_token, ) - return SlidingSyncResult.Extensions(to_device=to_device_response) + e2ee_response = None + if sync_config.extensions.e2ee is not None: + e2ee_response = await self.get_e2ee_extension_response( + sync_config=sync_config, + e2ee_request=sync_config.extensions.e2ee, + to_token=to_token, + from_token=from_token, + ) - async def get_to_device_extensions_response( + return SlidingSyncResult.Extensions( + to_device=to_device_response, + e2ee=e2ee_response, + ) + + async def get_to_device_extension_response( self, sync_config: SlidingSyncConfig, to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, to_token: StreamToken, - ) -> SlidingSyncResult.Extensions.ToDeviceExtension: + ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: """Handle to-device extension (MSC3885) Args: @@ -1810,14 +1835,16 @@ class SlidingSyncHandler: to_device_request: The to-device extension from the request to_token: The point in the stream to sync up to. """ - user_id = sync_config.user.to_string() device_id = sync_config.device_id + # Skip if the extension is not enabled + if not to_device_request.enabled: + return None + # Check that this request has a valid device ID (not all requests have - # to belong to a device, and so device_id is None), and that the - # extension is enabled. - if device_id is None or not to_device_request.enabled: + # to belong to a device, and so device_id is None) + if device_id is None: return SlidingSyncResult.Extensions.ToDeviceExtension( next_batch=f"{to_token.to_device_key}", events=[], @@ -1868,3 +1895,53 @@ class SlidingSyncHandler: next_batch=f"{stream_id}", events=messages, ) + + async def get_e2ee_extension_response( + self, + sync_config: SlidingSyncConfig, + e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: + """Handle E2EE device extension (MSC3884) + + Args: + sync_config: Sync configuration + e2ee_request: The e2ee extension from the request + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
+ """ + user_id = sync_config.user.to_string() + device_id = sync_config.device_id + + # Skip if the extension is not enabled + if not e2ee_request.enabled: + return None + + device_list_updates: Optional[DeviceListUpdates] = None + if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id=user_id, + from_token=from_token, + ) + + device_one_time_keys_count: Mapping[str, int] = {} + device_unused_fallback_key_types: Sequence[str] = [] + if device_id: + # TODO: We should have a way to let clients differentiate between the states of: + # * no change in OTK count since the provided since token + # * the server has zero OTKs left for this device + # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 + device_one_time_keys_count = await self.store.count_e2e_one_time_keys( + user_id, device_id + ) + device_unused_fallback_key_types = ( + await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + + return SlidingSyncResult.Extensions.E2eeExtension( + device_list_updates=device_list_updates, + device_one_time_keys_count=device_one_time_keys_count, + device_unused_fallback_key_types=device_unused_fallback_key_types, + ) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 67de634ea..eddad7d5b 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -256,9 +256,15 @@ class KeyChangesServlet(RestServlet): user_id = requester.user.to_string() - results = await self.device_handler.get_user_ids_changed(user_id, from_token) + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id, from_token + ) - return 200, results + response: JsonDict = {} + response["changed"] = list(device_list_updates.changed) + response["left"] = list(device_list_updates.left) + + return 200, response class OneTimeKeyServlet(RestServlet): diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 1d8cbfdf0..93fe1d439 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1081,15 +1081,41 @@ class SlidingSyncRestServlet(RestServlet): async def encode_extensions( self, requester: Requester, extensions: SlidingSyncResult.Extensions ) -> JsonDict: - result = {} + serialized_extensions: JsonDict = {} if extensions.to_device is not None: - result["to_device"] = { + serialized_extensions["to_device"] = { "next_batch": extensions.to_device.next_batch, "events": extensions.to_device.events, } - return result + if extensions.e2ee is not None: + serialized_extensions["e2ee"] = { + # We always include this because + # https://github.com/vector-im/element-android/issues/3725. The spec + # isn't terribly clear on when this can be omitted and how a client + # would tell the difference between "no keys present" and "nothing + # changed" in terms of whole field absent / individual key type entry + # absent Corresponding synapse issue: + # https://github.com/matrix-org/synapse/issues/10456 + "device_one_time_keys_count": extensions.e2ee.device_one_time_keys_count, + # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md + # states that this field should always be included, as long as the + # server supports the feature. 
+ "device_unused_fallback_key_types": extensions.e2ee.device_unused_fallback_key_types, + } + + if extensions.e2ee.device_list_updates is not None: + serialized_extensions["e2ee"]["device_lists"] = {} + + serialized_extensions["e2ee"]["device_lists"]["changed"] = list( + extensions.e2ee.device_list_updates.changed + ) + serialized_extensions["e2ee"]["device_lists"]["left"] = list( + extensions.e2ee.device_list_updates.left + ) + + return serialized_extensions def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 3962ecc99..046cdc29c 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -1219,11 +1219,12 @@ class ReadReceipt: @attr.s(slots=True, frozen=True, auto_attribs=True) class DeviceListUpdates: """ - An object containing a diff of information regarding other users' device lists, intended for - a recipient to carry out device list tracking. + An object containing a diff of information regarding other users' device lists, + intended for a recipient to carry out device list tracking. Attributes: - changed: A set of users whose device lists have changed recently. + changed: A set of users who have updated their device identity or + cross-signing keys, or who now share an encrypted room with. left: A set of users who the recipient no longer needs to track the device lists of. Typically when those users no longer share any end-to-end encryption enabled rooms. """ diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 409120470..4c6c42db0 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -18,7 +18,7 @@ # # from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple +from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple import attr from typing_extensions import TypedDict @@ -31,7 +31,7 @@ else: from pydantic import Extra from synapse.events import EventBase -from synapse.types import JsonDict, JsonMapping, StreamToken, UserID +from synapse.types import DeviceListUpdates, JsonDict, JsonMapping, StreamToken, UserID from synapse.types.rest.client import SlidingSyncBody if TYPE_CHECKING: @@ -264,6 +264,7 @@ class SlidingSyncResult: Attributes: to_device: The to-device extension (MSC3885) + e2ee: The E2EE device extension (MSC3884) """ @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -282,10 +283,51 @@ class SlidingSyncResult: def __bool__(self) -> bool: return bool(self.events) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class E2eeExtension: + """The E2EE device extension (MSC3884) + + Attributes: + device_list_updates: List of user_ids whose devices have changed or left (only + present on incremental syncs). + device_one_time_keys_count: Map from key algorithm to the number of + unclaimed one-time keys currently held on the server for this device. If + an algorithm is unlisted, the count for that algorithm is assumed to be + zero. If this entire parameter is missing, the count for all algorithms + is assumed to be zero. + device_unused_fallback_key_types: List of unused fallback key algorithms + for this device. 
+ """ + + # Only present on incremental syncs + device_list_updates: Optional[DeviceListUpdates] + device_one_time_keys_count: Mapping[str, int] + device_unused_fallback_key_types: Sequence[str] + + def __bool__(self) -> bool: + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + default_otk = self.device_one_time_keys_count.get("signed_curve25519") + more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( + default_otk is not None and default_otk > 0 + ) + + return bool( + more_than_default_otk + or self.device_list_updates + or self.device_unused_fallback_key_types + ) + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None def __bool__(self) -> bool: - return bool(self.to_device) + return bool(self.to_device or self.e2ee) next_pos: StreamToken lists: Dict[str, SlidingWindowList] diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index dbe37bc71..f3c45a0d6 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -313,7 +313,17 @@ class SlidingSyncBody(RequestBodyModel): return value + class E2eeExtension(RequestBodyModel): + """The E2EE device extension (MSC3884) + + Attributes: + enabled + """ + + enabled: Optional[StrictBool] = False + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index a88bdb5c1..2628869de 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -59,6 +59,7 @@ from tests.federation.transport.test_knocking import ( ) from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import mark_event_as_partial_state +from tests.unittest import skip_unless logger = logging.getLogger(__name__) @@ -1113,12 +1114,11 @@ class DeviceUnusedFallbackKeySyncTestCase(unittest.HomeserverTestCase): self.assertEqual(res, []) # Upload a fallback key for the user/device - fallback_key = {"alg1:k1": "fallback_key1"} self.get_success( self.e2e_keys_handler.upload_keys_for_user( alice_user_id, test_device_id, - {"fallback_keys": fallback_key}, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, ) ) # We should now have an unused alg1 key @@ -1252,6 +1252,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.store = hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() def _assertRequiredStateIncludes( self, @@ -1377,6 +1379,52 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): return room_id + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. 
+ """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + def test_sync_list(self) -> None: """ Test that room IDs show up in the Sliding Sync `lists` @@ -1482,6 +1530,124 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # with because we weren't able to find anything new yet. self.assertEqual(channel.json_body["pos"], future_position_token_serialized) + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the room with new events to trigger new results + event_response1 = self.helper.send( + room_id, "new activity in room", tok=user1_tok + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check to make sure the new event is returned + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id]["timeline"] + ], + [ + event_response1["event_id"], + ], + channel.json_body["rooms"][room_id]["timeline"], + ) + + # TODO: Once we remove `ops`, we should be able to add a `RoomResult.__bool__` to + # check if there are any updates since the `from_token`. 
+ @skip_unless(
+ False,
+ "Once we remove ops from the Sliding Sync response, this test should pass",
+ )
+ def test_wait_for_new_data_timeout(self) -> None:
+ """
+ Test to make sure that the Sliding Sync request waits for new data to arrive but
+ no data ever arrives, so we time out. We're also making sure that the default data
+ doesn't trigger a false-positive for new data.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + "?timeout=10000"
+ + f"&pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 0]],
+ "required_state": [],
+ "timeline_limit": 1,
+ }
+ }
+ },
+ access_token=user1_tok,
+ await_result=False,
+ )
+ # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+ with self.assertRaises(TimedOutException):
+ channel.await_result(timeout_ms=5000)
+ # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+ # `SlidingSyncResult.__bool__` for new results.
+ self._bump_notifier_wait_for_events(user1_id)
+ # Block for a little bit more to ensure we don't see any new results.
+ with self.assertRaises(TimedOutException):
+ channel.await_result(timeout_ms=4000)
+ # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+ # 5000 + 4000 + 1200 > 10000)
+ channel.await_result(timeout_ms=1200)
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We still see rooms because that's how Sliding Sync lists work, but we
+ # reached the timeout before seeing any new events in them
+ self.assertEqual(
+ list(channel.json_body["rooms"].keys()),
+ [room_id],
+ )

def test_filter_list(self) -> None:
"""
Test that filters apply to `lists`
@@ -1508,11 +1674,11 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
)

# Create a normal room
- room_id = self.helper.create_room_as(user1_id, tok=user2_tok)
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)

# Create a room that user1 is invited to
- invite_room_id = self.helper.create_room_as(user1_id, tok=user2_tok)
+ invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)

# Make the Sliding Sync request
@@ -4320,10 +4486,59 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase):

def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+ self.account_data_handler = hs.get_account_data_handler()
+ self.notifier = hs.get_notifier()
self.sync_endpoint = (
"/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
)

+ def _bump_notifier_wait_for_events(self, user_id: str) -> None:
+ """
+ Wake up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
+ Sync results.
+ """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + def _assert_to_device_response( self, channel: FakeChannel, expected_messages: List[JsonDict] ) -> str: @@ -4487,3 +4702,605 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): access_token=user1_tok, ) self._assert_to_device_response(channel, []) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the to-device messages to trigger new results + test_msg = {"foo": "bar"} + send_to_device_channel = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual( + send_to_device_channel.code, 200, send_to_device_channel.result + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the To-Device extension doesn't trigger a false-positive for new data. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response(channel, []) + + +class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): + """Tests for the e2ee sliding sync extension""" + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + devices.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() + self.sync_endpoint = ( + "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + ) + + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. + """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. 
+ self.get_success(
+ self.account_data_handler.add_account_data_for_user(
+ user_id,
+ "org.matrix.foobarbaz",
+ {"foo": "bar"},
+ )
+ )
+
+ # Wait for our notifier result
+ self.get_success(result_awaitable)
+
+ if not triggered_notifier_wait_for_events:
+ raise AssertionError(
+ "Expected `notifier.wait_for_events(...)` to be triggered"
+ )
+
+ def test_no_data_initial_sync(self) -> None:
+ """
+ Test that enabling the e2ee extension works during an initial sync, even if
+ there is no data
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Make an initial Sliding Sync request with the e2ee extension enabled
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {},
+ "extensions": {
+ "e2ee": {
+ "enabled": True,
+ }
+ },
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Device list updates are only present for incremental syncs
+ self.assertIsNone(channel.json_body["extensions"]["e2ee"].get("device_lists"))
+
+ # Both of these should be present even when empty
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+ {
+ # This is always present because of
+ # https://github.com/element-hq/element-android/issues/3725 and
+ # https://github.com/matrix-org/synapse/issues/10456
+ "signed_curve25519": 0
+ },
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+ [],
+ )
+
+ def test_no_data_incremental_sync(self) -> None:
+ """
+ Test that enabling the e2ee extension works during an incremental sync, even
+ if there is no data
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ from_token = self.event_sources.get_current_token()
+
+ # Make an incremental Sliding Sync request with the e2ee extension enabled
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {},
+ "extensions": {
+ "e2ee": {
+ "enabled": True,
+ }
+ },
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Device list shows up for incremental syncs
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]
+ .get("device_lists", {})
+ .get("changed"),
+ [],
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+ [],
+ )
+
+ # Both of these should be present even when empty
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+ {
+ # Note that "signed_curve25519" is always returned in key count responses
+ # regardless of whether we uploaded any keys for it. This is necessary until
+ # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+ #
+ # Also related:
+ # https://github.com/element-hq/element-android/issues/3725 and
+ # https://github.com/matrix-org/synapse/issues/10456
+ "signed_curve25519": 0
+ },
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+ [],
+ )
+
+ def test_wait_for_new_data(self) -> None:
+ """
+ Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+ (Only applies to incremental syncs with a `timeout` specified)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ test_device_id = "TESTDEVICE"
+ user3_id = self.register_user("user3", "pass")
+ user3_tok = self.login(user3_id, "pass", device_id=test_device_id)
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+ self.helper.join(room_id, user3_id, tok=user3_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + "?timeout=10000"
+ + f"&pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {},
+ "extensions": {
+ "e2ee": {
+ "enabled": True,
+ }
+ },
+ },
+ access_token=user1_tok,
+ await_result=False,
+ )
+ # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+ with self.assertRaises(TimedOutException):
+ channel.await_result(timeout_ms=5000)
+ # Bump the device lists to trigger new results
+ # Have user3 update their device list
+ device_update_channel = self.make_request(
+ "PUT",
+ f"/devices/{test_device_id}",
+ {
+ "display_name": "New Device Name",
+ },
+ access_token=user3_tok,
+ )
+ self.assertEqual(
+ device_update_channel.code, 200, device_update_channel.json_body
+ )
+ # Should respond before the 10 second timeout
+ channel.await_result(timeout_ms=3000)
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We should see the device list update
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]
+ .get("device_lists", {})
+ .get("changed"),
+ [user3_id],
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+ [],
+ )
+
+ def test_wait_for_new_data_timeout(self) -> None:
+ """
+ Test to make sure that the Sliding Sync request waits for new data to arrive but
+ no data ever arrives, so we time out. We're also making sure that the default data
+ from the E2EE extension doesn't trigger a false-positive for new data (see
+ `device_one_time_keys_count.signed_curve25519`).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ from_token = self.event_sources.get_current_token()
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + "?timeout=10000"
+ + f"&pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {},
+ "extensions": {
+ "e2ee": {
+ "enabled": True,
+ }
+ },
+ },
+ access_token=user1_tok,
+ await_result=False,
+ )
+ # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+ with self.assertRaises(TimedOutException):
+ channel.await_result(timeout_ms=5000)
+ # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+ # `SlidingSyncResult.__bool__` for new results.
+ self._bump_notifier_wait_for_events(user1_id)
+ # Block for a little bit more to ensure we don't see any new results.
+ with self.assertRaises(TimedOutException):
+ channel.await_result(timeout_ms=4000)
+ # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+ # 5000 + 4000 + 1200 > 10000)
+ channel.await_result(timeout_ms=1200)
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Device lists are present for incremental syncs but empty because there were
+ # no device changes
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]
+ .get("device_lists", {})
+ .get("changed"),
+ [],
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+ [],
+ )
+
+ # Both of these should be present even when empty
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+ {
+ # Note that "signed_curve25519" is always returned in key count responses
+ # regardless of whether we uploaded any keys for it. This is necessary until
+ # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+ #
+ # Also related:
+ # https://github.com/element-hq/element-android/issues/3725 and
+ # https://github.com/matrix-org/synapse/issues/10456
+ "signed_curve25519": 0
+ },
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+ [],
+ )
+
+ def test_device_lists(self) -> None:
+ """
+ Test that device list updates are included in the response
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ test_device_id = "TESTDEVICE"
+ user3_id = self.register_user("user3", "pass")
+ user3_tok = self.login(user3_id, "pass", device_id=test_device_id)
+
+ user4_id = self.register_user("user4", "pass")
+ user4_tok = self.login(user4_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+ self.helper.join(room_id, user3_id, tok=user3_tok)
+ self.helper.join(room_id, user4_id, tok=user4_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ # Have user3 update their device list
+ channel = self.make_request(
+ "PUT",
+ f"/devices/{test_device_id}",
+ {
+ "display_name": "New Device Name",
+ },
+ access_token=user3_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # User4 leaves the room
+ self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+ # Make an incremental Sliding Sync request with the e2ee extension enabled
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {},
+ "extensions": {
+ "e2ee": {
+ "enabled": True,
+ }
+ },
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Device list updates show up
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"]
+ .get("device_lists", {})
+ .get("changed"),
+ [user3_id],
+ )
+ self.assertEqual(
+ channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+ [user4_id],
+ )
+
+ def test_device_one_time_keys_count(self) -> None:
+ """
+ Test that `device_one_time_keys_count` is included in the response
+ """
+ test_device_id = "TESTDEVICE"
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass", device_id=test_device_id)
+
+ # Upload one time keys for the user/device
+ keys: JsonDict = {
+ "alg1:k1": "key1",
+ "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
+ "alg2:k3": {"key": "key3"},
+ }
+
+
upload_keys_response = self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, test_device_id, {"one_time_keys": keys} + ) + ) + self.assertDictEqual( + upload_keys_response, + { + "one_time_key_counts": { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + } + }, + ) + + # Make a Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check for those one time key counts + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_one_time_keys_count"), + { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + }, + ) + + def test_device_unused_fallback_key_types(self) -> None: + """ + Test that `device_unused_fallback_key_types` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # We shouldn't have any unused fallback keys yet + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(res, []) + + # Upload a fallback key for the user/device + self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, + test_device_id, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, + ) + ) + # We should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(fallback_res, ["alg1"], fallback_res) + + # Make a Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check for the unused fallback key types + self.assertListEqual( + channel.json_body["extensions"]["e2ee"].get( + "device_unused_fallback_key_types" + ), + ["alg1"], + ) From e3a0681ecffcafa898f0797b9f23c0af9c923949 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 23:42:36 +0000 Subject: [PATCH 37/43] Bump pyopenssl from 24.1.0 to 24.2.1 (#17465) Bumps [pyopenssl](https://github.com/pyca/pyopenssl) from 24.1.0 to 24.2.1.
Changelog

Sourced from pyopenssl's changelog.

24.2.1 (2024-07-20)

Backward-incompatible changes:

Deprecations:

Changes:

  • Fixed changelog to remove Sphinx-specific reStructuredText strings.

24.2.0 (2024-07-20)

Backward-incompatible changes:

Deprecations:

  • Deprecated OpenSSL.crypto.X509Req, OpenSSL.crypto.load_certificate_request, and OpenSSL.crypto.dump_certificate_request. Instead, cryptography.x509.CertificateSigningRequest, cryptography.x509.CertificateSigningRequestBuilder, cryptography.x509.load_der_x509_csr, or cryptography.x509.load_pem_x509_csr should be used; see the migration sketch at the end of these notes.

Changes:

  • Added type hints for the SSL module ([#1308](https://github.com/pyca/pyopenssl/pull/1308)).
  • Changed OpenSSL.crypto.PKey.from_cryptography_key to accept public and private EC, ED25519, ED448 keys ([#1310](https://github.com/pyca/pyopenssl/pull/1310)).
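To make the CSR deprecation above concrete, here is a minimal migration sketch. It is not taken from the changelog: the `request.pem` path is a placeholder, and the deprecated pyOpenSSL call is shown only in comments alongside its `cryptography` replacement.

```python
# Hypothetical migration example for the pyOpenSSL 24.2.0 CSR deprecation.
# "request.pem" is a placeholder path, not something from this changelog.
from cryptography import x509

with open("request.pem", "rb") as f:
    pem_data = f.read()

# Before (deprecated in pyOpenSSL 24.2.0):
#   from OpenSSL import crypto
#   req = crypto.load_certificate_request(crypto.FILETYPE_PEM, pem_data)
#   print(req.get_subject())

# After: load the CSR with the cryptography package directly.
csr = x509.load_pem_x509_csr(pem_data)
print(csr.subject)
```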

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 282219e19..6069867e5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2013,17 +2013,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "24.1.0" +version = "24.2.1" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.7" files = [ - {file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"}, - {file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"}, + {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"}, + {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"}, ] [package.dependencies] -cryptography = ">=41.0.5,<43" +cryptography = ">=41.0.5,<44" [package.extras] docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"] From 13a99fba1b0120af34688e5717478404989b8a1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 23:58:19 +0000 Subject: [PATCH 38/43] Bump hiredis from 2.3.2 to 3.0.0 (#17464) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [hiredis](https://github.com/redis/hiredis-py) from 2.3.2 to 3.0.0.
Release notes

Sourced from hiredis's releases.

3.0.0

Changes

Breaking Changes

  • Return Redis sets as Python lists (#189); a short parsing sketch follows the 3.0.0 notes below.

🐛 Bug Fixes

  • Return Redis sets as Python lists (#189)

Contributors

We'd like to thank all the contributors who worked on this release!

@gerzse
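To illustrate the set-to-list breaking change above, here is a minimal sketch of how it surfaces when decoding a RESP3 set reply with `hiredis.Reader` directly. The wire bytes are hand-written for illustration, and RESP3 type parsing is assumed to be available (as in hiredis-py 2.x and later).

```python
import hiredis

# Hand-crafted RESP3 set reply ("~") with two bulk-string members, roughly
# what a RESP3-speaking Redis server would send for a command like SMEMBERS.
reader = hiredis.Reader()
reader.feed(b"~2\r\n$1\r\na\r\n$1\r\nb\r\n")

reply = reader.gets()
# hiredis 2.x decoded this reply as a Python set ({b"a", b"b"}); 3.0.0
# returns a list ([b"a", b"b"]) instead. One practical consequence is that
# decoded elements no longer need to be hashable, but callers relying on
# set semantics (deduplication, `.add()`, etc.) need updating.
print(type(reply), reply)
```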

2.4.0

Changes

🧰 Maintenance

  • Fix small typo (#192)
  • Quote version for Python setup action in CI (#191)
  • Fix building the wheel for windows (#190)
  • pack: Replace sdsalloc.h with alloc.h (#159)
  • Bump black from 22.3.0 to 24.3.0 (#185)
  • Removing python 3.7 trove (#181)
  • Badge for latest released on Pypi (#182)
  • Sync license in metadata with LICENSE file (#183)

Contributors

We'd like to thank all the contributors who worked on this release!

@Apteryks, @ArtemIsmagilov, @chayim, @dependabot, @dependabot[bot], @gerzse and @shadchin


Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 207 ++++++++++++++++++++++++---------------------------- 1 file changed, 96 insertions(+), 111 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6069867e5..945b91e02 100644 --- a/poetry.lock +++ b/poetry.lock @@ -542,120 +542,105 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "hiredis" -version = "2.3.2" +version = "3.0.0" description = "Python wrapper for hiredis" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:742093f33d374098aa21c1696ac6e4874b52658c870513a297a89265a4d08fe5"}, - {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:9e14fb70ca4f7efa924f508975199353bf653f452e4ef0a1e47549e208f943d7"}, - {file = "hiredis-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d7302b4b17fcc1cc727ce84ded7f6be4655701e8d58744f73b09cb9ed2b13df"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed63e8b75c193c5e5a8288d9d7b011da076cc314fafc3bfd59ec1d8a750d48c8"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b4edee59dc089bc3948f4f6fba309f51aa2ccce63902364900aa0a553a85e97"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6481c3b7673a86276220140456c2a6fbfe8d1fb5c613b4728293c8634134824"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684840b014ce83541a087fcf2d48227196576f56ae3e944d4dfe14c0a3e0ccb7"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c4c0bcf786f0eac9593367b6279e9b89534e008edbf116dcd0de956524702c8"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66ab949424ac6504d823cba45c4c4854af5c59306a1531edb43b4dd22e17c102"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:322c668ee1c12d6c5750a4b1057e6b4feee2a75b3d25d630922a463cfe5e7478"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bfa73e3f163c6e8b2ec26f22285d717a5f77ab2120c97a2605d8f48b26950dac"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7f39f28ffc65de577c3bc0c7615f149e35bc927802a0f56e612db9b530f316f9"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:55ce31bf4711da879b96d511208efb65a6165da4ba91cb3a96d86d5a8d9d23e6"}, - {file = "hiredis-2.3.2-cp310-cp310-win32.whl", hash = "sha256:3dd63d0bbbe75797b743f35d37a4cca7ca7ba35423a0de742ae2985752f20c6d"}, - {file = "hiredis-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ea002656a8d974daaf6089863ab0a306962c8b715db6b10879f98b781a2a5bf5"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:adfbf2e9c38b77d0db2fb32c3bdaea638fa76b4e75847283cd707521ad2475ef"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:80b02d27864ebaf9b153d4b99015342382eeaed651f5591ce6f07e840307c56d"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd40d2e2f82a483de0d0a6dfd8c3895a02e55e5c9949610ecbded18188fd0a56"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dfa904045d7cebfb0f01dad51352551cce1d873d7c3f80c7ded7d42f8cac8f89"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28bd184b33e0dd6d65816c16521a4ba1ffbe9ff07d66873c42ea4049a62fed83"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f70481213373d44614148f0f2e38e7905be3f021902ae5167289413196de4ba4"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8797b528c1ff81eef06713623562b36db3dafa106b59f83a6468df788ff0d1"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02fc71c8333586871602db4774d3a3e403b4ccf6446dc4603ec12df563127cee"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0da56915bda1e0a49157191b54d3e27689b70960f0685fdd5c415dacdee2fbed"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e2674a5a3168349435b08fa0b82998ed2536eb9acccf7087efe26e4cd088a525"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:dc1c3fd49930494a67dcec37d0558d99d84eca8eb3f03b17198424538f2608d7"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:14c7b43205e515f538a9defb4e411e0f0576caaeeda76bb9993ed505486f7562"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bac7e02915b970c3723a7a7c5df4ba7a11a3426d2a3f181e041aa506a1ff028"}, - {file = "hiredis-2.3.2-cp311-cp311-win32.whl", hash = "sha256:63a090761ddc3c1f7db5e67aa4e247b4b3bb9890080bdcdadd1b5200b8b89ac4"}, - {file = "hiredis-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:70d226ab0306a5b8d408235cabe51d4bf3554c9e8a72d53ce0b3c5c84cf78881"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5c614552c6bd1d0d907f448f75550f6b24fb56cbfce80c094908b7990cad9702"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c431431abf55b64347ddc8df68b3ef840269cb0aa5bc2d26ad9506eb4b1b866"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a45857e87e9d2b005e81ddac9d815a33efd26ec67032c366629f023fe64fb415"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138d141ec5a6ec800b6d01ddc3e5561ce1c940215e0eb9960876bfde7186aae"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:387f655444d912a963ab68abf64bf6e178a13c8e4aa945cb27388fd01a02e6f1"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4852f4bf88f0e2d9bdf91279892f5740ed22ae368335a37a52b92a5c88691140"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d711c107e83117129b7f8bd08e9820c43ceec6204fff072a001fd82f6d13db9f"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92830c16885f29163e1c2da1f3c1edb226df1210ec7e8711aaabba3dd0d5470a"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:16b01d9ceae265d4ab9547be0cd628ecaff14b3360357a9d30c029e5ae8b7e7f"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5986fb5f380169270a0293bebebd95466a1c85010b4f1afc2727e4d17c452512"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:49532d7939cc51f8e99efc326090c54acf5437ed88b9c904cc8015b3c4eda9c9"}, - {file = 
"hiredis-2.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8f34801b251ca43ad70691fb08b606a2e55f06b9c9fb1fc18fd9402b19d70f7b"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7298562a49d95570ab1c7fc4051e72824c6a80e907993a21a41ba204223e7334"}, - {file = "hiredis-2.3.2-cp312-cp312-win32.whl", hash = "sha256:e1d86b75de787481b04d112067a4033e1ecfda2a060e50318a74e4e1c9b2948c"}, - {file = "hiredis-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:6dbfe1887ffa5cf3030451a56a8f965a9da2fa82b7149357752b67a335a05fc6"}, - {file = "hiredis-2.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4fc242e9da4af48714199216eb535b61e8f8d66552c8819e33fc7806bd465a09"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81aa4e9a1fcf604c8c4b51aa5d258e195a6ba81efe1da82dea3204443eba01c"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419780f8583ddb544ffa86f9d44a7fcc183cd826101af4e5ffe535b6765f5f6b"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6871306d8b98a15e53a5f289ec1106a3a1d43e7ab6f4d785f95fcef9a7bd9504"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb0b35b63717ef1e41d62f4f8717166f7c6245064957907cfe177cc144357c"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c490191fa1218851f8a80c5a21a05a6f680ac5aebc2e688b71cbfe592f8fec6"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4baf4b579b108062e91bd2a991dc98b9dc3dc06e6288db2d98895eea8acbac22"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e627d8ef5e100556e09fb44c9571a432b10e11596d3c4043500080ca9944a91a"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:ba3dc0af0def8c21ce7d903c59ea1e8ec4cb073f25ece9edaec7f92a286cd219"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:56e9b7d6051688ca94e68c0c8a54a243f8db841911b683cedf89a29d4de91509"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:380e029bb4b1d34cf560fcc8950bf6b57c2ef0c9c8b7c7ac20b7c524a730fadd"}, - {file = "hiredis-2.3.2-cp37-cp37m-win32.whl", hash = "sha256:948d9f2ca7841794dd9b204644963a4bcd69ced4e959b0d4ecf1b8ce994a6daa"}, - {file = "hiredis-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cfa67afe2269b2d203cd1389c00c5bc35a287cd57860441fb0e53b371ea6a029"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bcbe47da0aebc00a7cfe3ebdcff0373b86ce2b1856251c003e3d69c9db44b5a7"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f2c9c0d910dd3f7df92f0638e7f65d8edd7f442203caf89c62fc79f11b0b73f8"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:01b6c24c0840ac7afafbc4db236fd55f56a9a0919a215c25a238f051781f4772"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f567489f422d40c21e53212a73bef4638d9f21043848150f8544ef1f3a6ad1"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28adecb308293e705e44087a1c2d557a816f032430d8a2a9bb7873902a1c6d48"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27e9619847e9dc70b14b1ad2d0fb4889e7ca18996585c3463cff6c951fd6b10b"}, - {file = 
"hiredis-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a0026cfbf29f07649b0e34509091a2a6016ff8844b127de150efce1c3aff60b"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9de7586522e5da6bee83c9cf0dcccac0857a43249cb4d721a2e312d98a684d1"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e58494f282215fc461b06709e9a195a24c12ba09570f25bdf9efb036acc05101"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3a32b4b76d46f1eb42b24a918d51d8ca52411a381748196241d59a895f7c5c"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1979334ccab21a49c544cd1b8d784ffb2747f99a51cb0bd0976eebb517628382"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0c0773266e1c38a06e7593bd08870ac1503f5f0ce0f5c63f2b4134b090b5d6a4"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bd1cee053416183adcc8e6134704c46c60c3f66b8faaf9e65bf76191ca59a2f7"}, - {file = "hiredis-2.3.2-cp38-cp38-win32.whl", hash = "sha256:5341ce3d01ef3c7418a72e370bf028c7aeb16895e79e115fe4c954fff990489e"}, - {file = "hiredis-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8fc7197ff33047ce43a67851ccf190acb5b05c52fd4a001bb55766358f04da68"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:f47775e27388b58ce52f4f972f80e45b13c65113e9e6b6bf60148f893871dc9b"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9412a06b8a8e09abd6313d96864b6d7713c6003a365995a5c70cfb9209df1570"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3020b60e3fc96d08c2a9b011f1c2e2a6bdcc09cb55df93c509b88be5cb791df"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53d0f2c59bce399b8010a21bc779b4f8c32d0f582b2284ac8c98dc7578b27bc4"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57c0d0c7e308ed5280a4900d4468bbfec51f0e1b4cde1deae7d4e639bc6b7766"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d63318ca189fddc7e75f6a4af8eae9c0545863619fb38cfba5f43e81280b286"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e741ffe4e2db78a1b9dd6e5d29678ce37fbaaf65dfe132e5b82a794413302ef1"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb98038ccd368e0d88bd92ee575c58cfaf33e77f788c36b2a89a84ee1936dc6b"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:eae62ed60d53b3561148bcd8c2383e430af38c0deab9f2dd15f8874888ffd26f"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca33c175c1cf60222d9c6d01c38fc17ec3a484f32294af781de30226b003e00f"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c5f6972d2bdee3cd301d5c5438e31195cf1cabf6fd9274491674d4ceb46914d"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a6b54dabfaa5dbaa92f796f0c32819b4636e66aa8e9106c3d421624bd2a2d676"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e96cd35df012a17c87ae276196ea8f215e77d6eeca90709eb03999e2d5e3fd8a"}, - {file = "hiredis-2.3.2-cp39-cp39-win32.whl", hash = "sha256:63b99b5ea9fe4f21469fb06a16ca5244307678636f11917359e3223aaeca0b67"}, - {file = "hiredis-2.3.2-cp39-cp39-win_amd64.whl", hash 
= "sha256:a50c8af811b35b8a43b1590cf890b61ff2233225257a3cad32f43b3ec7ff1b9f"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e8bf4444b09419b77ce671088db9f875b26720b5872d97778e2545cd87dba4a"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd42d0d45ea47a2f96babd82a659fbc60612ab9423a68e4a8191e538b85542a"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80441b55edbef868e2563842f5030982b04349408396e5ac2b32025fb06b5212"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec444ab8f27562a363672d6a7372bc0700a1bdc9764563c57c5f9efa0e592b5f"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f9f606e810858207d4b4287b4ef0dc622c2aa469548bf02b59dcc616f134f811"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3dde4ca00fe9eee3b76209711f1941bb86db42b8a75d7f2249ff9dfc026ab0e"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4dd676107a1d3c724a56a9d9db38166ad4cf44f924ee701414751bd18a784a0"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce42649e2676ad783186264d5ffc788a7612ecd7f9effb62d51c30d413a3eefe"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e3f8b1733078ac663dad57e20060e16389a60ab542f18a97931f3a2a2dd64a4"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:532a84a82156a82529ec401d1c25d677c6543c791e54a263aa139541c363995f"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d59f88c4daa36b8c38e59ac7bffed6f5d7f68eaccad471484bf587b28ccc478"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91a14dd95e24dc078204b18b0199226ee44644974c645dc54ee7b00c3157330"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb777a38797c8c7df0444533119570be18d1a4ce5478dffc00c875684df7bfcb"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d47c915897a99d0d34a39fad4be97b4b709ab3d0d3b779ebccf2b6024a8c681e"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:333b5e04866758b11bda5f5315b4e671d15755fc6ed3b7969721bc6311d0ee36"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c8937f1100435698c18e4da086968c4b5d70e86ea718376f833475ab3277c9aa"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa45f7d771094b8145af10db74704ab0f698adb682fbf3721d8090f90e42cc49"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d5ebc93c39aed4b5bc769f8ce0819bc50e74bb95d57a35f838f1c4378978e0"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a797d8c7df9944314d309b0d9e1b354e2fa4430a05bb7604da13b6ad291bf959"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e15a408f71a6c8c87b364f1f15a6cd9c1baca12bbc47a326ac8ab99ec7ad3c64"}, - {file = "hiredis-2.3.2.tar.gz", hash = 
"sha256:733e2456b68f3f126ddaf2cd500a33b25146c3676b97ea843665717bda0c5d43"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"}, + {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"}, + {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"}, + {file = 
"hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"}, + {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"}, + {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"}, + {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"}, + {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"}, + {file = 
"hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"}, + {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"}, + {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"}, + {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"}, + {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"}, + {file = 
"hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"}, + {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"}, ] [[package]] From a9ee832e4899fefcfd27fba475091e1ffaa069b8 Mon Sep 17 00:00:00 2001 From: Michael Hollister Date: Tue, 23 Jul 2024 04:59:24 -0500 Subject: [PATCH 39/43] Fixed presence results not returning offline users on initial sync (#17231) This is to address an issue in which `m.presence` results on initial sync are not returning entries of users who are currently offline. The original behaviour was from https://github.com/element-hq/synapse/issues/1535 This change is useful for applications that use the presence system for tracking user profile information/updates (e.g. https://github.com/element-hq/synapse/pull/16992 or for profile status messages). This is gated behind a new configuration option to avoid performance impact for applications that don't need this, as a pragmatic solution for now. --- changelog.d/17231.bugfix | 1 + docs/usage/configuration/config_documentation.md | 5 +++++ synapse/config/server.py | 5 +++++ synapse/handlers/sync.py | 6 +++++- 4 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17231.bugfix diff --git a/changelog.d/17231.bugfix b/changelog.d/17231.bugfix new file mode 100644 index 000000000..d09b45565 --- /dev/null +++ b/changelog.d/17231.bugfix @@ -0,0 +1 @@ +Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index e8bc2df79..649f4f71c 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -246,6 +246,7 @@ Example configuration: ```yaml presence: enabled: false + include_offline_users_on_sync: false ``` `enabled` can also be set to a special value of "untracked" which ignores updates @@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the *The "untracked" option was added in Synapse 1.96.0.* +When clients perform an initial or `full_state` sync, presence results for offline users are +not included by default. Setting `include_offline_users_on_sync` to `true` will always include +offline users in the results. Defaults to false. 
+ --- ### `require_auth_for_profile_requests` diff --git a/synapse/config/server.py b/synapse/config/server.py index 8bb97df17..fd52c0475 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -384,6 +384,11 @@ class ServerConfig(Config): # Whether to internally track presence, requires that presence is enabled, self.track_presence = self.presence_enabled and presence_enabled != "untracked" + # Determines if presence results for offline users are included on initial/full sync + self.presence_include_offline_users_on_sync = presence_config.get( + "include_offline_users_on_sync", False + ) + # Custom presence router module # This is the legacy way of configuring it (the config should now be put in the modules section) self.presence_router_module_class = None diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index de227faec..ede014180 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -2270,7 +2270,11 @@ class SyncHandler: user=user, from_key=presence_key, is_guest=sync_config.is_guest, - include_offline=include_offline, + include_offline=( + True + if self.hs_config.server.presence_include_offline_users_on_sync + else include_offline + ), ) assert presence_key sync_result_builder.now_token = now_token.copy_and_replace( From 1daae43f3aded13ccab1fbacbe8fa299cc2e359e Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 23 Jul 2024 11:51:34 +0100 Subject: [PATCH 40/43] Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. (#17428) Introduced in: #17215 This caused us a minor bit of grief as the volume of logs produced was much higher than normal --------- Signed-off-by: Olivier 'reivilibre --- changelog.d/17428.bugfix | 1 + synapse/notifier.py | 13 ++++++++----- synapse/types/__init__.py | 23 +++++++++++++++++++++++ 3 files changed, 32 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17428.bugfix diff --git a/changelog.d/17428.bugfix b/changelog.d/17428.bugfix new file mode 100644 index 000000000..cbfd7b0c3 --- /dev/null +++ b/changelog.d/17428.bugfix @@ -0,0 +1 @@ +Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. 
\ No newline at end of file diff --git a/synapse/notifier.py b/synapse/notifier.py index c3ecf86ec..7a2b54036 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -773,6 +773,7 @@ class Notifier: stream_token = await self.event_sources.bound_future_token(stream_token) start = self.clock.time_msec() + logged = False while True: current_token = self.event_sources.get_current_token() if stream_token.is_before_or_eq(current_token): @@ -783,11 +784,13 @@ class Notifier: if now - start > 10_000: return False - logger.info( - "Waiting for current token to reach %s; currently at %s", - stream_token, - current_token, - ) + if not logged: + logger.info( + "Waiting for current token to reach %s; currently at %s", + stream_token, + current_token, + ) + logged = True # TODO: be better await self.clock.sleep(0.5) diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 046cdc29c..c0d30ac2a 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -777,6 +777,13 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return super().bound_stream_token(max_stream) + def __str__(self) -> str: + instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items())) + return ( + f"RoomStreamToken(stream: {self.stream}, topological: {self.topological}, " + f"instances: {{{instances}}})" + ) + @attr.s(frozen=True, slots=True, order=False) class MultiWriterStreamToken(AbstractMultiWriterStreamToken): @@ -873,6 +880,13 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): return True + def __str__(self) -> str: + instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items())) + return ( + f"MultiWriterStreamToken(stream: {self.stream}, " + f"instances: {{{instances}}})" + ) + class StreamKeyType(Enum): """Known stream types. @@ -1131,6 +1145,15 @@ class StreamToken: return True + def __str__(self) -> str: + return ( + f"StreamToken(room: {self.room_key}, presence: {self.presence_key}, " + f"typing: {self.typing_key}, receipt: {self.receipt_key}, " + f"account_data: {self.account_data_key}, push_rules: {self.push_rules_key}, " + f"to_device: {self.to_device_key}, device_list: {self.device_list_key}, " + f"groups: {self.groups_key}, un_partial_stated_rooms: {self.un_partial_stated_rooms_key})" + ) + StreamToken.START = StreamToken( RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0 From d225b6b3ebea419bdf0e6c0f1476544053f2dcbf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2024 14:03:14 +0100 Subject: [PATCH 41/43] Speed up SS room sorting (#17468) We do this by bulk fetching the latest stream ordering. --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17468.misc | 1 + synapse/handlers/sliding_sync.py | 43 +++--- .../databases/main/event_federation.py | 5 + synapse/storage/databases/main/stream.py | 123 +++++++++++++++++- synapse/util/caches/stream_change_cache.py | 12 +- tests/util/test_stream_change_cache.py | 4 +- 6 files changed, 159 insertions(+), 29 deletions(-) create mode 100644 changelog.d/17468.misc diff --git a/changelog.d/17468.misc b/changelog.d/17468.misc new file mode 100644 index 000000000..d90877620 --- /dev/null +++ b/changelog.d/17468.misc @@ -0,0 +1 @@ +Speed up sorting of the room list in sliding sync. 
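In outline, the `sliding_sync.py` hunk below swaps a per-room storage call for a single bulk query when computing each room's last activity. A rough sketch of the resulting flow — using the plain string `"join"` and duck-typed `store`/`sync_room_map`/`to_token` stand-ins rather than Synapse's real `Membership.JOIN`, storage, and token types:

```python
from typing import Dict, List


async def sort_rooms(
    store, sync_room_map: Dict[str, "RoomForUser"], to_token
) -> List[str]:
    """Return room IDs ordered by most recent activity, newest first."""
    last_activity_in_room_map: Dict[str, int] = {}

    # Non-joined rooms: the user should not see anything past their own
    # membership event, so that event's stream position is the room's
    # "last activity".
    for room_id, room_for_user in sync_room_map.items():
        if room_for_user.membership != "join":
            last_activity_in_room_map[room_id] = room_for_user.event_pos.stream

    # Joined rooms: one bulk query instead of one round trip per room.
    # (Assumes the bulk helper returns a position for every requested room.)
    joined_room_positions = (
        await store.bulk_get_last_event_pos_in_room_before_stream_ordering(
            [
                room_id
                for room_id, room_for_user in sync_room_map.items()
                if room_for_user.membership == "join"
            ],
            to_token.room_key,
        )
    )
    last_activity_in_room_map.update(joined_room_positions)

    # Most recently active rooms sort first.
    return sorted(
        sync_room_map,
        key=lambda room_id: last_activity_in_room_map[room_id],
        reverse=True,
    )
```

The bulk helper itself is added to `synapse/storage/databases/main/stream.py` further below.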
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 886d7c715..554ab59bf 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -1230,34 +1230,33 @@ class SlidingSyncHandler: # Assemble a map of room ID to the `stream_ordering` of the last activity that the # user should see in the room (<= `to_token`) last_activity_in_room_map: Dict[str, int] = {} + for room_id, room_for_user in sync_room_map.items(): - # If they are fully-joined to the room, let's find the latest activity - # at/before the `to_token`. - if room_for_user.membership == Membership.JOIN: - last_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, to_token.room_key - ) - ) - - # If the room has no events at/before the `to_token`, this is probably a - # mistake in the code that generates the `sync_room_map` since that should - # only give us rooms that the user had membership in during the token range. - assert last_event_result is not None - - _, event_pos = last_event_result - - last_activity_in_room_map[room_id] = event_pos.stream - else: - # Otherwise, if the user has left/been invited/knocked/been banned from - # a room, they shouldn't see anything past that point. + if room_for_user.membership != Membership.JOIN: + # If the user has left/been invited/knocked/been banned from a + # room, they shouldn't see anything past that point. # - # FIXME: It's possible that people should see beyond this point in - # invited/knocked cases if for example the room has + # FIXME: It's possible that people should see beyond this point + # in invited/knocked cases if for example the room has # `invite`/`world_readable` history visibility, see # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 last_activity_in_room_map[room_id] = room_for_user.event_pos.stream + # For fully-joined rooms, we find the latest activity at/before the + # `to_token`. + joined_room_positions = ( + await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( + [ + room_id + for room_id, room_for_user in sync_room_map.items() + if room_for_user.membership == Membership.JOIN + ], + to_token.room_key, + ) + ) + + last_activity_in_room_map.update(joined_room_positions) + return sorted( sync_room_map.values(), # Sort by the last activity (stream_ordering) in the room diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 24abab4a2..715846865 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1313,6 +1313,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # We want to make the cache more effective, so we clamp to the last # change before the given ordering. last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # type: ignore[attr-defined] + if last_change is None: + # If the room isn't in the cache we know that the last change was + # somewhere before the earliest known position of the cache, so we + # can clamp to that. + last_change = self._events_stream_cache.get_earliest_known_position() # type: ignore[attr-defined] # We don't always have a full stream_to_exterm_id table, e.g. 
after # the upgrade that introduced it, so we make sure we never ask for a diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index e74e0d2e9..b034361ae 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -78,10 +78,11 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import PersistedEventPosition, RoomStreamToken +from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -1293,6 +1294,126 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): get_last_event_pos_in_room_before_stream_ordering_txn, ) + async def bulk_get_last_event_pos_in_room_before_stream_ordering( + self, + room_ids: StrCollection, + end_token: RoomStreamToken, + ) -> Dict[str, int]: + """Bulk fetch the stream position of the latest events in the given + rooms + """ + + min_token = end_token.stream + max_token = end_token.get_max_stream_pos() + results: Dict[str, int] = {} + + # First, we check for the rooms in the stream change cache to see if we + # can just use the latest position from it. + missing_room_ids: Set[str] = set() + for room_id in room_ids: + stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id) + if stream_pos and stream_pos <= min_token: + results[room_id] = stream_pos + else: + missing_room_ids.add(room_id) + + # Next, we query the stream position from the DB. At first we fetch all + # positions less than the *max* stream pos in the token, then filter + # them down. We do this as a) this is a cheaper query, and b) the vast + # majority of rooms will have a latest token from before the min stream + # pos. + + def bulk_get_last_event_pos_txn( + txn: LoggingTransaction, batch_room_ids: StrCollection + ) -> Dict[str, int]: + # This query fetches the latest stream position in the rooms before + # the given max position. + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch_room_ids + ) + sql = f""" + SELECT room_id, ( + SELECT stream_ordering FROM events AS e + LEFT JOIN rejections USING (event_id) + WHERE e.room_id = r.room_id + AND stream_ordering <= ? + AND NOT outlier + AND rejection_reason IS NULL + ORDER BY stream_ordering DESC + LIMIT 1 + ) + FROM rooms AS r + WHERE {clause} + """ + txn.execute(sql, [max_token] + args) + return {row[0]: row[1] for row in txn} + + recheck_rooms: Set[str] = set() + for batched in batch_iter(missing_room_ids, 1000): + result = await self.db_pool.runInteraction( + "bulk_get_last_event_pos_in_room_before_stream_ordering", + bulk_get_last_event_pos_txn, + batched, + ) + + # Check that the stream position for the rooms are from before the + # minimum position of the token. If not then we need to fetch more + # rows. 
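+            # (A fetched position at or below the token's minimum stream
+            # position is before the token for every event persister, so it
+            # can be trusted as-is. A position in the window between the
+            # min and max stream positions may have been written by an
+            # instance the token has not yet reached, so those rooms are
+            # rechecked against the full token below.)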
+ for room_id, stream in result.items(): + if stream <= min_token: + results[room_id] = stream + else: + recheck_rooms.add(room_id) + + if not recheck_rooms: + return results + + # For the remaining rooms we need to fetch all rows between the min and + # max stream positions in the end token, and filter out the rows that + # are after the end token. + # + # This query should be fast as the range between the min and max should + # be small. + + def bulk_get_last_event_pos_recheck_txn( + txn: LoggingTransaction, batch_room_ids: StrCollection + ) -> Dict[str, int]: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch_room_ids + ) + sql = f""" + SELECT room_id, instance_name, stream_ordering + FROM events + WHERE ? < stream_ordering AND stream_ordering <= ? + AND NOT outlier + AND rejection_reason IS NULL + AND {clause} + ORDER BY stream_ordering ASC + """ + txn.execute(sql, [min_token, max_token] + args) + + # We take the max stream ordering that is less than the token. Since + # we ordered by stream ordering we just need to iterate through and + # take the last matching stream ordering. + txn_results: Dict[str, int] = {} + for row in txn: + room_id = row[0] + event_pos = PersistedEventPosition(row[1], row[2]) + if not event_pos.persisted_after(end_token): + txn_results[room_id] = event_pos.stream + + return txn_results + + for batched in batch_iter(recheck_rooms, 1000): + recheck_result = await self.db_pool.runInteraction( + "bulk_get_last_event_pos_in_room_before_stream_ordering_recheck", + bulk_get_last_event_pos_recheck_txn, + batched, + ) + results.update(recheck_result) + + return results + async def get_current_room_stream_token_for_room_id( self, room_id: str ) -> RoomStreamToken: diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 91c335f85..16fcb0020 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -327,7 +327,7 @@ class StreamChangeCache: for entity in r: self._entity_to_key.pop(entity, None) - def get_max_pos_of_last_change(self, entity: EntityType) -> int: + def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]: """Returns an upper bound of the stream id of the last change to an entity. @@ -335,7 +335,11 @@ class StreamChangeCache: entity: The entity to check. Return: - The stream position of the latest change for the given entity or - the earliest known stream position if the entitiy is unknown. + The stream position of the latest change for the given entity, if + known """ - return self._entity_to_key.get(entity, self._earliest_known_stream_pos) + return self._entity_to_key.get(entity) + + def get_earliest_known_position(self) -> int: + """Returns the earliest position in the cache.""" + return self._earliest_known_stream_pos diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py index 5d38718a5..af1199ef8 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py @@ -249,5 +249,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3) self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4) - # Unknown entities will return the stream start position. 
- self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1) + # Unknown entities will return None + self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None) From 48c1307911e7123b458910c8ca9d5d85b811c502 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 23 Jul 2024 09:01:43 -0600 Subject: [PATCH 42/43] 1.112.0rc1 --- CHANGES.md | 58 +++++++++++++++++++++++++++++++++++++++ changelog.d/17231.bugfix | 1 - changelog.d/17387.doc | 1 - changelog.d/17416.feature | 1 - changelog.d/17418.feature | 1 - changelog.d/17419.feature | 1 - changelog.d/17423.doc | 1 - changelog.d/17424.misc | 1 - changelog.d/17426.misc | 1 - changelog.d/17428.bugfix | 1 - changelog.d/17429.feature | 1 - changelog.d/17432.feature | 1 - changelog.d/17433.feature | 1 - changelog.d/17434.bugfix | 1 - changelog.d/17435.bugfix | 1 - changelog.d/17438.bugfix | 1 - changelog.d/17439.bugfix | 1 - changelog.d/17449.bugfix | 1 - changelog.d/17451.doc | 1 - changelog.d/17453.misc | 1 - changelog.d/17454.feature | 1 - changelog.d/17458.misc | 1 - changelog.d/17460.misc | 1 - changelog.d/17461.misc | 1 - changelog.d/17468.misc | 1 - changelog.d/17469.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 28 files changed, 65 insertions(+), 26 deletions(-) delete mode 100644 changelog.d/17231.bugfix delete mode 100644 changelog.d/17387.doc delete mode 100644 changelog.d/17416.feature delete mode 100644 changelog.d/17418.feature delete mode 100644 changelog.d/17419.feature delete mode 100644 changelog.d/17423.doc delete mode 100644 changelog.d/17424.misc delete mode 100644 changelog.d/17426.misc delete mode 100644 changelog.d/17428.bugfix delete mode 100644 changelog.d/17429.feature delete mode 100644 changelog.d/17432.feature delete mode 100644 changelog.d/17433.feature delete mode 100644 changelog.d/17434.bugfix delete mode 100644 changelog.d/17435.bugfix delete mode 100644 changelog.d/17438.bugfix delete mode 100644 changelog.d/17439.bugfix delete mode 100644 changelog.d/17449.bugfix delete mode 100644 changelog.d/17451.doc delete mode 100644 changelog.d/17453.misc delete mode 100644 changelog.d/17454.feature delete mode 100644 changelog.d/17458.misc delete mode 100644 changelog.d/17460.misc delete mode 100644 changelog.d/17461.misc delete mode 100644 changelog.d/17468.misc delete mode 100644 changelog.d/17469.misc diff --git a/CHANGES.md b/CHANGES.md index 0a2b816ed..51d8b177c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,61 @@ +# Synapse 1.112.0rc1 (2024-07-23) + +### Features + +- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416)) +- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418)) +- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419)) +- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
([\#17429](https://github.com/element-hq/synapse/issues/17429)) +- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432)) +- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433)) +- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454)) + +### Bugfixes + +- Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) +- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) +- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) +- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) +- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) +- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) + +### Improved Documentation + +- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387)) +- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423)) +- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451)) + +### Internal Changes + +- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) +- Fix documentation on `RateLimiter#record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) +- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) +- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460)) +- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461)) +- Speed up sorting of the room list in sliding sync. 
([\#17468](https://github.com/element-hq/synapse/issues/17468)) +- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469)) + + + +### Updates to locked dependencies + +* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441)) +* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464)) +* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444)) +* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440)) +* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445)) +* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465)) +* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466)) +* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456)) +* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467)) +* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448)) +* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443)) +* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446)) +* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442)) +* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427)) + # Synapse 1.111.0 (2024-07-16) No significant changes since 1.111.0rc2. diff --git a/changelog.d/17231.bugfix b/changelog.d/17231.bugfix deleted file mode 100644 index d09b45565..000000000 --- a/changelog.d/17231.bugfix +++ /dev/null @@ -1 +0,0 @@ -Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. diff --git a/changelog.d/17387.doc b/changelog.d/17387.doc deleted file mode 100644 index 82be10f13..000000000 --- a/changelog.d/17387.doc +++ /dev/null @@ -1 +0,0 @@ -Update the readme image to have a white background, so that it is readable in dark mode. \ No newline at end of file diff --git a/changelog.d/17416.feature b/changelog.d/17416.feature deleted file mode 100644 index 1d119cf48..000000000 --- a/changelog.d/17416.feature +++ /dev/null @@ -1 +0,0 @@ -Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17418.feature b/changelog.d/17418.feature deleted file mode 100644 index c5e56bc50..000000000 --- a/changelog.d/17418.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17419.feature b/changelog.d/17419.feature deleted file mode 100644 index 186a27c47..000000000 --- a/changelog.d/17419.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
diff --git a/changelog.d/17423.doc b/changelog.d/17423.doc deleted file mode 100644 index 972bc659e..000000000 --- a/changelog.d/17423.doc +++ /dev/null @@ -1 +0,0 @@ -Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. diff --git a/changelog.d/17424.misc b/changelog.d/17424.misc deleted file mode 100644 index d4a81c137..000000000 --- a/changelog.d/17424.misc +++ /dev/null @@ -1 +0,0 @@ -Make sure we always use the right logic for enabling the media repo. diff --git a/changelog.d/17426.misc b/changelog.d/17426.misc deleted file mode 100644 index 886e5d438..000000000 --- a/changelog.d/17426.misc +++ /dev/null @@ -1 +0,0 @@ -Fix documentation on `RateLimiter#record_action`. \ No newline at end of file diff --git a/changelog.d/17428.bugfix b/changelog.d/17428.bugfix deleted file mode 100644 index cbfd7b0c3..000000000 --- a/changelog.d/17428.bugfix +++ /dev/null @@ -1 +0,0 @@ -Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. \ No newline at end of file diff --git a/changelog.d/17429.feature b/changelog.d/17429.feature deleted file mode 100644 index 608b75d63..000000000 --- a/changelog.d/17429.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17432.feature b/changelog.d/17432.feature deleted file mode 100644 index c86f04c11..000000000 --- a/changelog.d/17432.feature +++ /dev/null @@ -1 +0,0 @@ -Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17433.feature b/changelog.d/17433.feature deleted file mode 100644 index ac9b5dee6..000000000 --- a/changelog.d/17433.feature +++ /dev/null @@ -1 +0,0 @@ -Prepare for authenticated media freeze. \ No newline at end of file diff --git a/changelog.d/17434.bugfix b/changelog.d/17434.bugfix deleted file mode 100644 index c7cce5239..000000000 --- a/changelog.d/17434.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. diff --git a/changelog.d/17435.bugfix b/changelog.d/17435.bugfix deleted file mode 100644 index 2d06a7c7f..000000000 --- a/changelog.d/17435.bugfix +++ /dev/null @@ -1 +0,0 @@ -Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). diff --git a/changelog.d/17438.bugfix b/changelog.d/17438.bugfix deleted file mode 100644 index cff6eecd4..000000000 --- a/changelog.d/17438.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. diff --git a/changelog.d/17439.bugfix b/changelog.d/17439.bugfix deleted file mode 100644 index f36c3ec25..000000000 --- a/changelog.d/17439.bugfix +++ /dev/null @@ -1 +0,0 @@ -Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. \ No newline at end of file diff --git a/changelog.d/17449.bugfix b/changelog.d/17449.bugfix deleted file mode 100644 index cd847a3d1..000000000 --- a/changelog.d/17449.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary call to resume producing in fake channel. 
\ No newline at end of file diff --git a/changelog.d/17451.doc b/changelog.d/17451.doc deleted file mode 100644 index 357ac2c90..000000000 --- a/changelog.d/17451.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. diff --git a/changelog.d/17453.misc b/changelog.d/17453.misc deleted file mode 100644 index 2978a5247..000000000 --- a/changelog.d/17453.misc +++ /dev/null @@ -1 +0,0 @@ -Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. diff --git a/changelog.d/17454.feature b/changelog.d/17454.feature deleted file mode 100644 index bb088371b..000000000 --- a/changelog.d/17454.feature +++ /dev/null @@ -1 +0,0 @@ -Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17458.misc b/changelog.d/17458.misc deleted file mode 100644 index 09cce15d0..000000000 --- a/changelog.d/17458.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up generating sliding sync responses. diff --git a/changelog.d/17460.misc b/changelog.d/17460.misc deleted file mode 100644 index fd99da5a9..000000000 --- a/changelog.d/17460.misc +++ /dev/null @@ -1 +0,0 @@ -Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. diff --git a/changelog.d/17461.misc b/changelog.d/17461.misc deleted file mode 100644 index 80f7144ba..000000000 --- a/changelog.d/17461.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up fetching room keys from backup. diff --git a/changelog.d/17468.misc b/changelog.d/17468.misc deleted file mode 100644 index d90877620..000000000 --- a/changelog.d/17468.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sorting of the room list in sliding sync. diff --git a/changelog.d/17469.misc b/changelog.d/17469.misc deleted file mode 100644 index ba0419355..000000000 --- a/changelog.d/17469.misc +++ /dev/null @@ -1 +0,0 @@ -Implement handling of `$ME` as a state key in sliding sync. diff --git a/debian/changelog b/debian/changelog index 0470e25f2..5209b9f5f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium + + * New Synapse release 1.112.0rc1. + + -- Synapse Packaging team Tue, 23 Jul 2024 08:58:55 -0600 + matrix-synapse-py3 (1.111.0) stable; urgency=medium * New Synapse release 1.111.0. diff --git a/pyproject.toml b/pyproject.toml index 50dc0c3c6..0b5dc418e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.111.0" +version = "1.112.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From e8ee784c75d161d1634a60b7c4e419e8f08273b8 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 23 Jul 2024 09:14:45 -0600 Subject: [PATCH 43/43] Address changelog review comments --- CHANGES.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 51d8b177c..f869674ac 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,13 +12,10 @@ ### Bugfixes -- Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. 
([\#17231](https://github.com/element-hq/synapse/issues/17231)) -- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) - Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) - Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) - Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) -- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) -- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) ### Improved Documentation @@ -29,7 +26,10 @@ ### Internal Changes - Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) -- Fix documentation on `RateLimiter#record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) +- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) - Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) - Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) - Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))