#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
2019-10-30 12:47:37 -04:00
import json
2024-06-24 20:07:56 -04:00
import logging
2024-07-15 05:37:10 -04:00
from http import HTTPStatus
2024-07-25 11:43:35 -04:00
from typing import Any , Dict , Iterable , List , Literal , Optional , Tuple
2019-10-30 14:01:56 -04:00
2024-05-23 13:06:16 -04:00
from parameterized import parameterized , parameterized_class
2021-11-09 05:26:07 -05:00
2022-02-23 08:33:19 -05:00
from twisted . test . proto_helpers import MemoryReactor
2019-05-01 10:32:38 -04:00
import synapse . rest . admin
2021-07-28 04:05:11 -04:00
from synapse . api . constants import (
2024-06-13 14:56:58 -04:00
AccountDataTypes ,
2021-07-28 04:05:11 -04:00
EventContentFields ,
EventTypes ,
2024-07-02 12:07:05 -04:00
HistoryVisibility ,
2024-07-04 13:25:36 -04:00
Membership ,
2022-04-28 13:34:12 -04:00
ReceiptTypes ,
2021-07-28 04:05:11 -04:00
RelationTypes ,
)
2024-07-24 10:21:56 -04:00
from synapse . api . room_versions import RoomVersions
2024-07-04 13:25:36 -04:00
from synapse . events import EventBase
from synapse . handlers . sliding_sync import StateValues
2024-07-10 06:58:42 -04:00
from synapse . rest . client import (
devices ,
knock ,
login ,
read_marker ,
receipts ,
room ,
sendtodevice ,
sync ,
)
2022-02-23 08:33:19 -05:00
from synapse . server import HomeServer
2024-07-24 06:47:25 -04:00
from synapse . types import (
JsonDict ,
RoomStreamToken ,
SlidingSyncStreamToken ,
StreamKeyType ,
StreamToken ,
UserID ,
)
2024-07-29 17:45:48 -04:00
from synapse . types . handlers import SlidingSyncConfig
2022-02-23 08:33:19 -05:00
from synapse . util import Clock
2024-07-25 11:43:35 -04:00
from synapse . util . stringutils import random_string
2018-07-17 06:43:18 -04:00
from tests import unittest
2021-06-09 14:39:51 -04:00
from tests . federation . transport . test_knocking import (
KnockingStrippedStateEventHelperMixin ,
)
2024-07-25 12:01:47 -04:00
from tests . server import TimedOutException
2024-07-24 10:21:56 -04:00
from tests . test_utils . event_injection import create_event , mark_event_as_partial_state
2018-07-17 06:43:18 -04:00
2024-06-24 20:07:56 -04:00
logger = logging . getLogger ( __name__ )
2018-07-17 06:43:18 -04:00
2018-08-17 11:08:45 -04:00
class FilterTestCase(unittest.HomeserverTestCase):
    """Smoke test for the plain /sync endpoint."""

    user_id = "@apple:test"

    servlets = [
        synapse.rest.admin.register_servlets_for_client_rest_resource,
        room.register_servlets,
        login.register_servlets,
        sync.register_servlets,
    ]

    def test_sync_argless(self) -> None:
        """A GET /sync with no query parameters succeeds and hands back a
        `next_batch` token."""
        response = self.make_request("GET", "/sync")

        self.assertEqual(response.code, 200)
        self.assertIn("next_batch", response.json_body)
2018-11-02 09:19:23 -04:00
2019-10-30 12:47:37 -04:00
class SyncFilterTestCase(unittest.HomeserverTestCase):
    """Tests for filtering the /sync timeline by event labels (MSC2326)."""

    servlets = [
        synapse.rest.admin.register_servlets_for_client_rest_resource,
        room.register_servlets,
        login.register_servlets,
        sync.register_servlets,
    ]

    def test_sync_filter_labels(self) -> None:
        """Test that we can filter by a label."""
        sync_filter = json.dumps(
            {
                "room": {
                    "timeline": {
                        "types": [EventTypes.Message],
                        "org.matrix.labels": ["#fun"],
                    }
                }
            }
        )

        events = self._test_sync_filter_labels(sync_filter)

        # Only the two "#fun"-labelled messages should come through.
        self.assertEqual(len(events), 2, [event["content"] for event in events])
        self.assertEqual(events[0]["content"]["body"], "with right label", events[0])
        self.assertEqual(events[1]["content"]["body"], "with right label", events[1])

    def test_sync_filter_not_labels(self) -> None:
        """Test that we can filter by the absence of a label."""
        sync_filter = json.dumps(
            {
                "room": {
                    "timeline": {
                        "types": [EventTypes.Message],
                        "org.matrix.not_labels": ["#fun"],
                    }
                }
            }
        )

        events = self._test_sync_filter_labels(sync_filter)

        # Everything except the two "#fun"-labelled messages should come through.
        self.assertEqual(len(events), 3, [event["content"] for event in events])
        self.assertEqual(events[0]["content"]["body"], "without label", events[0])
        self.assertEqual(events[1]["content"]["body"], "with wrong label", events[1])
        self.assertEqual(
            events[2]["content"]["body"], "with two wrong labels", events[2]
        )

    def test_sync_filter_labels_not_labels(self) -> None:
        """Test that we can filter by both a label and the absence of another label."""
        sync_filter = json.dumps(
            {
                "room": {
                    "timeline": {
                        "types": [EventTypes.Message],
                        "org.matrix.labels": ["#work"],
                        "org.matrix.not_labels": ["#notfun"],
                    }
                }
            }
        )

        events = self._test_sync_filter_labels(sync_filter)

        # Only the single "#work"-but-not-"#notfun" message should survive.
        self.assertEqual(len(events), 1, [event["content"] for event in events])
        self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0])

    def _test_sync_filter_labels(self, sync_filter: str) -> List[JsonDict]:
        """Send a fixed set of labelled and unlabelled messages to a fresh room,
        then sync with the given filter and return the timeline events received.
        """
        user_id = self.register_user("kermit", "test")
        tok = self.login("kermit", "test")
        room_id = self.helper.create_room_as(user_id, tok=tok)

        # (body, labels) for each message, in send order. `None` means the
        # event carries no labels field at all.
        messages: List[Tuple[str, Optional[List[str]]]] = [
            ("with right label", ["#fun"]),
            ("without label", None),
            ("with wrong label", ["#work"]),
            ("with two wrong labels", ["#work", "#notfun"]),
            ("with right label", ["#fun"]),
        ]
        for body, labels in messages:
            content: JsonDict = {"msgtype": "m.text", "body": body}
            if labels is not None:
                content[EventContentFields.LABELS] = labels
            self.helper.send_event(
                room_id=room_id,
                type=EventTypes.Message,
                content=content,
                tok=tok,
            )

        channel = self.make_request(
            "GET", "/sync?filter=%s" % sync_filter, access_token=tok
        )
        self.assertEqual(channel.code, 200, channel.result)

        return channel.json_body["rooms"]["join"][room_id]["timeline"]["events"]
2018-11-02 09:19:23 -04:00
class SyncTypingTests(unittest.HomeserverTestCase):
    """Regression tests for typing notifications interacting with /sync."""

    servlets = [
        synapse.rest.admin.register_servlets_for_client_rest_resource,
        room.register_servlets,
        login.register_servlets,
        sync.register_servlets,
    ]
    user_id = True
    hijack_auth = False

    def test_sync_backwards_typing(self) -> None:
        """
        If the typing serial goes backwards and the typing handler is then reset
        (such as when the master restarts and sets the typing serial to 0), we
        do not incorrectly return typing information that had a serial greater
        than the now-reset serial.
        """
        typing_url = "/rooms/%s/typing/%s?access_token=%s"
        sync_url = "/sync?timeout=3000000&access_token=%s&since=%s"

        # Register the user who gets notified
        user_id = self.register_user("user", "pass")
        access_token = self.login("user", "pass")

        # Register the user who sends the message
        other_user_id = self.register_user("otheruser", "pass")
        other_access_token = self.login("otheruser", "pass")

        # Create a room. Named `room_id` (not `room`) so we don't shadow the
        # `synapse.rest.client.room` module imported at the top of the file
        # and referenced in `servlets` above.
        room_id = self.helper.create_room_as(user_id, tok=access_token)

        # Invite the other person
        self.helper.invite(
            room=room_id, src=user_id, tok=access_token, targ=other_user_id
        )

        # The other user joins
        self.helper.join(room=room_id, user=other_user_id, tok=other_access_token)

        # The other user sends some messages
        self.helper.send(room_id, body="Hi!", tok=other_access_token)
        self.helper.send(room_id, body="There!", tok=other_access_token)

        # Start typing.
        channel = self.make_request(
            "PUT",
            typing_url % (room_id, other_user_id, other_access_token),
            b'{"typing": true, "timeout": 30000}',
        )
        self.assertEqual(200, channel.code)

        channel = self.make_request("GET", "/sync?access_token=%s" % (access_token,))
        self.assertEqual(200, channel.code)
        next_batch = channel.json_body["next_batch"]

        # Stop typing.
        channel = self.make_request(
            "PUT",
            typing_url % (room_id, other_user_id, other_access_token),
            b'{"typing": false}',
        )
        self.assertEqual(200, channel.code)

        # Start typing.
        channel = self.make_request(
            "PUT",
            typing_url % (room_id, other_user_id, other_access_token),
            b'{"typing": true, "timeout": 30000}',
        )
        self.assertEqual(200, channel.code)

        # Should return immediately
        channel = self.make_request("GET", sync_url % (access_token, next_batch))
        self.assertEqual(200, channel.code)
        next_batch = channel.json_body["next_batch"]

        # Reset typing serial back to 0, as if the master had.
        typing = self.hs.get_typing_handler()
        typing._latest_room_serial = 0

        # Since it checks the state token, we need some state to update to
        # invalidate the stream token.
        self.helper.send(room_id, body="There!", tok=other_access_token)

        channel = self.make_request("GET", sync_url % (access_token, next_batch))
        self.assertEqual(200, channel.code)
        next_batch = channel.json_body["next_batch"]

        # This should time out! But it does not, because our stream token is
        # ahead, and therefore it's saying the typing (that we've actually
        # already seen) is new, since it's got a token above our new, now-reset
        # stream token.
        channel = self.make_request("GET", sync_url % (access_token, next_batch))
        self.assertEqual(200, channel.code)
        next_batch = channel.json_body["next_batch"]

        # Clear the typing information, so that it doesn't think everything is
        # in the future.
        typing._reset()

        # Now it SHOULD fail as it never completes!
        with self.assertRaises(TimedOutException):
            self.make_request("GET", sync_url % (access_token, next_batch))
2020-09-02 12:19:37 -04:00
2023-01-25 14:38:20 -05:00
class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin):
    """Tests that the stripped state of a knocked-on room is exposed over /sync."""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        knock.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        """Set up a private knockable room, its owner, and a prospective knocker."""
        self.store = hs.get_datastores().main
        self.url = "/sync?since=%s"
        self.next_batch = "s0"

        # Register the first user (used to create the room to knock on).
        self.user_id = self.register_user("kermit", "monkey")
        self.tok = self.login("kermit", "monkey")

        # Create the room we'll knock on. Room version 7 is the first version
        # to support knocking.
        self.room_id = self.helper.create_room_as(
            self.user_id,
            is_public=False,
            room_version="7",
            tok=self.tok,
        )

        # Register the second user (used to knock on the room).
        self.knocker = self.register_user("knocker", "monkey")
        self.knocker_tok = self.login("knocker", "monkey")

        # Perform an initial sync (as the room creator, self.tok) to obtain a
        # since token before any knock takes place.
        channel = self.make_request(
            "GET",
            self.url % self.next_batch,
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Store the next batch for the next request.
        self.next_batch = channel.json_body["next_batch"]

        # Set up some room state to test with. The helper returns the state we
        # later expect to see stripped into the knock response.
        self.expected_room_state = self.send_example_state_events_to_room(
            hs, self.room_id, self.user_id
        )

    def test_knock_room_state(self) -> None:
        """Tests that /sync returns state from a room after knocking on it."""
        # Knock on a room
        channel = self.make_request(
            "POST",
            f"/_matrix/client/r0/knock/{self.room_id}",
            b"{}",
            self.knocker_tok,
        )
        self.assertEqual(200, channel.code, channel.result)

        # We expect to see the knock event in the stripped room state later
        self.expected_room_state[EventTypes.Member] = {
            "content": {"membership": "knock", "displayname": "knocker"},
            "state_key": "@knocker:test",
        }

        # Check that /sync includes stripped state from the room
        channel = self.make_request(
            "GET",
            self.url % self.next_batch,
            access_token=self.knocker_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Extract the stripped room state events from /sync
        knock_entry = channel.json_body["rooms"]["knock"]
        room_state_events = knock_entry[self.room_id]["knock_state"]["events"]

        # Validate that the knock membership event came last
        self.assertEqual(room_state_events[-1]["type"], EventTypes.Member)

        # Validate the stripped room state events
        self.check_knock_room_state_against_room_state(
            room_state_events, self.expected_room_state
        )
2020-09-02 12:19:37 -04:00
class UnreadMessagesTestCase(unittest.HomeserverTestCase):
    """Tests for MSC2654 unread counts and their interaction with read receipts."""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        read_marker.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        receipts.register_servlets,
    ]

    def default_config(self) -> JsonDict:
        """Enable the MSC2654 unread-count experimental feature for these tests."""
        config = super().default_config()
        config["experimental_features"] = {
            "msc2654_enabled": True,
        }
        return config

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.url = "/sync?since=%s"
        self.next_batch = "s0"

        # Register the first user (used to check the unread counts).
        self.user_id = self.register_user("kermit", "monkey")
        self.tok = self.login("kermit", "monkey")

        # Create the room we'll check unread counts for.
        self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)

        # Register the second user (used to send events to the room).
        self.user2 = self.register_user("kermit2", "monkey")
        self.tok2 = self.login("kermit2", "monkey")

        # Change the power levels of the room so that the second user can send state
        # events.
        self.helper.send_state(
            self.room_id,
            EventTypes.PowerLevels,
            {
                "users": {self.user_id: 100, self.user2: 100},
                "users_default": 0,
                "events": {
                    "m.room.name": 50,
                    "m.room.power_levels": 100,
                    "m.room.history_visibility": 100,
                    "m.room.canonical_alias": 50,
                    "m.room.avatar": 50,
                    "m.room.tombstone": 100,
                    "m.room.server_acl": 100,
                    "m.room.encryption": 100,
                },
                "events_default": 0,
                "state_default": 50,
                "ban": 50,
                "kick": 50,
                "redact": 50,
                "invite": 0,
            },
            tok=self.tok,
        )

    def test_unread_counts(self) -> None:
        """Tests that /sync returns the right value for the unread count (MSC2654)."""

        # Check that our own messages don't increase the unread count.
        self.helper.send(self.room_id, "hello", tok=self.tok)
        self._check_unread_count(0)

        # Join the new user and check that this doesn't increase the unread count.
        self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2)
        self._check_unread_count(0)

        # Check that the new user sending a message increases our unread count.
        res = self.helper.send(self.room_id, "hello", tok=self.tok2)
        self._check_unread_count(1)

        # Send a read receipt to tell the server we've read the latest event.
        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/read_markers",
            {ReceiptTypes.READ: res["event_id"]},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Check that the unread counter is back to 0.
        self._check_unread_count(0)

        # Check that private read receipts don't break unread counts
        res = self.helper.send(self.room_id, "hello", tok=self.tok2)
        self._check_unread_count(1)

        # Send a read receipt to tell the server we've read the latest event.
        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}",
            {},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Check that the unread counter is back to 0.
        self._check_unread_count(0)

        # Check that room name changes increase the unread counter.
        self.helper.send_state(
            self.room_id,
            "m.room.name",
            {"name": "my super room"},
            tok=self.tok2,
        )
        self._check_unread_count(1)

        # Check that room topic changes increase the unread counter.
        self.helper.send_state(
            self.room_id,
            "m.room.topic",
            {"topic": "welcome!!!"},
            tok=self.tok2,
        )
        self._check_unread_count(2)

        # Check that encrypted messages increase the unread counter.
        self.helper.send_event(self.room_id, EventTypes.Encrypted, {}, tok=self.tok2)
        self._check_unread_count(3)

        # Check that custom events with a body increase the unread counter.
        result = self.helper.send_event(
            self.room_id,
            "org.matrix.custom_type",
            {"body": "hello"},
            tok=self.tok2,
        )
        event_id = result["event_id"]
        self._check_unread_count(4)

        # Check that edits don't increase the unread counter.
        self.helper.send_event(
            room_id=self.room_id,
            type=EventTypes.Message,
            content={
                "body": "hello",
                "msgtype": "m.text",
                "m.relates_to": {
                    "rel_type": RelationTypes.REPLACE,
                    "event_id": event_id,
                },
            },
            tok=self.tok2,
        )
        self._check_unread_count(4)

        # Check that notices don't increase the unread counter.
        self.helper.send_event(
            room_id=self.room_id,
            type=EventTypes.Message,
            content={"body": "hello", "msgtype": "m.notice"},
            tok=self.tok2,
        )
        self._check_unread_count(4)

        # Check that tombstone events changes increase the unread counter.
        res1 = self.helper.send_state(
            self.room_id,
            EventTypes.Tombstone,
            {"replacement_room": "!someroom:test"},
            tok=self.tok2,
        )
        self._check_unread_count(5)

        res2 = self.helper.send(self.room_id, "hello", tok=self.tok2)

        # Make sure both m.read and m.read.private advance
        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/receipt/m.read/{res1['event_id']}",
            {},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        self._check_unread_count(1)

        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res2['event_id']}",
            {},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        self._check_unread_count(0)

    # We test for all three receipt types that influence notification counts
    @parameterized.expand(
        [
            ReceiptTypes.READ,
            ReceiptTypes.READ_PRIVATE,
        ]
    )
    def test_read_receipts_only_go_down(self, receipt_type: str) -> None:
        # NOTE(review): `receipt_type` is never used below — both parameterized
        # runs send identical requests (READ_PRIVATE then m.read). Confirm
        # whether the receipt URLs were meant to interpolate {receipt_type}.

        # Join the new user
        self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2)

        # Send messages
        res1 = self.helper.send(self.room_id, "hello", tok=self.tok2)
        res2 = self.helper.send(self.room_id, "hello", tok=self.tok2)

        # Read last event
        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res2['event_id']}",
            {},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        self._check_unread_count(0)

        # Make sure neither m.read nor m.read.private make the
        # read receipt go up to an older event
        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res1['event_id']}",
            {},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        self._check_unread_count(0)

        channel = self.make_request(
            "POST",
            f"/rooms/{self.room_id}/receipt/m.read/{res1['event_id']}",
            {},
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        self._check_unread_count(0)

    def _check_unread_count(self, expected_count: int) -> None:
        """Syncs and compares the unread count with the expected value."""
        channel = self.make_request(
            "GET",
            self.url % self.next_batch,
            access_token=self.tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        room_entry = (
            channel.json_body.get("rooms", {}).get("join", {}).get(self.room_id, {})
        )
        self.assertEqual(
            room_entry.get("org.matrix.msc2654.unread_count", 0),
            expected_count,
            room_entry,
        )

        # Store the next batch for the next request.
        self.next_batch = channel.json_body["next_batch"]
2021-06-17 11:23:11 -04:00
class SyncCacheTestCase(unittest.HomeserverTestCase):
    """Checks around caching of /sync responses."""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        sync.register_servlets,
    ]

    def test_noop_sync_does_not_tightloop(self) -> None:
        """If the sync times out, we shouldn't cache the result

        Essentially a regression test for https://github.com/matrix-org/synapse/issues/8518.
        """
        self.user_id = self.register_user("kermit", "monkey")
        self.tok = self.login("kermit", "monkey")

        # The initial sync returns straight away.
        channel = self.make_request("GET", "/sync", access_token=self.tok)
        self.assertEqual(channel.code, 200, channel.json_body)
        since_token = channel.json_body["next_batch"]

        # First incremental sync: nothing has happened, so the request should
        # block for the full 10-second timeout...
        channel = self.make_request(
            "GET",
            f"/sync?since={since_token}&timeout=10000",
            access_token=self.tok,
            await_result=False,
        )
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=9900)
        # ...then complete shortly afterwards.
        channel.await_result(timeout_ms=200)
        self.assertEqual(channel.code, 200, channel.json_body)

        # Nothing happened, so the returned token should be unchanged.
        self.assertEqual(channel.json_body["next_batch"], since_token)

        # A second identical incremental sync must also block for the full
        # timeout: if the empty response above had been cached, this would
        # return immediately and clients would tight-loop.
        channel = self.make_request(
            "GET",
            f"/sync?since={since_token}&timeout=10000",
            access_token=self.tok,
            await_result=False,
        )
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=9900)
        channel.await_result(timeout_ms=200)
        self.assertEqual(channel.code, 200, channel.json_body)
2022-02-07 08:21:19 -05:00
2024-05-23 13:06:16 -04:00
@parameterized_class (
( " sync_endpoint " , " experimental_features " ) ,
[
( " /sync " , { } ) ,
(
" /_matrix/client/unstable/org.matrix.msc3575/sync/e2ee " ,
# Enable sliding sync
{ " msc3575_enabled " : True } ,
) ,
] ,
)
2022-02-07 08:21:19 -05:00
class DeviceListSyncTestCase ( unittest . HomeserverTestCase ) :
2024-05-23 13:06:16 -04:00
"""
Tests regarding device list ( ` device_lists ` ) changes .
Attributes :
sync_endpoint : The endpoint under test to use for syncing .
experimental_features : The experimental features homeserver config to use .
"""
sync_endpoint : str
experimental_features : JsonDict
2022-02-07 08:21:19 -05:00
servlets = [
synapse . rest . admin . register_servlets ,
login . register_servlets ,
2024-05-23 13:06:16 -04:00
room . register_servlets ,
2022-02-07 08:21:19 -05:00
sync . register_servlets ,
devices . register_servlets ,
]
2024-05-23 13:06:16 -04:00
def default_config ( self ) - > JsonDict :
config = super ( ) . default_config ( )
config [ " experimental_features " ] = self . experimental_features
return config
def test_receiving_local_device_list_changes ( self ) - > None :
""" Tests that a local users that share a room receive each other ' s device list
changes .
"""
# Register two users
test_device_id = " TESTDEVICE "
alice_user_id = self . register_user ( " alice " , " correcthorse " )
alice_access_token = self . login (
alice_user_id , " correcthorse " , device_id = test_device_id
)
bob_user_id = self . register_user ( " bob " , " ponyponypony " )
bob_access_token = self . login ( bob_user_id , " ponyponypony " )
# Create a room for them to coexist peacefully in
new_room_id = self . helper . create_room_as (
alice_user_id , is_public = True , tok = alice_access_token
)
self . assertIsNotNone ( new_room_id )
# Have Bob join the room
self . helper . invite (
new_room_id , alice_user_id , bob_user_id , tok = alice_access_token
)
self . helper . join ( new_room_id , bob_user_id , tok = bob_access_token )
# Now have Bob initiate an initial sync (in order to get a since token)
channel = self . make_request (
" GET " ,
self . sync_endpoint ,
access_token = bob_access_token ,
)
self . assertEqual ( channel . code , 200 , channel . json_body )
next_batch_token = channel . json_body [ " next_batch " ]
# ...and then an incremental sync. This should block until the sync stream is woken up,
# which we hope will happen as a result of Alice updating their device list.
bob_sync_channel = self . make_request (
" GET " ,
f " { self . sync_endpoint } ?since= { next_batch_token } &timeout=30000 " ,
access_token = bob_access_token ,
# Start the request, then continue on.
await_result = False ,
)
# Have alice update their device list
channel = self . make_request (
" PUT " ,
f " /devices/ { test_device_id } " ,
{
" display_name " : " New Device Name " ,
} ,
access_token = alice_access_token ,
)
self . assertEqual ( channel . code , 200 , channel . json_body )
# Check that bob's incremental sync contains the updated device list.
# If not, the client would only receive the device list update on the
# *next* sync.
bob_sync_channel . await_result ( )
self . assertEqual ( bob_sync_channel . code , 200 , bob_sync_channel . json_body )
changed_device_lists = bob_sync_channel . json_body . get ( " device_lists " , { } ) . get (
" changed " , [ ]
)
self . assertIn ( alice_user_id , changed_device_lists , bob_sync_channel . json_body )
def test_not_receiving_local_device_list_changes ( self ) - > None :
""" Tests a local users DO NOT receive device updates from each other if they do not
share a room .
"""
# Register two users
test_device_id = " TESTDEVICE "
alice_user_id = self . register_user ( " alice " , " correcthorse " )
alice_access_token = self . login (
alice_user_id , " correcthorse " , device_id = test_device_id
)
bob_user_id = self . register_user ( " bob " , " ponyponypony " )
bob_access_token = self . login ( bob_user_id , " ponyponypony " )
# These users do not share a room. They are lonely.
# Have Bob initiate an initial sync (in order to get a since token)
channel = self . make_request (
" GET " ,
self . sync_endpoint ,
access_token = bob_access_token ,
)
self . assertEqual ( channel . code , 200 , channel . json_body )
next_batch_token = channel . json_body [ " next_batch " ]
# ...and then an incremental sync. This should block until the sync stream is woken up,
# which we hope will happen as a result of Alice updating their device list.
bob_sync_channel = self . make_request (
" GET " ,
f " { self . sync_endpoint } ?since= { next_batch_token } &timeout=1000 " ,
access_token = bob_access_token ,
# Start the request, then continue on.
await_result = False ,
)
# Have alice update their device list
channel = self . make_request (
" PUT " ,
f " /devices/ { test_device_id } " ,
{
" display_name " : " New Device Name " ,
} ,
access_token = alice_access_token ,
)
self . assertEqual ( channel . code , 200 , channel . json_body )
# Check that bob's incremental sync does not contain the updated device list.
bob_sync_channel . await_result ( )
self . assertEqual ( bob_sync_channel . code , 200 , bob_sync_channel . json_body )
changed_device_lists = bob_sync_channel . json_body . get ( " device_lists " , { } ) . get (
" changed " , [ ]
)
self . assertNotIn (
alice_user_id , changed_device_lists , bob_sync_channel . json_body
)
2022-02-23 08:33:19 -05:00
def test_user_with_no_rooms_receives_self_device_list_updates ( self ) - > None :
2022-02-07 08:21:19 -05:00
""" Tests that a user with no rooms still receives their own device list updates """
2024-05-23 13:06:16 -04:00
test_device_id = " TESTDEVICE "
2022-02-07 08:21:19 -05:00
# Register a user and login, creating a device
2024-05-23 13:06:16 -04:00
alice_user_id = self . register_user ( " alice " , " correcthorse " )
alice_access_token = self . login (
alice_user_id , " correcthorse " , device_id = test_device_id
)
2022-02-07 08:21:19 -05:00
# Request an initial sync
2024-05-23 13:06:16 -04:00
channel = self . make_request (
" GET " , self . sync_endpoint , access_token = alice_access_token
)
2022-02-07 08:21:19 -05:00
self . assertEqual ( channel . code , 200 , channel . json_body )
next_batch = channel . json_body [ " next_batch " ]
# Now, make an incremental sync request.
# It won't return until something has happened
incremental_sync_channel = self . make_request (
" GET " ,
2024-05-23 13:06:16 -04:00
f " { self . sync_endpoint } ?since= { next_batch } &timeout=30000 " ,
access_token = alice_access_token ,
2022-02-07 08:21:19 -05:00
await_result = False ,
)
# Change our device's display name
channel = self . make_request (
" PUT " ,
2024-05-23 13:06:16 -04:00
f " devices/ { test_device_id } " ,
2022-02-07 08:21:19 -05:00
{
" display_name " : " freeze ray " ,
} ,
2024-05-23 13:06:16 -04:00
access_token = alice_access_token ,
2022-02-07 08:21:19 -05:00
)
self . assertEqual ( channel . code , 200 , channel . json_body )
# The sync should now have returned
incremental_sync_channel . await_result ( timeout_ms = 20000 )
self . assertEqual ( incremental_sync_channel . code , 200 , channel . json_body )
# We should have received notification that the (user's) device has changed
device_list_changes = incremental_sync_channel . json_body . get (
" device_lists " , { }
) . get ( " changed " , [ ] )
self . assertIn (
2024-05-23 13:06:16 -04:00
alice_user_id , device_list_changes , incremental_sync_channel . json_body
)
@parameterized_class(
    ("sync_endpoint", "experimental_features"),
    [
        ("/sync", {}),
        (
            "/_matrix/client/unstable/org.matrix.msc3575/sync/e2ee",
            # Enable sliding sync
            {"msc3575_enabled": True},
        ),
    ],
)
class DeviceOneTimeKeysSyncTestCase(unittest.HomeserverTestCase):
    """
    Tests that `device_one_time_keys_count` is reported correctly by sync.

    Attributes:
        sync_endpoint: The endpoint under test to use for syncing.
        experimental_features: The experimental features homeserver config to use.
    """

    sync_endpoint: str
    experimental_features: JsonDict

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        sync.register_servlets,
        devices.register_servlets,
    ]

    def default_config(self) -> JsonDict:
        # Layer the parameterized experimental features onto the defaults.
        config = super().default_config()
        config["experimental_features"] = self.experimental_features
        return config

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.e2e_keys_handler = hs.get_e2e_keys_handler()

    def test_no_device_one_time_keys(self) -> None:
        """
        Tests when no one time keys set, it still has the default `signed_curve25519` in
        `device_one_time_keys_count`
        """
        device_id = "TESTDEVICE"
        user_id = self.register_user("alice", "correcthorse")
        access_token = self.login(user_id, "correcthorse", device_id=device_id)

        # An initial sync is enough to surface the key counts.
        channel = self.make_request(
            "GET", self.sync_endpoint, access_token=access_token
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Note that "signed_curve25519" is always returned in key count responses
        # regardless of whether we uploaded any keys for it. This is necessary until
        # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
        self.assertDictEqual(
            channel.json_body["device_one_time_keys_count"],
            {"signed_curve25519": 0},
            channel.json_body["device_one_time_keys_count"],
        )

    def test_returns_device_one_time_keys(self) -> None:
        """
        Tests that one time keys for the device/user are counted correctly in the `/sync`
        response
        """
        device_id = "TESTDEVICE"
        user_id = self.register_user("alice", "correcthorse")
        access_token = self.login(user_id, "correcthorse", device_id=device_id)

        # Upload a mix of one time keys for the user/device.
        one_time_keys: JsonDict = {
            "alg1:k1": "key1",
            "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
            "alg2:k3": {"key": "key3"},
        }
        upload_result = self.get_success(
            self.e2e_keys_handler.upload_keys_for_user(
                user_id, device_id, {"one_time_keys": one_time_keys}
            )
        )
        # "signed_curve25519" is always present in key count responses whether
        # or not keys were uploaded for it. This is necessary until
        # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
        self.assertDictEqual(
            upload_result,
            {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}},
        )

        # An initial sync should report the same counts.
        channel = self.make_request(
            "GET", self.sync_endpoint, access_token=access_token
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        self.assertDictEqual(
            channel.json_body["device_one_time_keys_count"],
            {"alg1": 1, "alg2": 2, "signed_curve25519": 0},
            channel.json_body["device_one_time_keys_count"],
        )
@parameterized_class(
    ("sync_endpoint", "experimental_features"),
    [
        ("/sync", {}),
        (
            "/_matrix/client/unstable/org.matrix.msc3575/sync/e2ee",
            # Enable sliding sync
            {"msc3575_enabled": True},
        ),
    ],
)
class DeviceUnusedFallbackKeySyncTestCase(unittest.HomeserverTestCase):
    """
    Tests regarding unused device fallback keys
    (`device_unused_fallback_key_types`) changes.

    Attributes:
        sync_endpoint: The endpoint under test to use for syncing.
        experimental_features: The experimental features homeserver config to use.
    """

    sync_endpoint: str
    experimental_features: JsonDict

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        sync.register_servlets,
        devices.register_servlets,
    ]

    def default_config(self) -> JsonDict:
        config = super().default_config()
        config["experimental_features"] = self.experimental_features
        return config

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = self.hs.get_datastores().main
        self.e2e_keys_handler = hs.get_e2e_keys_handler()

    def test_no_device_unused_fallback_key(self) -> None:
        """
        Test when no unused fallback key is set, it just returns an empty list. The MSC
        says "The device_unused_fallback_key_types parameter must be present if the
        server supports fallback keys.",
        https://github.com/matrix-org/matrix-spec-proposals/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
        """
        test_device_id = "TESTDEVICE"
        alice_user_id = self.register_user("alice", "correcthorse")
        alice_access_token = self.login(
            alice_user_id, "correcthorse", device_id=test_device_id
        )

        # Request an initial sync
        channel = self.make_request(
            "GET", self.sync_endpoint, access_token=alice_access_token
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Check for the unused fallback key types (not a one-time-key count:
        # previous comment here was a copy-paste from the OTK test case)
        self.assertListEqual(
            channel.json_body["device_unused_fallback_key_types"],
            [],
            channel.json_body["device_unused_fallback_key_types"],
        )

    # Renamed from `test_returns_device_one_time_keys` (a copy-paste from
    # `DeviceOneTimeKeysSyncTestCase`): this test is about fallback keys.
    def test_returns_device_unused_fallback_key_types(self) -> None:
        """
        Tests that the device unused fallback key type is returned correctly in
        the `/sync` response.
        """
        test_device_id = "TESTDEVICE"
        alice_user_id = self.register_user("alice", "correcthorse")
        alice_access_token = self.login(
            alice_user_id, "correcthorse", device_id=test_device_id
        )

        # We shouldn't have any unused fallback keys yet
        res = self.get_success(
            self.store.get_e2e_unused_fallback_key_types(alice_user_id, test_device_id)
        )
        self.assertEqual(res, [])

        # Upload a fallback key for the user/device
        self.get_success(
            self.e2e_keys_handler.upload_keys_for_user(
                alice_user_id,
                test_device_id,
                {"fallback_keys": {"alg1:k1": "fallback_key1"}},
            )
        )
        # We should now have an unused alg1 key
        fallback_res = self.get_success(
            self.store.get_e2e_unused_fallback_key_types(alice_user_id, test_device_id)
        )
        self.assertEqual(fallback_res, ["alg1"], fallback_res)

        # Request an initial sync
        channel = self.make_request(
            "GET", self.sync_endpoint, access_token=alice_access_token
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Check for the unused fallback key types
        self.assertListEqual(
            channel.json_body["device_unused_fallback_key_types"],
            ["alg1"],
            channel.json_body["device_unused_fallback_key_types"],
        )
2022-03-30 05:43:04 -04:00
class ExcludeRoomTestCase(unittest.HomeserverTestCase):
    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        sync.register_servlets,
        room.register_servlets,
    ]

    def prepare(
        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
    ) -> None:
        self.user_id = self.register_user("user", "password")
        self.tok = self.login("user", "password")

        self.excluded_room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
        self.included_room_id = self.helper.create_room_as(self.user_id, tok=self.tok)

        # The exclusion list normally comes from config, but the room ID is only
        # known after room creation and the config can't be changed once the
        # homeserver has started, so poke the sync handler's list directly.
        self.hs.get_sync_handler().rooms_to_exclude_globally.append(
            self.excluded_room_id
        )

    def test_join_leave(self) -> None:
        """Tests that rooms are correctly excluded from the 'join' and 'leave' sections of
        sync responses.
        """
        sync_channel = self.make_request("GET", "/sync", access_token=self.tok)
        self.assertEqual(sync_channel.code, 200, sync_channel.result)

        joined = sync_channel.json_body["rooms"]["join"]
        self.assertNotIn(self.excluded_room_id, joined)
        self.assertIn(self.included_room_id, joined)

        self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok)
        self.helper.leave(self.included_room_id, self.user_id, tok=self.tok)

        sync_channel = self.make_request(
            "GET",
            "/sync?since=" + sync_channel.json_body["next_batch"],
            access_token=self.tok,
        )
        self.assertEqual(sync_channel.code, 200, sync_channel.result)

        left = sync_channel.json_body["rooms"]["leave"]
        self.assertNotIn(self.excluded_room_id, left)
        self.assertIn(self.included_room_id, left)

    def test_invite(self) -> None:
        """Tests that rooms are correctly excluded from the 'invite' section of sync
        responses.
        """
        invitee = self.register_user("invitee", "password")
        invitee_tok = self.login("invitee", "password")

        self.helper.invite(self.excluded_room_id, self.user_id, invitee, tok=self.tok)
        self.helper.invite(self.included_room_id, self.user_id, invitee, tok=self.tok)

        sync_channel = self.make_request("GET", "/sync", access_token=invitee_tok)
        self.assertEqual(sync_channel.code, 200, sync_channel.result)

        invites = sync_channel.json_body["rooms"]["invite"]
        self.assertNotIn(self.excluded_room_id, invites)
        self.assertIn(self.included_room_id, invites)

    def test_incremental_sync(self) -> None:
        """Tests that activity in the room is properly filtered out of incremental
        syncs.
        """
        sync_channel = self.make_request("GET", "/sync", access_token=self.tok)
        self.assertEqual(sync_channel.code, 200, sync_channel.result)
        since_token = sync_channel.json_body["next_batch"]

        self.helper.send(self.excluded_room_id, tok=self.tok)
        self.helper.send(self.included_room_id, tok=self.tok)

        sync_channel = self.make_request(
            "GET",
            f"/sync?since={since_token}",
            access_token=self.tok,
        )
        self.assertEqual(sync_channel.code, 200, sync_channel.result)

        joined = sync_channel.json_body["rooms"]["join"]
        self.assertNotIn(self.excluded_room_id, joined)
        self.assertIn(self.included_room_id, joined)
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
2024-07-24 06:47:25 -04:00
class SlidingSyncBase(unittest.HomeserverTestCase):
    """Base class for sliding sync test cases"""

    sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"

    def default_config(self) -> JsonDict:
        config = super().default_config()
        # Enable sliding sync
        config["experimental_features"] = {"msc3575_enabled": True}
        return config

    def do_sync(
        self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str
    ) -> Tuple[JsonDict, str]:
        """Do a sliding sync request with given body.

        Asserts the request was successful.

        Args:
            sync_body: The full request body to use
            since: Optional since token
            tok: Access token to use

        Returns:
            A tuple of the response body and the `pos` field.
        """
        sync_path = self.sync_endpoint
        if since:
            sync_path += f"?pos={since}"

        channel = self.make_request(
            method="POST",
            path=sync_path,
            content=sync_body,
            access_token=tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        return channel.json_body, channel.json_body["pos"]

    def _bump_notifier_wait_for_events(
        self,
        user_id: str,
        wake_stream_key: Literal[
            StreamKeyType.ACCOUNT_DATA,
            StreamKeyType.PRESENCE,
        ],
    ) -> None:
        """
        Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
        Sync results.

        Args:
            user_id: The user ID to wake up the notifier for
            wake_stream_key: The stream key to wake up. This will create an actual new
                entity in that stream so it's best to choose one that won't affect the
                Sliding Sync results you're testing for. In other words, if you're
                testing account data, choose `StreamKeyType.PRESENCE` instead. We
                support two possible stream keys because you're probably testing one
                or the other so one is always a "safe" option.
        """
        # We're expecting some new activity from this point onwards
        from_token = self.hs.get_event_sources().get_current_token()

        triggered_notifier_wait_for_events = False

        # (fixed typo: was `_on_new_acivity`)
        async def _on_new_activity(
            before_token: StreamToken, after_token: StreamToken
        ) -> bool:
            nonlocal triggered_notifier_wait_for_events
            triggered_notifier_wait_for_events = True
            return True

        notifier = self.hs.get_notifier()
        # Listen for some new activity for the user. We're just trying to confirm that
        # our bump below actually does what we think it does (triggers new activity for
        # the user).
        result_awaitable = notifier.wait_for_events(
            user_id,
            1000,
            _on_new_activity,
            from_token=from_token,
        )

        # Update the account data or presence so that `notifier.wait_for_events(...)`
        # wakes up. We chose these two options because they're least likely to show up
        # in the Sliding Sync response so it won't affect whether we have results.
        if wake_stream_key == StreamKeyType.ACCOUNT_DATA:
            self.get_success(
                self.hs.get_account_data_handler().add_account_data_for_user(
                    user_id,
                    "org.matrix.foobarbaz",
                    {"foo": "bar"},
                )
            )
        elif wake_stream_key == StreamKeyType.PRESENCE:
            # NOTE(review): despite the `PRESENCE` stream key, this branch wakes
            # the notifier by sending `user_id` a to-device message from a
            # freshly-registered sender -- confirm this is the intended stream
            # to bump here.
            sending_user_id = self.register_user(
                "user_bump_notifier_wait_for_events_" + random_string(10), "pass"
            )
            sending_user_tok = self.login(sending_user_id, "pass")
            test_msg = {"foo": "bar"}
            chan = self.make_request(
                "PUT",
                "/_matrix/client/r0/sendToDevice/m.test/1234",
                content={"messages": {user_id: {"d1": test_msg}}},
                access_token=sending_user_tok,
            )
            self.assertEqual(chan.code, 200, chan.result)
        else:
            raise AssertionError(
                "Unable to wake that stream in _bump_notifier_wait_for_events(...)"
            )

        # Wait for our notifier result
        self.get_success(result_awaitable)

        if not triggered_notifier_wait_for_events:
            raise AssertionError(
                "Expected `notifier.wait_for_events(...)` to be triggered"
            )
2024-07-24 06:47:25 -04:00
class SlidingSyncTestCase ( SlidingSyncBase ) :
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
"""
Tests regarding MSC3575 Sliding Sync ` / sync ` endpoint .
"""
servlets = [
synapse . rest . admin . register_servlets ,
login . register_servlets ,
room . register_servlets ,
sync . register_servlets ,
devices . register_servlets ,
]
def prepare ( self , reactor : MemoryReactor , clock : Clock , hs : HomeServer ) - > None :
self . store = hs . get_datastores ( ) . main
self . event_sources = hs . get_event_sources ( )
2024-07-04 13:25:36 -04:00
self . storage_controllers = hs . get_storage_controllers ( )
def _assertRequiredStateIncludes (
self ,
actual_required_state : Any ,
expected_state_events : Iterable [ EventBase ] ,
exact : bool = False ,
) - > None :
"""
2024-07-15 05:37:10 -04:00
Wrapper around ` assertIncludes ` to give slightly better looking diff error
2024-07-04 13:25:36 -04:00
messages that include some context " $event_id (type, state_key) " .
Args :
actual_required_state : The " required_state " of a room from a Sliding Sync
request response .
expected_state_events : The expected state events to be included in the
` actual_required_state ` .
exact : Whether the actual state should be exactly equal to the expected
state ( no extras ) .
"""
assert isinstance ( actual_required_state , list )
for event in actual_required_state :
assert isinstance ( event , dict )
2024-07-15 05:37:10 -04:00
self . assertIncludes (
2024-07-04 13:25:36 -04:00
{
f ' { event [ " event_id " ] } ( " { event [ " type " ] } " , " { event [ " state_key " ] } " ) '
for event in actual_required_state
} ,
{
f ' { event . event_id } ( " { event . type } " , " { event . state_key } " ) '
for event in expected_state_events
} ,
exact = exact ,
# Message to help understand the diff in context
message = str ( actual_required_state ) ,
)
2024-06-24 20:07:56 -04:00
def _add_new_dm_to_global_account_data (
self , source_user_id : str , target_user_id : str , target_room_id : str
) - > None :
"""
Helper to handle inserting a new DM for the source user into global account data
( handles all of the list merging ) .
Args :
source_user_id : The user ID of the DM mapping we ' re going to update
target_user_id : User ID of the person the DM is with
target_room_id : Room ID of the DM
"""
# Get the current DM map
existing_dm_map = self . get_success (
self . store . get_global_account_data_by_type_for_user (
source_user_id , AccountDataTypes . DIRECT
)
)
# Scrutinize the account data since it has no concrete type. We're just copying
# everything into a known type. It should be a mapping from user ID to a list of
# room IDs. Ignore anything else.
new_dm_map : Dict [ str , List [ str ] ] = { }
if isinstance ( existing_dm_map , dict ) :
for user_id , room_ids in existing_dm_map . items ( ) :
if isinstance ( user_id , str ) and isinstance ( room_ids , list ) :
for room_id in room_ids :
if isinstance ( room_id , str ) :
new_dm_map [ user_id ] = new_dm_map . get ( user_id , [ ] ) + [
room_id
]
# Add the new DM to the map
new_dm_map [ target_user_id ] = new_dm_map . get ( target_user_id , [ ] ) + [
target_room_id
]
# Save the DM map to global account data
self . get_success (
self . store . add_account_data_for_user (
source_user_id ,
AccountDataTypes . DIRECT ,
new_dm_map ,
)
)
2024-06-13 14:56:58 -04:00
def _create_dm_room (
self ,
inviter_user_id : str ,
inviter_tok : str ,
invitee_user_id : str ,
invitee_tok : str ,
2024-06-24 20:07:56 -04:00
should_join_room : bool = True ,
2024-06-13 14:56:58 -04:00
) - > str :
"""
Helper to create a DM room as the " inviter " and invite the " invitee " user to the
room . The " invitee " user also will join the room . The ` m . direct ` account data
will be set for both users .
"""
# Create a room and send an invite the other user
room_id = self . helper . create_room_as (
inviter_user_id ,
is_public = False ,
tok = inviter_tok ,
)
self . helper . invite (
room_id ,
src = inviter_user_id ,
targ = invitee_user_id ,
tok = inviter_tok ,
extra_data = { " is_direct " : True } ,
)
2024-06-24 20:07:56 -04:00
if should_join_room :
# Person that was invited joins the room
self . helper . join ( room_id , invitee_user_id , tok = invitee_tok )
2024-06-13 14:56:58 -04:00
# Mimic the client setting the room as a direct message in the global account
2024-06-24 20:07:56 -04:00
# data for both users.
self . _add_new_dm_to_global_account_data (
invitee_user_id , inviter_user_id , room_id
2024-06-13 14:56:58 -04:00
)
2024-06-24 20:07:56 -04:00
self . _add_new_dm_to_global_account_data (
inviter_user_id , invitee_user_id , room_id
2024-06-13 14:56:58 -04:00
)
return room_id
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
def test_sync_list ( self ) - > None :
"""
2024-07-02 12:07:05 -04:00
Test that room IDs show up in the Sliding Sync ` lists `
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
"""
2024-07-25 12:01:47 -04:00
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
2024-07-25 12:01:47 -04:00
room_id = self . helper . create_room_as ( user1_id , tok = user1_tok , is_public = True )
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 99 ] ] ,
" required_state " : [
[ " m.room.join_rules " , " " ] ,
[ " m.room.history_visibility " , " " ] ,
[ " m.space.child " , " * " ] ,
] ,
" timeline_limit " : 1 ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
# Make sure it has the foo-list we requested
self . assertListEqual (
2024-07-25 12:01:47 -04:00
list ( response_body [ " lists " ] . keys ( ) ) ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
[ " foo-list " ] ,
2024-07-25 12:01:47 -04:00
response_body [ " lists " ] . keys ( ) ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
)
# Make sure the list includes the room we are joined to
self . assertListEqual (
2024-07-25 12:01:47 -04:00
list ( response_body [ " lists " ] [ " foo-list " ] [ " ops " ] ) ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
[
{
" op " : " SYNC " ,
" range " : [ 0 , 99 ] ,
" room_ids " : [ room_id ] ,
}
] ,
2024-07-25 12:01:47 -04:00
response_body [ " lists " ] [ " foo-list " ] ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
)
def test_wait_for_sync_token ( self ) - > None :
"""
Test that worker will wait until it catches up to the given token
"""
2024-07-25 12:01:47 -04:00
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
# Create a future token that will cause us to wait. Since we never send a new
# event to reach that future stream_ordering, the worker will wait until the
# full timeout.
2024-07-02 07:39:49 -04:00
stream_id_gen = self . store . get_events_stream_id_generator ( )
stream_id = self . get_success ( stream_id_gen . get_next ( ) . __aenter__ ( ) )
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
current_token = self . event_sources . get_current_token ( )
future_position_token = current_token . copy_and_replace (
StreamKeyType . ROOM ,
2024-07-02 07:39:49 -04:00
RoomStreamToken ( stream = stream_id ) ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
)
future_position_token_serialized = self . get_success (
2024-07-24 06:47:25 -04:00
SlidingSyncStreamToken ( future_position_token , 0 ) . to_string ( self . store )
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
)
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 99 ] ] ,
" required_state " : [
[ " m.room.join_rules " , " " ] ,
[ " m.room.history_visibility " , " " ] ,
[ " m.space.child " , " * " ] ,
] ,
" timeline_limit " : 1 ,
}
}
}
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
channel = self . make_request (
" POST " ,
self . sync_endpoint + f " ?pos= { future_position_token_serialized } " ,
2024-07-25 12:01:47 -04:00
content = sync_body ,
access_token = user1_tok ,
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
await_result = False ,
)
# Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)`
# timeout
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 9900 )
channel . await_result ( timeout_ms = 200 )
self . assertEqual ( channel . code , 200 , channel . json_body )
2024-07-02 12:07:05 -04:00
# We expect the next `pos` in the result to be the same as what we requested
Add Sliding Sync `/sync` endpoint (initial implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
2024-06-06 15:44:32 -04:00
# with because we weren't able to find anything new yet.
2024-07-02 12:07:05 -04:00
self . assertEqual ( channel . json_body [ " pos " ] , future_position_token_serialized )
2024-06-13 14:56:58 -04:00
2024-07-22 16:40:06 -04:00
def test_wait_for_new_data ( self ) - > None :
"""
Test to make sure that the Sliding Sync request waits for new data to arrive .
( Only applies to incremental syncs with a ` timeout ` specified )
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id , user1_id , tok = user1_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 0 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 1 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Make the Sliding Sync request
channel = self . make_request (
" POST " ,
2024-07-24 06:47:25 -04:00
self . sync_endpoint + f " ?timeout=10000&pos= { from_token } " ,
content = sync_body ,
2024-07-22 16:40:06 -04:00
access_token = user1_tok ,
await_result = False ,
)
# Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 5000 )
# Bump the room with new events to trigger new results
event_response1 = self . helper . send (
room_id , " new activity in room " , tok = user1_tok
)
# Should respond before the 10 second timeout
channel . await_result ( timeout_ms = 3000 )
self . assertEqual ( channel . code , 200 , channel . json_body )
# Check to make sure the new event is returned
self . assertEqual (
[
event [ " event_id " ]
for event in channel . json_body [ " rooms " ] [ room_id ] [ " timeline " ]
] ,
[
event_response1 [ " event_id " ] ,
] ,
channel . json_body [ " rooms " ] [ room_id ] [ " timeline " ] ,
)
def test_wait_for_new_data_timeout ( self ) - > None :
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives so we timeout . We ' re also making sure that the default data
doesn ' t trigger a false-positive for new data.
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id , user1_id , tok = user1_tok )
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 0 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 1 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Make the Sliding Sync request
channel = self . make_request (
" POST " ,
2024-07-25 12:01:47 -04:00
self . sync_endpoint + f " ?timeout=10000&pos= { from_token } " ,
content = sync_body ,
2024-07-22 16:40:06 -04:00
access_token = user1_tok ,
await_result = False ,
)
# Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 5000 )
# Wake-up `notifier.wait_for_events(...)` that will cause us test
# `SlidingSyncResult.__bool__` for new results.
2024-07-25 11:43:35 -04:00
self . _bump_notifier_wait_for_events (
user1_id , wake_stream_key = StreamKeyType . ACCOUNT_DATA
)
2024-07-22 16:40:06 -04:00
# Block for a little bit more to ensure we don't see any new results.
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 4000 )
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel . await_result ( timeout_ms = 1200 )
self . assertEqual ( channel . code , 200 , channel . json_body )
2024-07-30 04:30:44 -04:00
# There should be no room sent down.
self . assertFalse ( channel . json_body [ " rooms " ] )
2024-07-22 16:40:06 -04:00
2024-06-13 14:56:58 -04:00
def test_filter_list(self) -> None:
    """
    Test that filters apply to `lists`
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")

    # Create a DM room in both membership states (joined and still-invited) so
    # the `is_dm` filter below is exercised for both.
    joined_dm_room_id = self._create_dm_room(
        inviter_user_id=user1_id,
        inviter_tok=user1_tok,
        invitee_user_id=user2_id,
        invitee_tok=user2_tok,
        should_join_room=True,
    )
    invited_dm_room_id = self._create_dm_room(
        inviter_user_id=user1_id,
        inviter_tok=user1_tok,
        invitee_user_id=user2_id,
        invitee_tok=user2_tok,
        should_join_room=False,
    )

    # Create a normal room
    room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
    self.helper.join(room_id, user1_id, tok=user1_tok)

    # Create a room that user1 is invited to
    invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
    self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            # Absence of filters does not imply "False" values
            "all": {
                "ranges": [[0, 99]],
                "required_state": [],
                "timeline_limit": 1,
                "filters": {},
            },
            # Test single truthy filter
            "dms": {
                "ranges": [[0, 99]],
                "required_state": [],
                "timeline_limit": 1,
                "filters": {"is_dm": True},
            },
            # Test single falsy filter
            "non-dms": {
                "ranges": [[0, 99]],
                "required_state": [],
                "timeline_limit": 1,
                "filters": {"is_dm": False},
            },
            # Test how multiple filters should stack (AND'd together)
            "room-invites": {
                "ranges": [[0, 99]],
                "required_state": [],
                "timeline_limit": 1,
                "filters": {"is_dm": False, "is_invite": True},
            },
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # Make sure the response has all of the lists we requested
    self.assertListEqual(
        list(response_body["lists"].keys()),
        ["all", "dms", "non-dms", "room-invites"],
        response_body["lists"].keys(),
    )

    # Make sure the lists have the correct rooms
    self.assertListEqual(
        list(response_body["lists"]["all"]["ops"]),
        [
            {
                "op": "SYNC",
                "range": [0, 99],
                "room_ids": [
                    invite_room_id,
                    room_id,
                    invited_dm_room_id,
                    joined_dm_room_id,
                ],
            }
        ],
        list(response_body["lists"]["all"]),
    )
    self.assertListEqual(
        list(response_body["lists"]["dms"]["ops"]),
        [
            {
                "op": "SYNC",
                "range": [0, 99],
                "room_ids": [invited_dm_room_id, joined_dm_room_id],
            }
        ],
        list(response_body["lists"]["dms"]),
    )
    self.assertListEqual(
        list(response_body["lists"]["non-dms"]["ops"]),
        [
            {
                "op": "SYNC",
                "range": [0, 99],
                "room_ids": [invite_room_id, room_id],
            }
        ],
        list(response_body["lists"]["non-dms"]),
    )
    self.assertListEqual(
        list(response_body["lists"]["room-invites"]["ops"]),
        [
            {
                "op": "SYNC",
                "range": [0, 99],
                "room_ids": [invite_room_id],
            }
        ],
        list(response_body["lists"]["room-invites"]),
    )

    # Ensure DM's are correctly marked
    self.assertDictEqual(
        {
            room_id: room.get("is_dm")
            for room_id, room in response_body["rooms"].items()
        },
        {
            invite_room_id: None,
            room_id: None,
            invited_dm_room_id: True,
            joined_dm_room_id: True,
        },
    )
2024-06-17 12:27:14 -04:00
def test_sort_list(self) -> None:
    """
    Test that the `lists` are sorted by `stream_ordering`
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")

    room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
    room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
    room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)

    # Activity that will order the rooms: room3 is oldest, room2 is most recent
    for target_room, message in (
        (room_id3, "activity in room3"),
        (room_id1, "activity in room1"),
        (room_id2, "activity in room2"),
    ):
        self.helper.send(target_room, message, tok=user1_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 99]],
                "required_state": [
                    ["m.room.join_rules", ""],
                    ["m.room.history_visibility", ""],
                    ["m.space.child", "*"],
                ],
                "timeline_limit": 1,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # Make sure it has the foo-list we requested
    self.assertListEqual(
        list(response_body["lists"].keys()),
        ["foo-list"],
        response_body["lists"].keys(),
    )

    # Make sure the list is sorted in the way we expect (most recent activity
    # first)
    self.assertListEqual(
        list(response_body["lists"]["foo-list"]["ops"]),
        [
            {
                "op": "SYNC",
                "range": [0, 99],
                "room_ids": [room_id2, room_id1, room_id3],
            }
        ],
        response_body["lists"]["foo-list"],
    )
2024-07-02 12:07:05 -04:00
def test_sliced_windows(self) -> None:
    """
    Test that the `lists` `ranges` are sliced correctly. Both sides of each range
    are inclusive.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")

    _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
    room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
    room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)

    # Request a single-room window, then a two-room window. Rooms are ordered
    # by recency, so the most recently created room comes first.
    for requested_range, expected_room_ids in (
        ([0, 0], [room_id3]),
        ([0, 1], [room_id3, room_id2]),
    ):
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [requested_range],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the foo-list we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["foo-list"],
            response_body["lists"].keys(),
        )
        # Make sure the window was sliced in the way we expect (inclusive on
        # both ends)
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": requested_range,
                    "room_ids": expected_room_ids,
                }
            ],
            response_body["lists"]["foo-list"],
        )
2024-07-09 13:26:45 -04:00
def test_rooms_meta_when_joined(self) -> None:
    """
    Test that the `rooms` `name` and `avatar` are included in the response and
    reflect the current state of the room when the user is joined to the room.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")

    room_id1 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            "name": "my super room",
        },
    )
    # Set the room avatar URL
    self.helper.send_state(
        room_id1,
        EventTypes.RoomAvatar,
        {"url": "mxc://DUMMY_MEDIA_ID"},
        tok=user2_tok,
    )
    self.helper.join(room_id1, user1_id, tok=user1_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 0,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    room_response = response_body["rooms"][room_id1]

    # Reflect the current state of the room
    self.assertEqual(room_response["name"], "my super room", room_response)
    self.assertEqual(room_response["avatar"], "mxc://DUMMY_MEDIA_ID", room_response)
    # user1 and user2 are joined, no one is invited, and it's not a DM
    self.assertEqual(room_response["joined_count"], 2)
    self.assertEqual(room_response["invited_count"], 0)
    self.assertIsNone(room_response.get("is_dm"))
2024-07-09 13:26:45 -04:00
def test_rooms_meta_when_invited(self) -> None:
    """
    Test that the `rooms` `name` and `avatar` are included in the response and
    reflect the current state of the room when the user is invited to the room.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")

    room_id1 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            "name": "my super room",
        },
    )
    # Set the room avatar URL
    self.helper.send_state(
        room_id1,
        EventTypes.RoomAvatar,
        {"url": "mxc://DUMMY_MEDIA_ID"},
        tok=user2_tok,
    )

    # User1 is invited to the room
    self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

    # Update the room name after user1 was invited
    self.helper.send_state(
        room_id1,
        EventTypes.Name,
        {"name": "my super duper room"},
        tok=user2_tok,
    )
    # Update the room avatar URL after user1 was invited
    self.helper.send_state(
        room_id1,
        EventTypes.RoomAvatar,
        {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
        tok=user2_tok,
    )

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 0,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # This should still reflect the current state of the room even when the user is
    # invited.
    self.assertEqual(
        response_body["rooms"][room_id1]["name"],
        "my super duper room",
        response_body["rooms"][room_id1],
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["avatar"],
        "mxc://UPDATED_DUMMY_MEDIA_ID",
        response_body["rooms"][room_id1],
    )
    # Membership counts: user2 is joined, user1 is still invited
    self.assertEqual(
        response_body["rooms"][room_id1]["joined_count"],
        1,
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["invited_count"],
        1,
    )
    self.assertIsNone(
        response_body["rooms"][room_id1].get("is_dm"),
    )
2024-07-09 13:26:45 -04:00
def test_rooms_meta_when_banned(self) -> None:
    """
    Test that the `rooms` `name` and `avatar` reflect the state of the room when the
    user was banned (do not leak current state).
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")

    room_id1 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            "name": "my super room",
        },
    )
    # Set the room avatar URL
    self.helper.send_state(
        room_id1,
        EventTypes.RoomAvatar,
        {"url": "mxc://DUMMY_MEDIA_ID"},
        tok=user2_tok,
    )
    self.helper.join(room_id1, user1_id, tok=user1_tok)
    self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

    # Update the room name after user1 was banned (user1 should not see this)
    self.helper.send_state(
        room_id1,
        EventTypes.Name,
        {"name": "my super duper room"},
        tok=user2_tok,
    )
    # Update the room avatar URL after user1 was banned (user1 should not see this)
    self.helper.send_state(
        room_id1,
        EventTypes.RoomAvatar,
        {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
        tok=user2_tok,
    )

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 0,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # Reflect the state of the room at the time of the ban, not the current state
    self.assertEqual(
        response_body["rooms"][room_id1]["name"],
        "my super room",
        response_body["rooms"][room_id1],
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["avatar"],
        "mxc://DUMMY_MEDIA_ID",
        response_body["rooms"][room_id1],
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["joined_count"],
        # FIXME: The actual number should be "1" (user2) but we currently don't
        # support this for rooms where the user has left/been banned.
        0,
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["invited_count"],
        0,
    )
    self.assertIsNone(
        response_body["rooms"][room_id1].get("is_dm"),
    )
2024-07-11 15:05:38 -04:00
def test_rooms_meta_heroes(self) -> None:
    """
    Test that the `rooms` `heroes` are included in the response when the room
    doesn't have a room name set.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")
    user3_id = self.register_user("user3", "pass")
    _user3_tok = self.login(user3_id, "pass")

    room_id1 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            "name": "my super room",
        },
    )
    self.helper.join(room_id1, user1_id, tok=user1_tok)
    # User3 is invited
    self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)

    room_id2 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            # No room name set so that `heroes` is populated
            #
            # "name": "my super room2",
        },
    )
    self.helper.join(room_id2, user1_id, tok=user1_tok)
    # User3 is invited
    self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 0,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # Room1 has a name so we shouldn't see any `heroes` which the client would
    # use to calculate the room name themselves.
    self.assertEqual(
        response_body["rooms"][room_id1]["name"],
        "my super room",
        response_body["rooms"][room_id1],
    )
    self.assertIsNone(response_body["rooms"][room_id1].get("heroes"))
    self.assertEqual(
        response_body["rooms"][room_id1]["joined_count"],
        2,
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["invited_count"],
        1,
    )

    # Room2 doesn't have a name so we should see `heroes` populated
    self.assertIsNone(response_body["rooms"][room_id2].get("name"))
    self.assertCountEqual(
        [
            hero["user_id"]
            for hero in response_body["rooms"][room_id2].get("heroes", [])
        ],
        # Heroes shouldn't include the user themselves (we shouldn't see user1)
        [user2_id, user3_id],
    )
    self.assertEqual(
        response_body["rooms"][room_id2]["joined_count"],
        2,
    )
    self.assertEqual(
        response_body["rooms"][room_id2]["invited_count"],
        1,
    )

    # We didn't request any state so we shouldn't see any `required_state`
    self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
    self.assertIsNone(response_body["rooms"][room_id2].get("required_state"))
2024-07-11 15:05:38 -04:00
def test_rooms_meta_heroes_max(self) -> None:
    """
    Test that the `rooms` `heroes` only includes the first 5 users (not including
    yourself).
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")
    user3_id = self.register_user("user3", "pass")
    user3_tok = self.login(user3_id, "pass")
    user4_id = self.register_user("user4", "pass")
    user4_tok = self.login(user4_id, "pass")
    user5_id = self.register_user("user5", "pass")
    user5_tok = self.login(user5_id, "pass")
    user6_id = self.register_user("user6", "pass")
    user6_tok = self.login(user6_id, "pass")
    user7_id = self.register_user("user7", "pass")
    user7_tok = self.login(user7_id, "pass")

    room_id1 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            # No room name set so that `heroes` is populated
            #
            # "name": "my super room",
        },
    )
    self.helper.join(room_id1, user1_id, tok=user1_tok)
    self.helper.join(room_id1, user3_id, tok=user3_tok)
    self.helper.join(room_id1, user4_id, tok=user4_tok)
    self.helper.join(room_id1, user5_id, tok=user5_tok)
    self.helper.join(room_id1, user6_id, tok=user6_tok)
    self.helper.join(room_id1, user7_id, tok=user7_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 0,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # The room doesn't have a name so we should see `heroes` populated
    self.assertIsNone(response_body["rooms"][room_id1].get("name"))
    self.assertCountEqual(
        [
            hero["user_id"]
            for hero in response_body["rooms"][room_id1].get("heroes", [])
        ],
        # Heroes should be the first 5 users in the room (excluding the user
        # themselves, we shouldn't see `user1`)
        [user2_id, user3_id, user4_id, user5_id, user6_id],
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["joined_count"],
        7,
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["invited_count"],
        0,
    )

    # We didn't request any state so we shouldn't see any `required_state`
    self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
2024-07-11 15:05:38 -04:00
def test_rooms_meta_heroes_when_banned(self) -> None:
    """
    Test that the `rooms` `heroes` are included in the response when the room
    doesn't have a room name set but doesn't leak information past their ban.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")
    user3_id = self.register_user("user3", "pass")
    _user3_tok = self.login(user3_id, "pass")
    user4_id = self.register_user("user4", "pass")
    user4_tok = self.login(user4_id, "pass")
    user5_id = self.register_user("user5", "pass")
    _user5_tok = self.login(user5_id, "pass")

    room_id1 = self.helper.create_room_as(
        user2_id,
        tok=user2_tok,
        extra_content={
            # No room name set so that `heroes` is populated
            #
            # "name": "my super room",
        },
    )
    # User1 joins the room
    self.helper.join(room_id1, user1_id, tok=user1_tok)
    # User3 is invited
    self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)

    # User1 is banned from the room
    self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

    # User4 joins the room after user1 is banned
    self.helper.join(room_id1, user4_id, tok=user4_tok)
    # User5 is invited after user1 is banned
    self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 0,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # The room doesn't have a name so we should see `heroes` populated
    self.assertIsNone(response_body["rooms"][room_id1].get("name"))
    self.assertCountEqual(
        [
            hero["user_id"]
            for hero in response_body["rooms"][room_id1].get("heroes", [])
        ],
        # Heroes shouldn't include the user themselves (we shouldn't see user1). We
        # also shouldn't see user4 since they joined after user1 was banned.
        #
        # FIXME: The actual result should be `[user2_id, user3_id]` but we currently
        # don't support this for rooms where the user has left/been banned.
        [],
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["joined_count"],
        # FIXME: The actual number should be "1" (user2) but we currently don't
        # support this for rooms where the user has left/been banned.
        0,
    )
    self.assertEqual(
        response_body["rooms"][room_id1]["invited_count"],
        # We shouldn't see user5 since they were invited after user1 was banned.
        #
        # FIXME: The actual number should be "1" (user3) but we currently don't
        # support this for rooms where the user has left/been banned.
        0,
    )
2024-07-09 13:26:45 -04:00
2024-07-02 12:07:05 -04:00
def test_rooms_limited_initial_sync(self) -> None:
    """
    Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
    on initial sync.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")

    room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
    self.helper.send(room_id1, "activity1", tok=user2_tok)
    self.helper.send(room_id1, "activity2", tok=user2_tok)
    # Record the stream positions of event3/event4 so we can check exactly
    # where `prev_batch` lands relative to them.
    event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
    event_pos3 = self.get_success(
        self.store.get_position_for_event(event_response3["event_id"])
    )
    event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
    event_pos4 = self.get_success(
        self.store.get_position_for_event(event_response4["event_id"])
    )
    event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
    user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)

    # Make the Sliding Sync request
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": 3,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
    self.assertEqual(
        response_body["rooms"][room_id1]["limited"],
        True,
        response_body["rooms"][room_id1],
    )
    # Check to make sure the latest events are returned
    self.assertEqual(
        [
            event["event_id"]
            for event in response_body["rooms"][room_id1]["timeline"]
        ],
        [
            event_response4["event_id"],
            event_response5["event_id"],
            user1_join_response["event_id"],
        ],
        response_body["rooms"][room_id1]["timeline"],
    )

    # Check to make sure the `prev_batch` points at the right place
    prev_batch_token = self.get_success(
        StreamToken.from_string(
            self.store, response_body["rooms"][room_id1]["prev_batch"]
        )
    )
    prev_batch_room_stream_token_serialized = self.get_success(
        prev_batch_token.room_key.to_string(self.store)
    )
    # If we use the `prev_batch` token to look backwards, we should see `event3`
    # next so make sure the token encompasses it
    self.assertEqual(
        event_pos3.persisted_after(prev_batch_token.room_key),
        False,
        f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
    )
    # If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
    # anymore since it was just returned in this response.
    self.assertEqual(
        event_pos4.persisted_after(prev_batch_token.room_key),
        True,
        f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
    )

    # With no `from_token` (initial sync), it's all historical since there is no
    # "live" range
    self.assertEqual(
        response_body["rooms"][room_id1]["num_live"],
        0,
        response_body["rooms"][room_id1],
    )
def test_rooms_not_limited_initial_sync(self) -> None:
    """
    Test that we mark `rooms` as `limited=False` when there are no more events to
    paginate to.
    """
    user1_id = self.register_user("user1", "pass")
    user1_tok = self.login(user1_id, "pass")
    user2_id = self.register_user("user2", "pass")
    user2_tok = self.login(user2_id, "pass")

    room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
    for message in ("activity1", "activity2", "activity3"):
        self.helper.send(room_id1, message, tok=user2_tok)
    self.helper.join(room_id1, user1_id, tok=user1_tok)

    # Make the Sliding Sync request with a `timeline_limit` large enough to
    # hold every event in the room
    timeline_limit = 100
    sync_body = {
        "lists": {
            "foo-list": {
                "ranges": [[0, 1]],
                "required_state": [],
                "timeline_limit": timeline_limit,
            }
        }
    }
    response_body, _ = self.do_sync(sync_body, tok=user1_tok)

    room_response = response_body["rooms"][room_id1]

    # The timeline should be `limited=False` because we have all of the events (no
    # more to paginate to)
    self.assertEqual(room_response["limited"], False, room_response)

    # We're just looking to make sure we got all of the events before hitting the
    # `timeline_limit`
    expected_number_of_events = 9
    self.assertEqual(
        len(room_response["timeline"]),
        expected_number_of_events,
        room_response["timeline"],
    )
    self.assertLessEqual(expected_number_of_events, timeline_limit)

    # With no `from_token` (initial sync), it's all historical since there is no
    # "live" token range.
    self.assertEqual(room_response["num_live"], 0, room_response)
def test_rooms_incremental_sync ( self ) - > None :
"""
Test ` rooms ` data during an incremental sync after an initial sync .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . send ( room_id1 , " activity before initial sync1 " , tok = user2_tok )
# Make an initial Sliding Sync request to grab a token. This is also a sanity
# check that we can go from initial to incremental sync.
2024-07-25 12:01:47 -04:00
sync_body = {
2024-07-02 12:07:05 -04:00
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 3 ,
}
}
}
2024-07-25 12:01:47 -04:00
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# Send some events but don't send enough to saturate the `timeline_limit`.
# We want to later test that we only get the new events since the `next_pos`
event_response2 = self . helper . send ( room_id1 , " activity after2 " , tok = user2_tok )
event_response3 = self . helper . send ( room_id1 , " activity after3 " , tok = user2_tok )
# Make an incremental Sliding Sync request (what we're trying to test)
2024-07-25 12:01:47 -04:00
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# We only expect to see the new events since the last sync which isn't enough to
# fill up the `timeline_limit`.
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " limited " ] ,
2024-07-02 12:07:05 -04:00
False ,
2024-07-25 12:01:47 -04:00
f ' Our `timeline_limit` was { sync_body [ " lists " ] [ " foo-list " ] [ " timeline_limit " ] } '
+ f ' and { len ( response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ) } events were returned in the timeline. '
+ str ( response_body [ " rooms " ] [ room_id1 ] ) ,
2024-07-02 12:07:05 -04:00
)
# Check to make sure the latest events are returned
self . assertEqual (
[
event [ " event_id " ]
2024-07-25 12:01:47 -04:00
for event in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ]
2024-07-02 12:07:05 -04:00
] ,
[
event_response2 [ " event_id " ] ,
event_response3 [ " event_id " ] ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ,
2024-07-02 12:07:05 -04:00
)
# All events are "live"
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " num_live " ] ,
2024-07-02 12:07:05 -04:00
2 ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
2024-07-08 14:17:08 -04:00
def test_rooms_bump_stamp ( self ) - > None :
"""
Test that ` bump_stamp ` is present and pointing to relevant events .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as (
user1_id ,
tok = user1_tok ,
)
event_response1 = message_response = self . helper . send (
room_id1 , " message in room1 " , tok = user1_tok
)
event_pos1 = self . get_success (
self . store . get_position_for_event ( event_response1 [ " event_id " ] )
)
room_id2 = self . helper . create_room_as (
user1_id ,
tok = user1_tok ,
)
send_response2 = self . helper . send ( room_id2 , " message in room2 " , tok = user1_tok )
event_pos2 = self . get_success (
self . store . get_position_for_event ( send_response2 [ " event_id " ] )
)
# Send a reaction in room1 but it shouldn't affect the `bump_stamp`
# because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES`
self . helper . send_event (
room_id1 ,
type = EventTypes . Reaction ,
content = {
" m.relates_to " : {
" event_id " : message_response [ " event_id " ] ,
" key " : " 👍 " ,
" rel_type " : " m.annotation " ,
}
} ,
tok = user1_tok ,
)
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 100 ,
2024-07-08 14:17:08 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-08 14:17:08 -04:00
# Make sure it has the foo-list we requested
self . assertListEqual (
2024-07-25 12:01:47 -04:00
list ( response_body [ " lists " ] . keys ( ) ) ,
2024-07-08 14:17:08 -04:00
[ " foo-list " ] ,
2024-07-25 12:01:47 -04:00
response_body [ " lists " ] . keys ( ) ,
2024-07-08 14:17:08 -04:00
)
# Make sure the list includes the rooms in the right order
self . assertListEqual (
2024-07-25 12:01:47 -04:00
list ( response_body [ " lists " ] [ " foo-list " ] [ " ops " ] ) ,
2024-07-08 14:17:08 -04:00
[
{
" op " : " SYNC " ,
" range " : [ 0 , 1 ] ,
# room1 sorts before room2 because it has the latest event (the
# reaction)
" room_ids " : [ room_id1 , room_id2 ] ,
}
] ,
2024-07-25 12:01:47 -04:00
response_body [ " lists " ] [ " foo-list " ] ,
2024-07-08 14:17:08 -04:00
)
# The `bump_stamp` for room1 should point at the latest message (not the
# reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " bump_stamp " ] ,
2024-07-08 14:17:08 -04:00
event_pos1 . stream ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-08 14:17:08 -04:00
)
# The `bump_stamp` for room2 should point at the latest message
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id2 ] [ " bump_stamp " ] ,
2024-07-08 14:17:08 -04:00
event_pos2 . stream ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id2 ] ,
2024-07-08 14:17:08 -04:00
)
2024-07-24 10:21:56 -04:00
def test_rooms_bump_stamp_backfill ( self ) - > None :
"""
Test that ` bump_stamp ` ignores backfilled events , i . e . events with a
negative stream ordering .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
# Create a remote room
creator = " @user:other "
room_id = " !foo:other "
shared_kwargs = {
" room_id " : room_id ,
" room_version " : " 10 " ,
}
create_tuple = self . get_success (
create_event (
self . hs ,
prev_event_ids = [ ] ,
type = EventTypes . Create ,
state_key = " " ,
sender = creator ,
* * shared_kwargs ,
)
)
creator_tuple = self . get_success (
create_event (
self . hs ,
prev_event_ids = [ create_tuple [ 0 ] . event_id ] ,
auth_event_ids = [ create_tuple [ 0 ] . event_id ] ,
type = EventTypes . Member ,
state_key = creator ,
content = { " membership " : Membership . JOIN } ,
sender = creator ,
* * shared_kwargs ,
)
)
# We add a message event as a valid "bump type"
msg_tuple = self . get_success (
create_event (
self . hs ,
prev_event_ids = [ creator_tuple [ 0 ] . event_id ] ,
auth_event_ids = [ create_tuple [ 0 ] . event_id ] ,
type = EventTypes . Message ,
content = { " body " : " foo " , " msgtype " : " m.text " } ,
sender = creator ,
* * shared_kwargs ,
)
)
invite_tuple = self . get_success (
create_event (
self . hs ,
prev_event_ids = [ msg_tuple [ 0 ] . event_id ] ,
auth_event_ids = [ create_tuple [ 0 ] . event_id , creator_tuple [ 0 ] . event_id ] ,
type = EventTypes . Member ,
state_key = user1_id ,
content = { " membership " : Membership . INVITE } ,
sender = creator ,
* * shared_kwargs ,
)
)
remote_events_and_contexts = [
create_tuple ,
creator_tuple ,
msg_tuple ,
invite_tuple ,
]
# Ensure the local HS knows the room version
self . get_success (
self . store . store_room ( room_id , creator , False , RoomVersions . V10 )
)
# Persist these events as backfilled events.
persistence = self . hs . get_storage_controllers ( ) . persistence
assert persistence is not None
for event , context in remote_events_and_contexts :
self . get_success ( persistence . persist_event ( event , context , backfilled = True ) )
# Now we join the local user to the room
join_tuple = self . get_success (
create_event (
self . hs ,
prev_event_ids = [ invite_tuple [ 0 ] . event_id ] ,
auth_event_ids = [ create_tuple [ 0 ] . event_id , invite_tuple [ 0 ] . event_id ] ,
type = EventTypes . Member ,
state_key = user1_id ,
content = { " membership " : Membership . JOIN } ,
sender = user1_id ,
* * shared_kwargs ,
)
)
self . get_success ( persistence . persist_event ( * join_tuple ) )
# Doing an SS request should return a positive `bump_stamp`, even though
# the only event that matches the bump types has as negative stream
# ordering.
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 5 ,
2024-07-24 10:21:56 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-24 10:21:56 -04:00
2024-07-25 12:01:47 -04:00
self . assertGreater ( response_body [ " rooms " ] [ room_id ] [ " bump_stamp " ] , 0 )
2024-07-24 10:21:56 -04:00
2024-07-02 12:07:05 -04:00
def test_rooms_newly_joined_incremental_sync ( self ) - > None :
"""
Test that when we make an incremental sync with a ` newly_joined ` ` rooms ` , we are
able to see some historical events before the ` from_token ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity before token1 " , tok = user2_tok )
event_response2 = self . helper . send (
room_id1 , " activity before token2 " , tok = user2_tok
)
2024-07-24 06:47:25 -04:00
# The `timeline_limit` is set to 4 so we can at least see one historical event
# before the `from_token`. We should see historical events because this is a
# `newly_joined` room.
timeline_limit = 4
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : timeline_limit ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# Join the room after the `from_token` which will make us consider this room as
# `newly_joined`.
user1_join_response = self . helper . join ( room_id1 , user1_id , tok = user1_tok )
# Send some events but don't send enough to saturate the `timeline_limit`.
# We want to later test that we only get the new events since the `next_pos`
event_response3 = self . helper . send (
room_id1 , " activity after token3 " , tok = user2_tok
)
event_response4 = self . helper . send (
room_id1 , " activity after token4 " , tok = user2_tok
)
# Make an incremental Sliding Sync request (what we're trying to test)
2024-07-25 12:01:47 -04:00
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# We should see the new events and the rest should be filled with historical
# events which will make us `limited=True` since there are more to paginate to.
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " limited " ] ,
2024-07-02 12:07:05 -04:00
True ,
f " Our `timeline_limit` was { timeline_limit } "
2024-07-25 12:01:47 -04:00
+ f ' and { len ( response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ) } events were returned in the timeline. '
+ str ( response_body [ " rooms " ] [ room_id1 ] ) ,
2024-07-02 12:07:05 -04:00
)
# Check to make sure that the "live" and historical events are returned
self . assertEqual (
[
event [ " event_id " ]
2024-07-25 12:01:47 -04:00
for event in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ]
2024-07-02 12:07:05 -04:00
] ,
[
event_response2 [ " event_id " ] ,
user1_join_response [ " event_id " ] ,
event_response3 [ " event_id " ] ,
event_response4 [ " event_id " ] ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ,
2024-07-02 12:07:05 -04:00
)
# Only events after the `from_token` are "live" (join, event3, event4)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " num_live " ] ,
2024-07-02 12:07:05 -04:00
3 ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_invite_shared_history_initial_sync ( self ) - > None :
"""
Test that ` rooms ` we are invited to have some stripped ` invite_state ` during an
initial sync .
This is an ` invite ` room so we should only have ` stripped_state ` ( no ` timeline ` )
but we also shouldn ' t see any timeline events because the history visiblity is
` shared ` and we haven ' t joined the room yet.
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user1 = UserID . from_string ( user1_id )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
user2 = UserID . from_string ( user2_id )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
# Ensure we're testing with a room with `shared` history visibility which means
# history visible until you actually join the room.
history_visibility_response = self . helper . get_state (
room_id1 , EventTypes . RoomHistoryVisibility , tok = user2_tok
)
self . assertEqual (
history_visibility_response . get ( " history_visibility " ) ,
HistoryVisibility . SHARED ,
)
self . helper . send ( room_id1 , " activity before1 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity before2 " , tok = user2_tok )
self . helper . invite ( room_id1 , src = user2_id , targ = user1_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity after3 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after4 " , tok = user2_tok )
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 3 ,
2024-07-02 12:07:05 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# `timeline` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " timeline " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " num_live " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " limited " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " prev_batch " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
2024-07-04 13:25:36 -04:00
# `required_state` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-04 13:25:36 -04:00
)
2024-07-02 12:07:05 -04:00
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self . assertCountEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
[
{
" content " : { " creator " : user2_id , " room_version " : " 10 " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.create " ,
} ,
{
" content " : { " join_rule " : " public " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.join_rules " ,
} ,
{
" content " : { " displayname " : user2 . localpart , " membership " : " join " } ,
" sender " : user2_id ,
" state_key " : user2_id ,
" type " : " m.room.member " ,
} ,
{
" content " : { " displayname " : user1 . localpart , " membership " : " invite " } ,
" sender " : user2_id ,
" state_key " : user1_id ,
" type " : " m.room.member " ,
} ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_invite_shared_history_incremental_sync ( self ) - > None :
"""
Test that ` rooms ` we are invited to have some stripped ` invite_state ` during an
incremental sync .
This is an ` invite ` room so we should only have ` stripped_state ` ( no ` timeline ` )
but we also shouldn ' t see any timeline events because the history visiblity is
` shared ` and we haven ' t joined the room yet.
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user1 = UserID . from_string ( user1_id )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
user2 = UserID . from_string ( user2_id )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
# Ensure we're testing with a room with `shared` history visibility which means
# history visible until you actually join the room.
history_visibility_response = self . helper . get_state (
room_id1 , EventTypes . RoomHistoryVisibility , tok = user2_tok
)
self . assertEqual (
history_visibility_response . get ( " history_visibility " ) ,
HistoryVisibility . SHARED ,
)
self . helper . send ( room_id1 , " activity before invite1 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity before invite2 " , tok = user2_tok )
self . helper . invite ( room_id1 , src = user2_id , targ = user1_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity after invite3 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after invite4 " , tok = user2_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 3 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
self . helper . send ( room_id1 , " activity after token5 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after toekn6 " , tok = user2_tok )
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
response_body , from_token = self . do_sync (
sync_body , since = from_token , tok = user1_tok
2024-07-02 12:07:05 -04:00
)
# `timeline` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " timeline " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " num_live " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " limited " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " prev_batch " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
2024-07-04 13:25:36 -04:00
# `required_state` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-04 13:25:36 -04:00
)
2024-07-02 12:07:05 -04:00
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self . assertCountEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
[
{
" content " : { " creator " : user2_id , " room_version " : " 10 " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.create " ,
} ,
{
" content " : { " join_rule " : " public " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.join_rules " ,
} ,
{
" content " : { " displayname " : user2 . localpart , " membership " : " join " } ,
" sender " : user2_id ,
" state_key " : user2_id ,
" type " : " m.room.member " ,
} ,
{
" content " : { " displayname " : user1 . localpart , " membership " : " invite " } ,
" sender " : user2_id ,
" state_key " : user1_id ,
" type " : " m.room.member " ,
} ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_invite_world_readable_history_initial_sync ( self ) - > None :
"""
Test that ` rooms ` we are invited to have some stripped ` invite_state ` during an
initial sync .
This is an ` invite ` room so we should only have ` stripped_state ` ( no ` timeline ` )
but depending on the semantics we decide , we could potentially see some
historical events before / after the ` from_token ` because the history is
` world_readable ` . Same situation for events after the ` from_token ` if the
history visibility was set to ` invited ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user1 = UserID . from_string ( user1_id )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
user2 = UserID . from_string ( user2_id )
room_id1 = self . helper . create_room_as (
user2_id ,
tok = user2_tok ,
extra_content = {
" preset " : " public_chat " ,
" initial_state " : [
{
" content " : {
" history_visibility " : HistoryVisibility . WORLD_READABLE
} ,
" state_key " : " " ,
" type " : EventTypes . RoomHistoryVisibility ,
}
] ,
} ,
)
# Ensure we're testing with a room with `world_readable` history visibility
# which means events are visible to anyone even without membership.
history_visibility_response = self . helper . get_state (
room_id1 , EventTypes . RoomHistoryVisibility , tok = user2_tok
)
self . assertEqual (
history_visibility_response . get ( " history_visibility " ) ,
HistoryVisibility . WORLD_READABLE ,
)
self . helper . send ( room_id1 , " activity before1 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity before2 " , tok = user2_tok )
self . helper . invite ( room_id1 , src = user2_id , targ = user1_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity after3 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after4 " , tok = user2_tok )
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
# Large enough to see the latest events and before the invite
" timeline_limit " : 4 ,
2024-07-02 12:07:05 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# `timeline` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " timeline " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " num_live " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " limited " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " prev_batch " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
2024-07-04 13:25:36 -04:00
# `required_state` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-04 13:25:36 -04:00
)
2024-07-02 12:07:05 -04:00
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self . assertCountEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
[
{
" content " : { " creator " : user2_id , " room_version " : " 10 " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.create " ,
} ,
{
" content " : { " join_rule " : " public " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.join_rules " ,
} ,
{
" content " : { " displayname " : user2 . localpart , " membership " : " join " } ,
" sender " : user2_id ,
" state_key " : user2_id ,
" type " : " m.room.member " ,
} ,
{
" content " : { " displayname " : user1 . localpart , " membership " : " invite " } ,
" sender " : user2_id ,
" state_key " : user1_id ,
" type " : " m.room.member " ,
} ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_invite_world_readable_history_incremental_sync ( self ) - > None :
"""
Test that ` rooms ` we are invited to have some stripped ` invite_state ` during an
incremental sync .
This is an ` invite ` room so we should only have ` stripped_state ` ( no ` timeline ` )
but depending on the semantics we decide , we could potentially see some
historical events before / after the ` from_token ` because the history is
` world_readable ` . Same situation for events after the ` from_token ` if the
history visibility was set to ` invited ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user1 = UserID . from_string ( user1_id )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
user2 = UserID . from_string ( user2_id )
room_id1 = self . helper . create_room_as (
user2_id ,
tok = user2_tok ,
extra_content = {
" preset " : " public_chat " ,
" initial_state " : [
{
" content " : {
" history_visibility " : HistoryVisibility . WORLD_READABLE
} ,
" state_key " : " " ,
" type " : EventTypes . RoomHistoryVisibility ,
}
] ,
} ,
)
# Ensure we're testing with a room with `world_readable` history visibility
# which means events are visible to anyone even without membership.
history_visibility_response = self . helper . get_state (
room_id1 , EventTypes . RoomHistoryVisibility , tok = user2_tok
)
self . assertEqual (
history_visibility_response . get ( " history_visibility " ) ,
HistoryVisibility . WORLD_READABLE ,
)
self . helper . send ( room_id1 , " activity before invite1 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity before invite2 " , tok = user2_tok )
self . helper . invite ( room_id1 , src = user2_id , targ = user1_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity after invite3 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after invite4 " , tok = user2_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
# Large enough to see the latest events and before the invite
" timeline_limit " : 4 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
self . helper . send ( room_id1 , " activity after token5 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after toekn6 " , tok = user2_tok )
2024-07-25 12:01:47 -04:00
# Make the incremental Sliding Sync request
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# `timeline` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " timeline " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " num_live " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " limited " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " prev_batch " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
2024-07-04 13:25:36 -04:00
# `required_state` is omitted for `invite` rooms with `stripped_state`
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-04 13:25:36 -04:00
)
2024-07-02 12:07:05 -04:00
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self . assertCountEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
[
{
" content " : { " creator " : user2_id , " room_version " : " 10 " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.create " ,
} ,
{
" content " : { " join_rule " : " public " } ,
" sender " : user2_id ,
" state_key " : " " ,
" type " : " m.room.join_rules " ,
} ,
{
" content " : { " displayname " : user2 . localpart , " membership " : " join " } ,
" sender " : user2_id ,
" state_key " : user2_id ,
" type " : " m.room.member " ,
} ,
{
" content " : { " displayname " : user1 . localpart , " membership " : " invite " } ,
" sender " : user2_id ,
" state_key " : user1_id ,
" type " : " m.room.member " ,
} ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " invite_state " ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_ban_initial_sync ( self ) - > None :
"""
Test that ` rooms ` we are banned from in an intial sync only allows us to see
timeline events up to the ban event .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity before1 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity before2 " , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
event_response3 = self . helper . send ( room_id1 , " activity after3 " , tok = user2_tok )
event_response4 = self . helper . send ( room_id1 , " activity after4 " , tok = user2_tok )
user1_ban_response = self . helper . ban (
room_id1 , src = user2_id , targ = user1_id , tok = user2_tok
)
self . helper . send ( room_id1 , " activity after5 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after6 " , tok = user2_tok )
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 3 ,
2024-07-02 12:07:05 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# We should see events before the ban but not after
self . assertEqual (
[
event [ " event_id " ]
2024-07-25 12:01:47 -04:00
for event in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ]
2024-07-02 12:07:05 -04:00
] ,
[
event_response3 [ " event_id " ] ,
event_response4 [ " event_id " ] ,
user1_ban_response [ " event_id " ] ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ,
2024-07-02 12:07:05 -04:00
)
# No "live" events in an initial sync (no `from_token` to define the "live"
# range)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " num_live " ] ,
2024-07-02 12:07:05 -04:00
0 ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# There are more events to paginate to
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " limited " ] ,
2024-07-02 12:07:05 -04:00
True ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_ban_incremental_sync1 ( self ) - > None :
"""
Test that ` rooms ` we are banned from during the next incremental sync only
allows us to see timeline events up to the ban event .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity before1 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity before2 " , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 4 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
event_response3 = self . helper . send ( room_id1 , " activity after3 " , tok = user2_tok )
event_response4 = self . helper . send ( room_id1 , " activity after4 " , tok = user2_tok )
# The ban is within the token range (between the `from_token` and the sliding
# sync request)
user1_ban_response = self . helper . ban (
room_id1 , src = user2_id , targ = user1_id , tok = user2_tok
)
self . helper . send ( room_id1 , " activity after5 " , tok = user2_tok )
self . helper . send ( room_id1 , " activity after6 " , tok = user2_tok )
2024-07-25 12:01:47 -04:00
# Make the incremental Sliding Sync request
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# We should see events before the ban but not after
self . assertEqual (
[
event [ " event_id " ]
2024-07-25 12:01:47 -04:00
for event in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ]
2024-07-02 12:07:05 -04:00
] ,
[
event_response3 [ " event_id " ] ,
event_response4 [ " event_id " ] ,
user1_ban_response [ " event_id " ] ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ,
2024-07-02 12:07:05 -04:00
)
# All live events in the incremental sync
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " num_live " ] ,
2024-07-02 12:07:05 -04:00
3 ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
# There aren't anymore events to paginate to in this range
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " limited " ] ,
2024-07-02 12:07:05 -04:00
False ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-02 12:07:05 -04:00
)
def test_rooms_ban_incremental_sync2 ( self ) - > None :
"""
Test that ` rooms ` we are banned from before the incremental sync don ' t return
any events in the timeline .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity before1 " , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . send ( room_id1 , " activity after2 " , tok = user2_tok )
# The ban is before we get our `from_token`
self . helper . ban ( room_id1 , src = user2_id , targ = user1_id , tok = user2_tok )
self . helper . send ( room_id1 , " activity after3 " , tok = user2_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 4 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-02 12:07:05 -04:00
self . helper . send ( room_id1 , " activity after4 " , tok = user2_tok )
2024-07-25 12:01:47 -04:00
# Make the incremental Sliding Sync request
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-02 12:07:05 -04:00
# Nothing to see for this banned user in the room in the token range
2024-07-30 04:30:44 -04:00
self . assertIsNone ( response_body [ " rooms " ] . get ( room_id1 ) )
2024-07-04 13:25:36 -04:00
def test_rooms_no_required_state ( self ) - > None :
"""
Empty ` rooms . required_state ` should not return any state events in the room
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
# Empty `required_state`
" required_state " : [ ] ,
" timeline_limit " : 0 ,
2024-07-04 13:25:36 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
# No `required_state` in response
self . assertIsNone (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) ,
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-04 13:25:36 -04:00
)
def test_rooms_required_state_initial_sync ( self ) - > None :
"""
Test ` rooms . required_state ` returns requested state events in the room during an
initial sync .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
# Make the Sliding Sync request
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . RoomHistoryVisibility , " " ] ,
# This one doesn't exist in the room
[ EventTypes . Tombstone , " " ] ,
] ,
" timeline_limit " : 0 ,
2024-07-04 13:25:36 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . RoomHistoryVisibility , " " ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_incremental_sync ( self ) - > None :
"""
Test ` rooms . required_state ` returns requested state events in the room during an
incremental sync .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . RoomHistoryVisibility , " " ] ,
# This one doesn't exist in the room
[ EventTypes . Tombstone , " " ] ,
] ,
2024-07-30 04:30:44 -04:00
" timeline_limit " : 1 ,
2024-07-24 06:47:25 -04:00
}
}
}
2024-07-25 12:01:47 -04:00
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
2024-07-30 04:30:44 -04:00
# Send a message so the room comes down sync.
self . helper . send ( room_id1 , " msg " , tok = user1_tok )
2024-07-25 12:01:47 -04:00
# Make the incremental Sliding Sync request
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-04 13:25:36 -04:00
2024-07-29 17:45:48 -04:00
# We only return updates but only if we've sent the room down the
# connection before.
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) )
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
def test_rooms_required_state_incremental_sync_restart ( self ) - > None :
"""
Test ` rooms . required_state ` returns requested state events in the room during an
incremental sync , after a restart ( and so the in memory caches are reset ) .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . RoomHistoryVisibility , " " ] ,
# This one doesn't exist in the room
[ EventTypes . Tombstone , " " ] ,
] ,
" timeline_limit " : 1 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
# Reset the in-memory cache
self . hs . get_sliding_sync_handler ( ) . connection_store . _connections . clear ( )
# Make the Sliding Sync request
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
# If the cache has been cleared then we do expect the state to come down
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . RoomHistoryVisibility , " " ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_wildcard ( self ) - > None :
"""
Test ` rooms . required_state ` returns all state events when using wildcard ` [ " * " , " * " ] ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " namespaced " ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
# Make the Sliding Sync request with wildcards for the `event_type` and `state_key`
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ StateValues . WILDCARD , StateValues . WILDCARD ] ,
] ,
" timeline_limit " : 0 ,
2024-07-04 13:25:36 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
# We should see all the state events in the room
state_map . values ( ) ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_wildcard_event_type ( self ) - > None :
"""
Test ` rooms . required_state ` returns relevant state events when using wildcard in
the event_type ` [ " * " , " foobarbaz " ] ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = user2_id ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
# Make the Sliding Sync request with wildcards for the `event_type`
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ StateValues . WILDCARD , user2_id ] ,
] ,
" timeline_limit " : 0 ,
2024-07-04 13:25:36 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
# We expect at-least any state event with the `user2_id` as the `state_key`
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Member , user2_id ) ] ,
state_map [ ( " org.matrix.foo_state " , user2_id ) ] ,
} ,
# Ideally, this would be exact but we're currently returning all state
# events when the `event_type` is a wildcard.
exact = False ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_wildcard_state_key ( self ) - > None :
"""
Test ` rooms . required_state ` returns relevant state events when using wildcard in
the state_key ` [ " foobarbaz " , " * " ] ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
# Make the Sliding Sync request with wildcards for the `state_key`
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Member , StateValues . WILDCARD ] ,
] ,
" timeline_limit " : 0 ,
2024-07-04 13:25:36 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Member , user1_id ) ] ,
state_map [ ( EventTypes . Member , user2_id ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_lazy_loading_room_members ( self ) - > None :
"""
Test ` rooms . required_state ` returns people relevant to the timeline when
lazy - loading room members , ` [ " m.room.member " , " $LAZY " ] ` .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
user3_id = self . register_user ( " user3 " , " pass " )
user3_tok = self . login ( user3_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . join ( room_id1 , user3_id , tok = user3_tok )
self . helper . send ( room_id1 , " 1 " , tok = user2_tok )
self . helper . send ( room_id1 , " 2 " , tok = user3_tok )
self . helper . send ( room_id1 , " 3 " , tok = user2_tok )
# Make the Sliding Sync request with lazy loading for the room members
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . Member , StateValues . LAZY ] ,
] ,
" timeline_limit " : 3 ,
2024-07-04 13:25:36 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
# Only user2 and user3 sent events in the 3 events we see in the `timeline`
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . Member , user2_id ) ] ,
state_map [ ( EventTypes . Member , user3_id ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-22 12:48:09 -04:00
def test_rooms_required_state_me ( self ) - > None :
"""
Test ` rooms . required_state ` correctly handles $ ME .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . send ( room_id1 , " 1 " , tok = user2_tok )
# Also send normal state events with state keys of the users, first
# change the power levels to allow this.
self . helper . send_state (
room_id1 ,
event_type = EventTypes . PowerLevels ,
body = { " users " : { user1_id : 50 , user2_id : 100 } } ,
tok = user2_tok ,
)
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo " ,
state_key = user1_id ,
body = { } ,
tok = user1_tok ,
)
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo " ,
state_key = user2_id ,
body = { } ,
tok = user2_tok ,
)
# Make the Sliding Sync request with a request for '$ME'.
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . Member , StateValues . ME ] ,
[ " org.matrix.foo " , StateValues . ME ] ,
] ,
" timeline_limit " : 3 ,
2024-07-22 12:48:09 -04:00
}
2024-07-25 12:01:47 -04:00
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 12:48:09 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
# Only user2 and user3 sent events in the 3 events we see in the `timeline`
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-22 12:48:09 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . Member , user1_id ) ] ,
state_map [ ( " org.matrix.foo " , user1_id ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
@parameterized.expand ( [ ( Membership . LEAVE , ) , ( Membership . BAN , ) ] )
def test_rooms_required_state_leave_ban ( self , stop_membership : str ) - > None :
"""
Test ` rooms . required_state ` should not return state past a leave / ban event .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
user3_id = self . register_user ( " user3 " , " pass " )
user3_tok = self . login ( user3_id , " pass " )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . Member , " * " ] ,
[ " org.matrix.foo_state " , " " ] ,
] ,
" timeline_limit " : 3 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . join ( room_id1 , user3_id , tok = user3_tok )
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
if stop_membership == Membership . LEAVE :
# User 1 leaves
self . helper . leave ( room_id1 , user1_id , tok = user1_tok )
elif stop_membership == Membership . BAN :
# User 1 is banned
self . helper . ban ( room_id1 , src = user2_id , targ = user1_id , tok = user2_tok )
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
# Change the state after user 1 leaves
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " qux " } ,
tok = user2_tok ,
)
self . helper . leave ( room_id1 , user3_id , tok = user3_tok )
# Make the Sliding Sync request with lazy loading for the room members
2024-07-25 12:01:47 -04:00
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-04 13:25:36 -04:00
# Only user2 and user3 sent events in the 3 events we see in the `timeline`
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . Member , user1_id ) ] ,
state_map [ ( EventTypes . Member , user2_id ) ] ,
state_map [ ( EventTypes . Member , user3_id ) ] ,
state_map [ ( " org.matrix.foo_state " , " " ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_combine_superset ( self ) - > None :
"""
Test ` rooms . required_state ` is combined across lists and room subscriptions .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
2024-07-15 05:37:10 -04:00
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.bar_state " ,
state_key = " " ,
body = { " bar " : " qux " } ,
tok = user2_tok ,
)
2024-07-04 13:25:36 -04:00
# Make the Sliding Sync request with wildcards for the `state_key`
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . Member , user1_id ] ,
] ,
" timeline_limit " : 0 ,
2024-07-15 05:37:10 -04:00
} ,
2024-07-25 12:01:47 -04:00
" bar-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Member , StateValues . WILDCARD ] ,
[ " org.matrix.foo_state " , " " ] ,
] ,
" timeline_limit " : 0 ,
2024-07-15 05:37:10 -04:00
} ,
2024-07-04 13:25:36 -04:00
} ,
2024-07-25 12:01:47 -04:00
" room_subscriptions " : {
room_id1 : {
" required_state " : [ [ " org.matrix.bar_state " , " " ] ] ,
" timeline_limit " : 0 ,
}
} ,
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-04 13:25:36 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . Member , user1_id ) ] ,
state_map [ ( EventTypes . Member , user2_id ) ] ,
state_map [ ( " org.matrix.foo_state " , " " ) ] ,
2024-07-15 05:37:10 -04:00
state_map [ ( " org.matrix.bar_state " , " " ) ] ,
2024-07-04 13:25:36 -04:00
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-04 13:25:36 -04:00
def test_rooms_required_state_partial_state ( self ) - > None :
"""
Test partially - stated room are excluded unless ` rooms . required_state ` is
lazy - loading room members .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
room_id2 = self . helper . create_room_as ( user2_id , tok = user2_tok )
_join_response1 = self . helper . join ( room_id1 , user1_id , tok = user1_tok )
join_response2 = self . helper . join ( room_id2 , user1_id , tok = user1_tok )
# Mark room2 as partial state
self . get_success (
mark_event_as_partial_state ( self . hs , join_response2 [ " event_id " ] , room_id2 )
)
# Make the Sliding Sync request (NOT lazy-loading room members)
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
] ,
" timeline_limit " : 0 ,
} ,
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
# Make sure the list includes room1 but room2 is excluded because it's still
# partially-stated
self . assertListEqual (
2024-07-25 12:01:47 -04:00
list ( response_body [ " lists " ] [ " foo-list " ] [ " ops " ] ) ,
2024-07-04 13:25:36 -04:00
[
{
" op " : " SYNC " ,
" range " : [ 0 , 1 ] ,
" room_ids " : [ room_id1 ] ,
}
] ,
2024-07-25 12:01:47 -04:00
response_body [ " lists " ] [ " foo-list " ] ,
2024-07-04 13:25:36 -04:00
)
# Make the Sliding Sync request (with lazy-loading room members)
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
# Lazy-load room members
[ EventTypes . Member , StateValues . LAZY ] ,
] ,
" timeline_limit " : 0 ,
} ,
}
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-04 13:25:36 -04:00
# The list should include both rooms now because we're lazy-loading room members
self . assertListEqual (
2024-07-25 12:01:47 -04:00
list ( response_body [ " lists " ] [ " foo-list " ] [ " ops " ] ) ,
2024-07-04 13:25:36 -04:00
[
{
" op " : " SYNC " ,
" range " : [ 0 , 1 ] ,
" room_ids " : [ room_id2 , room_id1 ] ,
}
] ,
2024-07-25 12:01:47 -04:00
response_body [ " lists " ] [ " foo-list " ] ,
2024-07-04 13:25:36 -04:00
)
2024-07-10 06:58:42 -04:00
2024-07-15 05:37:10 -04:00
def test_room_subscriptions_with_join_membership ( self ) - > None :
"""
Test ` room_subscriptions ` with a joined room should give us timeline and current
state events .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
join_response = self . helper . join ( room_id1 , user1_id , tok = user1_tok )
# Make the Sliding Sync request with just the room subscription
2024-07-25 12:01:47 -04:00
sync_body = {
" room_subscriptions " : {
room_id1 : {
" required_state " : [
[ EventTypes . Create , " " ] ,
] ,
" timeline_limit " : 1 ,
}
2024-07-15 05:37:10 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-15 05:37:10 -04:00
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
# We should see some state
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-15 05:37:10 -04:00
{
state_map [ ( EventTypes . Create , " " ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-15 05:37:10 -04:00
# We should see some events
self . assertEqual (
[
event [ " event_id " ]
2024-07-25 12:01:47 -04:00
for event in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ]
2024-07-15 05:37:10 -04:00
] ,
[
join_response [ " event_id " ] ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ,
2024-07-15 05:37:10 -04:00
)
# No "live" events in an initial sync (no `from_token` to define the "live"
# range)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " num_live " ] ,
2024-07-15 05:37:10 -04:00
0 ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-15 05:37:10 -04:00
)
# There are more events to paginate to
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " limited " ] ,
2024-07-15 05:37:10 -04:00
True ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-15 05:37:10 -04:00
)
def test_room_subscriptions_with_leave_membership ( self ) - > None :
"""
Test ` room_subscriptions ` with a leave room should give us timeline and state
events up to the leave event .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " bar " } ,
tok = user2_tok ,
)
join_response = self . helper . join ( room_id1 , user1_id , tok = user1_tok )
leave_response = self . helper . leave ( room_id1 , user1_id , tok = user1_tok )
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
# Send some events after user1 leaves
self . helper . send ( room_id1 , " activity after leave " , tok = user2_tok )
# Update state after user1 leaves
self . helper . send_state (
room_id1 ,
event_type = " org.matrix.foo_state " ,
state_key = " " ,
body = { " foo " : " qux " } ,
tok = user2_tok ,
)
# Make the Sliding Sync request with just the room subscription
2024-07-25 12:01:47 -04:00
sync_body = {
" room_subscriptions " : {
room_id1 : {
" required_state " : [
[ " org.matrix.foo_state " , " " ] ,
] ,
" timeline_limit " : 2 ,
}
2024-07-15 05:37:10 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-15 05:37:10 -04:00
# We should see the state at the time of the leave
self . _assertRequiredStateIncludes (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
2024-07-15 05:37:10 -04:00
{
state_map [ ( " org.matrix.foo_state " , " " ) ] ,
} ,
exact = True ,
)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] [ room_id1 ] . get ( " invite_state " ) )
2024-07-15 05:37:10 -04:00
# We should see some before we left (nothing after)
self . assertEqual (
[
event [ " event_id " ]
2024-07-25 12:01:47 -04:00
for event in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ]
2024-07-15 05:37:10 -04:00
] ,
[
join_response [ " event_id " ] ,
leave_response [ " event_id " ] ,
] ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ,
2024-07-15 05:37:10 -04:00
)
# No "live" events in an initial sync (no `from_token` to define the "live"
# range)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " num_live " ] ,
2024-07-15 05:37:10 -04:00
0 ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-15 05:37:10 -04:00
)
# There are more events to paginate to
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] [ " limited " ] ,
2024-07-15 05:37:10 -04:00
True ,
2024-07-25 12:01:47 -04:00
response_body [ " rooms " ] [ room_id1 ] ,
2024-07-15 05:37:10 -04:00
)
def test_room_subscriptions_no_leak_private_room ( self ) - > None :
"""
Test ` room_subscriptions ` with a private room we have never been in should not
leak any data to the user .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok , is_public = False )
# We should not be able to join the private room
self . helper . join (
room_id1 , user1_id , tok = user1_tok , expect_code = HTTPStatus . FORBIDDEN
)
# Make the Sliding Sync request with just the room subscription
2024-07-25 12:01:47 -04:00
sync_body = {
" room_subscriptions " : {
room_id1 : {
" required_state " : [
[ EventTypes . Create , " " ] ,
] ,
" timeline_limit " : 1 ,
}
2024-07-15 05:37:10 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-15 05:37:10 -04:00
# We should not see the room at all (we're not in it)
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] . get ( room_id1 ) , response_body [ " rooms " ] )
2024-07-15 05:37:10 -04:00
def test_room_subscriptions_world_readable ( self ) - > None :
"""
Test ` room_subscriptions ` with a room that has ` world_readable ` history visibility
FIXME : We should be able to see the room timeline and state
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
# Create a room with `world_readable` history visibility
room_id1 = self . helper . create_room_as (
user2_id ,
tok = user2_tok ,
extra_content = {
" preset " : " public_chat " ,
" initial_state " : [
{
" content " : {
" history_visibility " : HistoryVisibility . WORLD_READABLE
} ,
" state_key " : " " ,
" type " : EventTypes . RoomHistoryVisibility ,
}
] ,
} ,
)
# Ensure we're testing with a room with `world_readable` history visibility
# which means events are visible to anyone even without membership.
history_visibility_response = self . helper . get_state (
room_id1 , EventTypes . RoomHistoryVisibility , tok = user2_tok
)
self . assertEqual (
history_visibility_response . get ( " history_visibility " ) ,
HistoryVisibility . WORLD_READABLE ,
)
# Note: We never join the room
# Make the Sliding Sync request with just the room subscription
2024-07-25 12:01:47 -04:00
sync_body = {
" room_subscriptions " : {
room_id1 : {
" required_state " : [
[ EventTypes . Create , " " ] ,
] ,
" timeline_limit " : 1 ,
}
2024-07-15 05:37:10 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-15 05:37:10 -04:00
# FIXME: In the future, we should be able to see the room because it's
# `world_readable` but currently we don't support this.
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " rooms " ] . get ( room_id1 ) , response_body [ " rooms " ] )
2024-07-15 05:37:10 -04:00
2024-07-29 17:45:48 -04:00
def test_rooms_required_state_incremental_sync_LIVE ( self ) - > None :
""" Test that we only get state updates in incremental sync for rooms
we ' ve already seen (LIVE).
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
room_id1 = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id1 , user1_id , tok = user1_tok )
# Make the Sliding Sync request
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . RoomHistoryVisibility , " " ] ,
# This one doesn't exist in the room
[ EventTypes . Name , " " ] ,
] ,
" timeline_limit " : 0 ,
}
}
}
response_body , from_token = self . do_sync ( sync_body , tok = user1_tok )
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . RoomHistoryVisibility , " " ) ] ,
} ,
exact = True ,
)
# Send a state event
self . helper . send_state (
room_id1 , EventTypes . Name , body = { " name " : " foo " } , tok = user2_tok
)
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . assertNotIn ( " initial " , response_body [ " rooms " ] [ room_id1 ] )
self . _assertRequiredStateIncludes (
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
{
state_map [ ( EventTypes . Name , " " ) ] ,
} ,
exact = True ,
)
@parameterized.expand ( [ ( False , ) , ( True , ) ] )
def test_rooms_timeline_incremental_sync_PREVIOUSLY ( self , limited : bool ) - > None :
"""
Test getting room data where we have previously sent down the room , but
we missed sending down some timeline events previously and so its status
is considered PREVIOUSLY .
There are two versions of this test , one where there are more messages
than the timeline limit , and one where there isn ' t.
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as ( user1_id , tok = user1_tok )
room_id2 = self . helper . create_room_as ( user1_id , tok = user1_tok )
self . helper . send ( room_id1 , " msg " , tok = user1_tok )
timeline_limit = 5
conn_id = " conn_id "
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 0 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : timeline_limit ,
}
} ,
" conn_id " : " conn_id " ,
}
# The first room gets sent down the initial sync
response_body , initial_from_token = self . do_sync ( sync_body , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id1 } , response_body [ " rooms " ]
)
# We now send down some events in room1 (depending on the test param).
expected_events = [ ] # The set of events in the timeline
if limited :
for _ in range ( 10 ) :
resp = self . helper . send ( room_id1 , " msg1 " , tok = user1_tok )
expected_events . append ( resp [ " event_id " ] )
else :
resp = self . helper . send ( room_id1 , " msg1 " , tok = user1_tok )
expected_events . append ( resp [ " event_id " ] )
# A second messages happens in the other room, so room1 won't get sent down.
self . helper . send ( room_id2 , " msg " , tok = user1_tok )
# Only the second room gets sent down sync.
response_body , from_token = self . do_sync (
sync_body , since = initial_from_token , tok = user1_tok
)
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id2 } , response_body [ " rooms " ]
)
# FIXME: This is a hack to record that the first room wasn't sent down
# sync, as we don't implement that currently.
sliding_sync_handler = self . hs . get_sliding_sync_handler ( )
requester = self . get_success (
self . hs . get_auth ( ) . get_user_by_access_token ( user1_tok )
)
sync_config = SlidingSyncConfig (
user = requester . user ,
requester = requester ,
conn_id = conn_id ,
)
parsed_initial_from_token = self . get_success (
SlidingSyncStreamToken . from_string ( self . store , initial_from_token )
)
connection_position = self . get_success (
sliding_sync_handler . connection_store . record_rooms (
sync_config ,
parsed_initial_from_token ,
sent_room_ids = [ ] ,
unsent_room_ids = [ room_id1 ] ,
)
)
# FIXME: Now fix up `from_token` with new connect position above.
parsed_from_token = self . get_success (
SlidingSyncStreamToken . from_string ( self . store , from_token )
)
parsed_from_token = SlidingSyncStreamToken (
stream_token = parsed_from_token . stream_token ,
connection_position = connection_position ,
)
from_token = self . get_success ( parsed_from_token . to_string ( self . store ) )
# We now send another event to room1, so we should sync all the missing events.
resp = self . helper . send ( room_id1 , " msg2 " , tok = user1_tok )
expected_events . append ( resp [ " event_id " ] )
# This sync should contain the messages from room1 not yet sent down.
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id1 } , response_body [ " rooms " ]
)
self . assertNotIn ( " initial " , response_body [ " rooms " ] [ room_id1 ] )
self . assertEqual (
[ ev [ " event_id " ] for ev in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ] ,
expected_events [ - timeline_limit : ] ,
)
self . assertEqual ( response_body [ " rooms " ] [ room_id1 ] [ " limited " ] , limited )
self . assertEqual ( response_body [ " rooms " ] [ room_id1 ] . get ( " required_state " ) , None )
def test_rooms_required_state_incremental_sync_PREVIOUSLY ( self ) - > None :
"""
Test getting room data where we have previously sent down the room , but
we missed sending down some state previously and so its status is
considered PREVIOUSLY .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as ( user1_id , tok = user1_tok )
room_id2 = self . helper . create_room_as ( user1_id , tok = user1_tok )
self . helper . send ( room_id1 , " msg " , tok = user1_tok )
conn_id = " conn_id "
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 0 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . RoomHistoryVisibility , " " ] ,
# This one doesn't exist in the room
[ EventTypes . Name , " " ] ,
] ,
" timeline_limit " : 0 ,
}
} ,
" conn_id " : " conn_id " ,
}
# The first room gets sent down the initial sync
response_body , initial_from_token = self . do_sync ( sync_body , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id1 } , response_body [ " rooms " ]
)
# We now send down some state in room1
resp = self . helper . send_state (
room_id1 , EventTypes . Name , { " name " : " foo " } , tok = user1_tok
)
name_change_id = resp [ " event_id " ]
# A second messages happens in the other room, so room1 won't get sent down.
self . helper . send ( room_id2 , " msg " , tok = user1_tok )
# Only the second room gets sent down sync.
response_body , from_token = self . do_sync (
sync_body , since = initial_from_token , tok = user1_tok
)
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id2 } , response_body [ " rooms " ]
)
# FIXME: This is a hack to record that the first room wasn't sent down
# sync, as we don't implement that currently.
sliding_sync_handler = self . hs . get_sliding_sync_handler ( )
requester = self . get_success (
self . hs . get_auth ( ) . get_user_by_access_token ( user1_tok )
)
sync_config = SlidingSyncConfig (
user = requester . user ,
requester = requester ,
conn_id = conn_id ,
)
parsed_initial_from_token = self . get_success (
SlidingSyncStreamToken . from_string ( self . store , initial_from_token )
)
connection_position = self . get_success (
sliding_sync_handler . connection_store . record_rooms (
sync_config ,
parsed_initial_from_token ,
sent_room_ids = [ ] ,
unsent_room_ids = [ room_id1 ] ,
)
)
# FIXME: Now fix up `from_token` with new connect position above.
parsed_from_token = self . get_success (
SlidingSyncStreamToken . from_string ( self . store , from_token )
)
parsed_from_token = SlidingSyncStreamToken (
stream_token = parsed_from_token . stream_token ,
connection_position = connection_position ,
)
from_token = self . get_success ( parsed_from_token . to_string ( self . store ) )
# We now send another event to room1, so we should sync all the missing state.
self . helper . send ( room_id1 , " msg " , tok = user1_tok )
# This sync should contain the state changes from room1.
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id1 } , response_body [ " rooms " ]
)
self . assertNotIn ( " initial " , response_body [ " rooms " ] [ room_id1 ] )
# We should only see the name change.
self . assertEqual (
[
ev [ " event_id " ]
for ev in response_body [ " rooms " ] [ room_id1 ] [ " required_state " ]
] ,
[ name_change_id ] ,
)
def test_rooms_required_state_incremental_sync_NEVER ( self ) - > None :
"""
Test getting ` required_state ` where we have NEVER sent down the room before
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as ( user1_id , tok = user1_tok )
room_id2 = self . helper . create_room_as ( user1_id , tok = user1_tok )
self . helper . send ( room_id1 , " msg " , tok = user1_tok )
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 0 ] ] ,
" required_state " : [
[ EventTypes . Create , " " ] ,
[ EventTypes . RoomHistoryVisibility , " " ] ,
# This one doesn't exist in the room
[ EventTypes . Name , " " ] ,
] ,
" timeline_limit " : 1 ,
}
} ,
}
# A message happens in the other room, so room1 won't get sent down.
self . helper . send ( room_id2 , " msg " , tok = user1_tok )
# Only the second room gets sent down sync.
response_body , from_token = self . do_sync ( sync_body , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id2 } , response_body [ " rooms " ]
)
# We now send another event to room1, so we should send down the full
# room.
self . helper . send ( room_id1 , " msg2 " , tok = user1_tok )
# This sync should contain the messages from room1 not yet sent down.
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id1 } , response_body [ " rooms " ]
)
self . assertEqual ( response_body [ " rooms " ] [ room_id1 ] [ " initial " ] , True )
state_map = self . get_success (
self . storage_controllers . state . get_current_state ( room_id1 )
)
self . _assertRequiredStateIncludes (
response_body [ " rooms " ] [ room_id1 ] [ " required_state " ] ,
{
state_map [ ( EventTypes . Create , " " ) ] ,
state_map [ ( EventTypes . RoomHistoryVisibility , " " ) ] ,
} ,
exact = True ,
)
def test_rooms_timeline_incremental_sync_NEVER ( self ) - > None :
"""
Test getting timeline room data where we have NEVER sent down the room
before
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as ( user1_id , tok = user1_tok )
room_id2 = self . helper . create_room_as ( user1_id , tok = user1_tok )
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 0 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 5 ,
}
} ,
}
expected_events = [ ]
for _ in range ( 4 ) :
resp = self . helper . send ( room_id1 , " msg " , tok = user1_tok )
expected_events . append ( resp [ " event_id " ] )
# A message happens in the other room, so room1 won't get sent down.
self . helper . send ( room_id2 , " msg " , tok = user1_tok )
# Only the second room gets sent down sync.
response_body , from_token = self . do_sync ( sync_body , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id2 } , response_body [ " rooms " ]
)
# We now send another event to room1 so it comes down sync
resp = self . helper . send ( room_id1 , " msg2 " , tok = user1_tok )
expected_events . append ( resp [ " event_id " ] )
# This sync should contain the messages from room1 not yet sent down.
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
self . assertCountEqual (
response_body [ " rooms " ] . keys ( ) , { room_id1 } , response_body [ " rooms " ]
)
self . assertEqual (
[ ev [ " event_id " ] for ev in response_body [ " rooms " ] [ room_id1 ] [ " timeline " ] ] ,
expected_events ,
)
self . assertEqual ( response_body [ " rooms " ] [ room_id1 ] [ " limited " ] , True )
self . assertEqual ( response_body [ " rooms " ] [ room_id1 ] [ " initial " ] , True )
2024-07-30 04:30:44 -04:00
def test_rooms_with_no_updates_do_not_come_down_incremental_sync ( self ) - > None :
"""
Test that rooms with no updates are returned in subsequent incremental
syncs .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as ( user1_id , tok = user1_tok )
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 0 ,
}
}
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
# Make the incremental Sliding Sync request
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
# Nothing has happened in the room, so the room should not come down
# /sync.
self . assertIsNone ( response_body [ " rooms " ] . get ( room_id1 ) )
def test_empty_initial_room_comes_down_sync ( self ) - > None :
"""
Test that rooms come down / sync even with empty required state and
timeline limit in initial sync .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
room_id1 = self . helper . create_room_as ( user1_id , tok = user1_tok )
sync_body = {
" lists " : {
" foo-list " : {
" ranges " : [ [ 0 , 1 ] ] ,
" required_state " : [ ] ,
" timeline_limit " : 0 ,
}
}
}
# Make the Sliding Sync request
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
self . assertEqual ( response_body [ " rooms " ] [ room_id1 ] [ " initial " ] , True )
2024-07-10 06:58:42 -04:00
2024-07-24 06:47:25 -04:00
class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase):
    """Tests for the to-device sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        sync.register_servlets,
        sendtodevice.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main

    def _assert_to_device_response(
        self, response_body: JsonDict, expected_messages: List[JsonDict]
    ) -> str:
        """Assert the sliding sync response was successful and has the expected
        to-device messages.

        Returns the next_batch token from the to-device section.
        """
        extensions = response_body["extensions"]
        to_device = extensions["to_device"]
        self.assertIsInstance(to_device["next_batch"], str)
        self.assertEqual(to_device["events"], expected_messages)

        return to_device["next_batch"]

    def test_no_data(self) -> None:
        """Test that enabling to-device extension works, even if there is
        no-data
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # We expect no to-device messages
        self._assert_to_device_response(response_body, [])

    def test_data_initial_sync(self) -> None:
        """Test that we get to-device messages when we don't specify a since
        token"""
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", "d1")
        user2_id = self.register_user("u2", "pass")
        user2_tok = self.login(user2_id, "pass", "d2")

        # Send the to-device message
        test_msg = {"foo": "bar"}
        chan = self.make_request(
            "PUT",
            "/_matrix/client/r0/sendToDevice/m.test/1234",
            content={"messages": {user1_id: {"d1": test_msg}}},
            access_token=user2_tok,
        )
        self.assertEqual(chan.code, 200, chan.result)

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self._assert_to_device_response(
            response_body,
            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
        )

    def test_data_incremental_sync(self) -> None:
        """Test that we get to-device messages over incremental syncs"""
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", "d1")
        user2_id = self.register_user("u2", "pass")
        user2_tok = self.login(user2_id, "pass", "d2")

        sync_body: JsonDict = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        # No to-device messages yet.
        next_batch = self._assert_to_device_response(response_body, [])

        test_msg = {"foo": "bar"}
        chan = self.make_request(
            "PUT",
            "/_matrix/client/r0/sendToDevice/m.test/1234",
            content={"messages": {user1_id: {"d1": test_msg}}},
            access_token=user2_tok,
        )
        self.assertEqual(chan.code, 200, chan.result)

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                    "since": next_batch,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        next_batch = self._assert_to_device_response(
            response_body,
            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
        )

        # The next sliding sync request should not include the to-device
        # message.
        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                    "since": next_batch,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self._assert_to_device_response(response_body, [])

        # An initial sliding sync request should not include the to-device
        # message, as it should have been deleted
        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self._assert_to_device_response(response_body, [])

    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", "d1")
        user2_id = self.register_user("u2", "pass")
        user2_tok = self.login(user2_id, "pass", "d2")

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the to-device messages to trigger new results
        test_msg = {"foo": "bar"}
        send_to_device_channel = self.make_request(
            "PUT",
            "/_matrix/client/r0/sendToDevice/m.test/1234",
            content={"messages": {user1_id: {"d1": test_msg}}},
            access_token=user2_tok,
        )
        self.assertEqual(
            send_to_device_channel.code, 200, send_to_device_channel.result
        )
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        self._assert_to_device_response(
            channel.json_body,
            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
        )

    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive but
        no data ever arrives so we timeout. We're also making sure that the default data
        from the To-Device extension doesn't trigger a false-positive for new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake-up `notifier.wait_for_events(...)` that will cause us test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        self._assert_to_device_response(channel.json_body, [])
class SlidingSyncE2eeExtensionTestCase ( SlidingSyncBase ) :
2024-07-22 16:40:06 -04:00
""" Tests for the e2ee sliding sync extension """
servlets = [
synapse . rest . admin . register_servlets ,
login . register_servlets ,
room . register_servlets ,
sync . register_servlets ,
devices . register_servlets ,
]
def prepare ( self , reactor : MemoryReactor , clock : Clock , hs : HomeServer ) - > None :
self . store = hs . get_datastores ( ) . main
self . e2e_keys_handler = hs . get_e2e_keys_handler ( )
def test_no_data_initial_sync ( self ) - > None :
"""
Test that enabling e2ee extension works during an intitial sync , even if there
is no - data
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
# Make an initial Sliding Sync request with the e2ee extension enabled
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
2024-07-22 16:40:06 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Device list updates are only present for incremental syncs
2024-07-25 12:01:47 -04:00
self . assertIsNone ( response_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " ) )
2024-07-22 16:40:06 -04:00
# Both of these should be present even when empty
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] [ " device_one_time_keys_count " ] ,
2024-07-22 16:40:06 -04:00
{
# This is always present because of
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
" signed_curve25519 " : 0
} ,
)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] [ " device_unused_fallback_key_types " ] ,
2024-07-22 16:40:06 -04:00
[ ] ,
)
def test_no_data_incremental_sync ( self ) - > None :
"""
Test that enabling e2ee extension works during an incremental sync , even if
there is no - data
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
} ,
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Make an incremental Sliding Sync request with the e2ee extension enabled
2024-07-25 12:01:47 -04:00
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Device list shows up for incremental syncs
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " , { } ) . get ( " changed " ) ,
2024-07-22 16:40:06 -04:00
[ ] ,
)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " , { } ) . get ( " left " ) ,
2024-07-22 16:40:06 -04:00
[ ] ,
)
# Both of these should be present even when empty
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] [ " device_one_time_keys_count " ] ,
2024-07-22 16:40:06 -04:00
{
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
" signed_curve25519 " : 0
} ,
)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] [ " device_unused_fallback_key_types " ] ,
2024-07-22 16:40:06 -04:00
[ ] ,
)
def test_wait_for_new_data ( self ) - > None :
"""
Test to make sure that the Sliding Sync request waits for new data to arrive .
( Only applies to incremental syncs with a ` timeout ` specified )
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
test_device_id = " TESTDEVICE "
user3_id = self . register_user ( " user3 " , " pass " )
user3_tok = self . login ( user3_id , " pass " , device_id = test_device_id )
room_id = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id , user1_id , tok = user1_tok )
self . helper . join ( room_id , user3_id , tok = user3_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
} ,
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Make the Sliding Sync request
channel = self . make_request (
" POST " ,
2024-07-24 06:47:25 -04:00
self . sync_endpoint + " ?timeout=10000 " + f " &pos= { from_token } " ,
content = sync_body ,
2024-07-22 16:40:06 -04:00
access_token = user1_tok ,
await_result = False ,
)
# Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 5000 )
# Bump the device lists to trigger new results
# Have user3 update their device list
device_update_channel = self . make_request (
" PUT " ,
f " /devices/ { test_device_id } " ,
{
" display_name " : " New Device Name " ,
} ,
access_token = user3_tok ,
)
self . assertEqual (
device_update_channel . code , 200 , device_update_channel . json_body
)
# Should respond before the 10 second timeout
channel . await_result ( timeout_ms = 3000 )
self . assertEqual ( channel . code , 200 , channel . json_body )
# We should see the device list update
self . assertEqual (
channel . json_body [ " extensions " ] [ " e2ee " ]
. get ( " device_lists " , { } )
. get ( " changed " ) ,
[ user3_id ] ,
)
self . assertEqual (
channel . json_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " , { } ) . get ( " left " ) ,
[ ] ,
)
def test_wait_for_new_data_timeout ( self ) - > None :
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives so we timeout . We ' re also making sure that the default data
from the E2EE extension doesn ' t trigger a false-positive for new data (see
` device_one_time_keys_count . signed_curve25519 ` ) .
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
} ,
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Make the Sliding Sync request
channel = self . make_request (
" POST " ,
2024-07-24 06:47:25 -04:00
self . sync_endpoint + f " ?timeout=10000&pos= { from_token } " ,
content = sync_body ,
2024-07-22 16:40:06 -04:00
access_token = user1_tok ,
await_result = False ,
)
# Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 5000 )
# Wake-up `notifier.wait_for_events(...)` that will cause us test
# `SlidingSyncResult.__bool__` for new results.
2024-07-25 11:43:35 -04:00
self . _bump_notifier_wait_for_events (
user1_id , wake_stream_key = StreamKeyType . ACCOUNT_DATA
)
2024-07-22 16:40:06 -04:00
# Block for a little bit more to ensure we don't see any new results.
with self . assertRaises ( TimedOutException ) :
channel . await_result ( timeout_ms = 4000 )
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel . await_result ( timeout_ms = 1200 )
self . assertEqual ( channel . code , 200 , channel . json_body )
# Device lists are present for incremental syncs but empty because no device changes
self . assertEqual (
channel . json_body [ " extensions " ] [ " e2ee " ]
. get ( " device_lists " , { } )
. get ( " changed " ) ,
[ ] ,
)
self . assertEqual (
channel . json_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " , { } ) . get ( " left " ) ,
[ ] ,
)
# Both of these should be present even when empty
self . assertEqual (
channel . json_body [ " extensions " ] [ " e2ee " ] [ " device_one_time_keys_count " ] ,
{
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
" signed_curve25519 " : 0
} ,
)
self . assertEqual (
channel . json_body [ " extensions " ] [ " e2ee " ] [ " device_unused_fallback_key_types " ] ,
[ ] ,
)
def test_device_lists ( self ) - > None :
"""
Test that device list updates are included in the response
"""
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " )
user2_id = self . register_user ( " user2 " , " pass " )
user2_tok = self . login ( user2_id , " pass " )
test_device_id = " TESTDEVICE "
user3_id = self . register_user ( " user3 " , " pass " )
user3_tok = self . login ( user3_id , " pass " , device_id = test_device_id )
user4_id = self . register_user ( " user4 " , " pass " )
user4_tok = self . login ( user4_id , " pass " )
room_id = self . helper . create_room_as ( user2_id , tok = user2_tok )
self . helper . join ( room_id , user1_id , tok = user1_tok )
self . helper . join ( room_id , user3_id , tok = user3_tok )
self . helper . join ( room_id , user4_id , tok = user4_tok )
2024-07-24 06:47:25 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
} ,
}
_ , from_token = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Have user3 update their device list
channel = self . make_request (
" PUT " ,
f " /devices/ { test_device_id } " ,
{
" display_name " : " New Device Name " ,
} ,
access_token = user3_tok ,
)
self . assertEqual ( channel . code , 200 , channel . json_body )
# User4 leaves the room
self . helper . leave ( room_id , user4_id , tok = user4_tok )
# Make an incremental Sliding Sync request with the e2ee extension enabled
2024-07-25 12:01:47 -04:00
response_body , _ = self . do_sync ( sync_body , since = from_token , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Device list updates show up
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " , { } ) . get ( " changed " ) ,
2024-07-22 16:40:06 -04:00
[ user3_id ] ,
)
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] . get ( " device_lists " , { } ) . get ( " left " ) ,
2024-07-22 16:40:06 -04:00
[ user4_id ] ,
)
def test_device_one_time_keys_count ( self ) - > None :
"""
Test that ` device_one_time_keys_count ` are included in the response
"""
test_device_id = " TESTDEVICE "
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " , device_id = test_device_id )
# Upload one time keys for the user/device
keys : JsonDict = {
" alg1:k1 " : " key1 " ,
" alg2:k2 " : { " key " : " key2 " , " signatures " : { " k1 " : " sig1 " } } ,
" alg2:k3 " : { " key " : " key3 " } ,
}
upload_keys_response = self . get_success (
self . e2e_keys_handler . upload_keys_for_user (
user1_id , test_device_id , { " one_time_keys " : keys }
)
)
self . assertDictEqual (
upload_keys_response ,
{
" one_time_key_counts " : {
" alg1 " : 1 ,
" alg2 " : 2 ,
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
" signed_curve25519 " : 0 ,
}
} ,
)
# Make a Sliding Sync request with the e2ee extension enabled
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
2024-07-22 16:40:06 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Check for those one time key counts
self . assertEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] . get ( " device_one_time_keys_count " ) ,
2024-07-22 16:40:06 -04:00
{
" alg1 " : 1 ,
" alg2 " : 2 ,
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
" signed_curve25519 " : 0 ,
} ,
)
def test_device_unused_fallback_key_types ( self ) - > None :
"""
Test that ` device_unused_fallback_key_types ` are included in the response
"""
test_device_id = " TESTDEVICE "
user1_id = self . register_user ( " user1 " , " pass " )
user1_tok = self . login ( user1_id , " pass " , device_id = test_device_id )
# We shouldn't have any unused fallback keys yet
res = self . get_success (
self . store . get_e2e_unused_fallback_key_types ( user1_id , test_device_id )
)
self . assertEqual ( res , [ ] )
# Upload a fallback key for the user/device
self . get_success (
self . e2e_keys_handler . upload_keys_for_user (
user1_id ,
test_device_id ,
{ " fallback_keys " : { " alg1:k1 " : " fallback_key1 " } } ,
)
)
# We should now have an unused alg1 key
fallback_res = self . get_success (
self . store . get_e2e_unused_fallback_key_types ( user1_id , test_device_id )
)
self . assertEqual ( fallback_res , [ " alg1 " ] , fallback_res )
# Make a Sliding Sync request with the e2ee extension enabled
2024-07-25 12:01:47 -04:00
sync_body = {
" lists " : { } ,
" extensions " : {
" e2ee " : {
" enabled " : True ,
}
2024-07-22 16:40:06 -04:00
} ,
2024-07-25 12:01:47 -04:00
}
response_body , _ = self . do_sync ( sync_body , tok = user1_tok )
2024-07-22 16:40:06 -04:00
# Check for the unused fallback key types
self . assertListEqual (
2024-07-25 12:01:47 -04:00
response_body [ " extensions " ] [ " e2ee " ] . get ( " device_unused_fallback_key_types " ) ,
2024-07-22 16:40:06 -04:00
[ " alg1 " ] ,
)
class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
    """Tests for the account_data sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        sendtodevice.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.account_data_handler = hs.get_account_data_handler()

    def test_no_data_initial_sync(self) -> None:
        """
        Test that enabling the account_data extension works during an initial sync,
        even if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Make an initial Sliding Sync request with the account_data extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            # Even though we don't have any global account data set, Synapse saves some
            # default push rules for us.
            {AccountDataTypes.PUSH_RULES},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_no_data_incremental_sync(self) -> None:
        """
        Test that enabling account_data extension works during an incremental sync, even
        if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the account_data extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # There has been no account data changes since the `from_token` so we shouldn't
        # see any account data here.
        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            set(),
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_global_account_data_initial_sync(self) -> None:
        """
        On initial sync, we should return all global account data on initial sync.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Update the global account data
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user_id=user1_id,
                account_data_type="org.matrix.foobarbaz",
                content={"foo": "bar"},
            )
        )

        # Make an initial Sliding Sync request with the account_data extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # It should show us all of the global account data
        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_global_account_data_incremental_sync(self) -> None:
        """
        On incremental sync, we should only account data that has changed since the
        `from_token`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Add some global account data
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user_id=user1_id,
                account_data_type="org.matrix.foobarbaz",
                content={"foo": "bar"},
            )
        )

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Add some other global account data
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user_id=user1_id,
                account_data_type="org.matrix.doodardaz",
                content={"doo": "dar"},
            )
        )

        # Make an incremental Sliding Sync request with the account_data extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            # We should only see the new global account data that happened after the `from_token`
            {"org.matrix.doodardaz"},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_room_account_data_initial_sync(self) -> None:
        """
        On initial sync, we return all account data for a given room but only for
        rooms that we request and are being returned in the Sliding Sync response.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a room and add some room account data
        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Make an initial Sliding Sync request with the account_data extension enabled
        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
        # Even though we requested room2, we only expect room1 to show up because that's
        # the only room in the Sliding Sync response (room2 is not one of our room
        # subscriptions or in a sliding window list).
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        self.assertIncludes(
            {
                event["type"]
                for event in response_body["extensions"]["account_data"]
                .get("rooms")
                .get(room_id1)
            },
            {"org.matrix.roorarraz"},
            exact=True,
        )

    def test_room_account_data_incremental_sync(self) -> None:
        """
        On incremental sync, we return all account data for a given room but only for
        rooms that we request and are being returned in the Sliding Sync response.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a room and add some room account data
        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Add some other room account data
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz2",
                content={"roo": "rar"},
            )
        )
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz2",
                content={"roo": "rar"},
            )
        )

        # Make an incremental Sliding Sync request with the account_data extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
        # Even though we requested room2, we only expect room1 to show up because that's
        # the only room in the Sliding Sync response (room2 is not one of our room
        # subscriptions or in a sliding window list).
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        # We should only see the new room account data that happened after the `from_token`
        self.assertIncludes(
            {
                event["type"]
                for event in response_body["extensions"]["account_data"]
                .get("rooms")
                .get(room_id1)
            },
            {"org.matrix.roorarraz2"},
            exact=True,
        )

    def test_room_account_data_relevant_rooms(self) -> None:
        """
        Test out different variations of `lists`/`rooms` we are requesting account data for.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a room and add some room account data
        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id3,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id4,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id5,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        room_id_to_human_name_map = {
            room_id1: "room1",
            room_id2: "room2",
            room_id3: "room3",
            room_id4: "room4",
            room_id5: "room5",
        }

        # Mix lists and rooms
        sync_body = {
            "lists": {
                # We expect this list range to include room5 and room4
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
                # We expect this list range to include room5, room4, room3
                "bar-list": {
                    "ranges": [[0, 2]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "lists": ["foo-list", "non-existent-list"],
                    "rooms": [room_id1, room_id2, "!non-existent-room"],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ✅ Requested via `rooms` and a room subscription exists
        # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions)
        # room3: ❌ Not requested
        # room4: ✅ Shows up because requested via `lists` and list exists in the response
        # room5: ✅ Shows up because requested via `lists` and list exists in the response
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"]["account_data"]
                .get("rooms")
                .keys()
            },
            {"room1", "room4", "room5"},
            exact=True,
        )

        # Try wildcards (this is the default)
        sync_body = {
            "lists": {
                # We expect this list range to include room5 and room4
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
                # We expect this list range to include room5, room4, room3
                "bar-list": {
                    "ranges": [[0, 2]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    # "lists": ["*"],
                    # "rooms": ["*"],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions
        # room2: ❌ Not requested
        # room3: ✅ Shows up because of default `lists` wildcard and is in a list
        # room4: ✅ Shows up because of default `lists` wildcard and is in a list
        # room5: ✅ Shows up because of default `lists` wildcard and is in a list
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"]["account_data"]
                .get("rooms")
                .keys()
            },
            {"room1", "room3", "room4", "room5"},
            exact=True,
        )

        # Empty list will return nothing
        sync_body = {
            "lists": {
                # We expect this list range to include room5 and room4
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
                # We expect this list range to include room5, room4, room3
                "bar-list": {
                    "ranges": [[0, 2]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "lists": [],
                    "rooms": [],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ❌ Not requested
        # room2: ❌ Not requested
        # room3: ❌ Not requested
        # room4: ❌ Not requested
        # room5: ❌ Not requested
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"]["account_data"]
                .get("rooms")
                .keys()
            },
            set(),
            exact=True,
        )

        # Try wildcard and none
        sync_body = {
            "lists": {
                # We expect this list range to include room5 and room4
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
                # We expect this list range to include room5, room4, room3
                "bar-list": {
                    "ranges": [[0, 2]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "lists": ["*"],
                    "rooms": [],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ❌ Not requested
        # room2: ❌ Not requested
        # room3: ✅ Shows up because of default `lists` wildcard and is in a list
        # room4: ✅ Shows up because of default `lists` wildcard and is in a list
        # room5: ✅ Shows up because of default `lists` wildcard and is in a list
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"]["account_data"]
                .get("rooms")
                .keys()
            },
            {"room3", "room4", "room5"},
            exact=True,
        )

    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the account_data extension enabled
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the global account data to trigger new results
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user1_id,
                "org.matrix.foobarbaz",
                {"foo": "bar"},
            )
        )
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        # We should see the global account data update
        self.assertIncludes(
            {
                global_event["type"]
                for global_event in channel.json_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            {"org.matrix.foobarbaz"},
            exact=True,
        )
        self.assertIncludes(
            channel.json_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive but
        no data ever arrives so we timeout. We're also making sure that the default data
        from the account_data extension doesn't trigger a false-positive for new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake-up `notifier.wait_for_events(...)` that will cause us test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id,
            # We choose `StreamKeyType.PRESENCE` because we're testing for account data
            # and don't want to contaminate the account data results using
            # `StreamKeyType.ACCOUNT_DATA`.
            wake_stream_key=StreamKeyType.PRESENCE,
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        self.assertIsNotNone(
            channel.json_body["extensions"]["account_data"].get("global")
        )
        self.assertIsNotNone(
            channel.json_body["extensions"]["account_data"].get("rooms")
        )