# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Optional
from unittest.mock import Mock

from twisted.internet import defer
from twisted.internet.defer import succeed

from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server

import tests.unittest
from tests.utils import create_room, setup_test_homeserver

logger = logging.getLogger(__name__)

TEST_ROOM_ID = "!TEST:ROOM"

class FilterEventsForServerTestCase(tests.unittest.TestCase):
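    """Tests for synapse.visibility.filter_events_for_server.

    Checks that events are redacted before being handed to a remote server
    whose users are not allowed to see them.
    """
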
    @defer.inlineCallbacks
    def setUp(self):
        self.hs = yield setup_test_homeserver(self.addCleanup)
        self.event_creation_handler = self.hs.get_event_creation_handler()
        self.event_builder_factory = self.hs.get_event_builder_factory()
        self.storage = self.hs.get_storage()

        yield defer.ensureDeferred(create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM"))

    @defer.inlineCallbacks
    def test_filtering(self):
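        """Membership events sent before any user from the target server joined
        should come back redacted; later events should pass through unchanged.
        """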
        #
        # The events to be filtered consist of 10 membership events (it doesn't
        # really matter if they are joins or leaves, so let's make them joins).
        # One of those membership events is going to be for a user on the
        # server we are filtering for (so we can check the filtering is doing
        # the right thing).
        #

        # before we do that, we persist some other events to act as state.
        yield self.inject_visibility("@admin:hs", "joined")
        for i in range(0, 10):
            yield self.inject_room_member("@resident%i:hs" % i)

        events_to_filter = []

        for i in range(0, 10):
            user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
            evt = yield self.inject_room_member(user, extra_content={"a": "b"})
            events_to_filter.append(evt)

        filtered = yield defer.ensureDeferred(
            filter_events_for_server(self.storage, "test_server", events_to_filter)
        )

        # the result should be 5 redacted events, and 5 unredacted events.
        for i in range(0, 5):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertNotIn("a", filtered[i].content)

        for i in range(5, 10):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertEqual(filtered[i].content["a"], "b")

    @defer.inlineCallbacks
    def test_erased_user(self):
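        """Messages from a user who has since been marked as erased should have
        their content redacted, while other users' messages are left intact.
        """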
        # 4 message events, from erased and unerased users, with a membership
        # change in the middle of them.
        events_to_filter = []

        evt = yield self.inject_message("@unerased:local_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_message("@erased:local_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_room_member("@joiner:remote_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_message("@unerased:local_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_message("@erased:local_hs")
        events_to_filter.append(evt)

        # the erasey user gets erased
        yield defer.ensureDeferred(
            self.hs.get_datastore().mark_user_erased("@erased:local_hs")
        )

        # ... and the filtering happens.
        filtered = yield defer.ensureDeferred(
            filter_events_for_server(self.storage, "test_server", events_to_filter)
        )

        for i in range(0, len(events_to_filter)):
            self.assertEqual(
                events_to_filter[i].event_id,
                filtered[i].event_id,
                "Unexpected event at result position %i" % (i,),
            )

        for i in (0, 3):
            self.assertEqual(
                events_to_filter[i].content["body"],
                filtered[i].content["body"],
                "Unexpected event content at result position %i" % (i,),
            )

        for i in (1, 4):
            self.assertNotIn("body", filtered[i].content)

    @defer.inlineCallbacks
    def inject_visibility(self, user_id, visibility):
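        """Persist an m.room.history_visibility state event in the test room and
        return it.
        """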
content = {"history_visibility": visibility}
|
|
builder = self.event_builder_factory.for_room_version(
|
|
RoomVersions.V1,
|
|
{
|
|
"type": "m.room.history_visibility",
|
|
"sender": user_id,
|
|
"state_key": "",
|
|
"room_id": TEST_ROOM_ID,
|
|
"content": content,
|
|
},
|
|
)
|
|
|
|
event, context = yield defer.ensureDeferred(
|
|
self.event_creation_handler.create_new_client_event(builder)
|
|
)
|
|
yield defer.ensureDeferred(
|
|
self.storage.persistence.persist_event(event, context)
|
|
)
|
|
return event
|
|
|
|
    @defer.inlineCallbacks
    def inject_room_member(
        self, user_id, membership="join", extra_content: Optional[dict] = None
    ):
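        """Persist an m.room.member event for the given user in the test room,
        merging any extra_content into the event content, and return it.
        """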
content = {"membership": membership}
|
|
content.update(extra_content or {})
|
|
builder = self.event_builder_factory.for_room_version(
|
|
RoomVersions.V1,
|
|
{
|
|
"type": "m.room.member",
|
|
"sender": user_id,
|
|
"state_key": user_id,
|
|
"room_id": TEST_ROOM_ID,
|
|
"content": content,
|
|
},
|
|
)
|
|
|
|
event, context = yield defer.ensureDeferred(
|
|
self.event_creation_handler.create_new_client_event(builder)
|
|
)
|
|
|
|
yield defer.ensureDeferred(
|
|
self.storage.persistence.persist_event(event, context)
|
|
)
|
|
return event
|
|
|
|
    @defer.inlineCallbacks
    def inject_message(self, user_id, content=None):
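        """Persist an m.room.message event from the given user in the test room
        and return it. A default text message body is used if no content is given.
        """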
        if content is None:
            content = {"body": "testytest", "msgtype": "m.text"}
        builder = self.event_builder_factory.for_room_version(
            RoomVersions.V1,
            {
                "type": "m.room.message",
                "sender": user_id,
                "room_id": TEST_ROOM_ID,
                "content": content,
            },
        )

        event, context = yield defer.ensureDeferred(
            self.event_creation_handler.create_new_client_event(builder)
        )

        yield defer.ensureDeferred(
            self.storage.persistence.persist_event(event, context)
        )
        return event

    @defer.inlineCallbacks
    def test_large_room(self):
        # see what happens when we have a large room with hundreds of thousands
        # of membership events

        # As above, the events to be filtered consist of 10 membership events,
        # where one of them is for a user on the server we are filtering for.

        import cProfile
        import pstats
        import time

        # we stub out the store, because building up all that state the normal
        # way is very slow.
        test_store = _TestStore()

        # our initial state is 100000 membership events and one
        # history_visibility event.
        room_state = []

        history_visibility_evt = FrozenEvent(
            {
                "event_id": "$history_vis",
                "type": "m.room.history_visibility",
                "sender": "@resident_user_0:test.com",
                "state_key": "",
                "room_id": TEST_ROOM_ID,
                "content": {"history_visibility": "joined"},
            }
        )
        room_state.append(history_visibility_evt)
        test_store.add_event(history_visibility_evt)

        for i in range(0, 100000):
            user = "@resident_user_%i:test.com" % (i,)
            evt = FrozenEvent(
                {
                    "event_id": "$res_event_%i" % (i,),
                    "type": "m.room.member",
                    "state_key": user,
                    "sender": user,
                    "room_id": TEST_ROOM_ID,
                    "content": {"membership": "join", "extra": "zzz,"},
                }
            )
            room_state.append(evt)
            test_store.add_event(evt)

        events_to_filter = []
        for i in range(0, 10):
            user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
            evt = FrozenEvent(
                {
                    "event_id": "$evt%i" % (i,),
                    "type": "m.room.member",
                    "state_key": user,
                    "sender": user,
                    "room_id": TEST_ROOM_ID,
                    "content": {"membership": "join", "extra": "zzz"},
                }
            )
            events_to_filter.append(evt)
            room_state.append(evt)

            test_store.add_event(evt)
            test_store.set_state_ids_for_event(
                evt, {(e.type, e.state_key): e.event_id for e in room_state}
            )

        pr = cProfile.Profile()
        pr.enable()

        logger.info("Starting filtering")
        start = time.time()

        # package the test store up as the `main` and `state` stores of a
        # Storage-like object, which is what filter_events_for_server expects.
        storage = Mock()
        storage.main = test_store
        storage.state = test_store

        filtered = yield defer.ensureDeferred(
            filter_events_for_server(storage, "test_server", events_to_filter)
        )
logger.info("Filtering took %f seconds", time.time() - start)
|
|
|
|
pr.disable()
|
|
with open("filter_events_for_server.profile", "w+") as f:
|
|
ps = pstats.Stats(pr, stream=f).sort_stats("cumulative")
|
|
ps.print_stats()
|
|
|
|
# the result should be 5 redacted events, and 5 unredacted events.
|
|
for i in range(0, 5):
|
|
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
|
|
self.assertNotIn("extra", filtered[i].content)
|
|
|
|
for i in range(5, 10):
|
|
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
|
|
self.assertEqual(filtered[i].content["extra"], "zzz")
|
|
|
|
test_large_room.skip = "Disabled by default because it's slow"
|
|
|
|
|
|


class _TestStore:
    """Implements a few methods of the DataStore, so that we can test
    filter_events_for_server.
    """
    def __init__(self):
        # data for get_events: a map from event_id to event
        self.events = {}

        # data for the get_state_ids_for_events mock: a map from event_id to
        # a map from (type, state_key) -> event_id for the state at that
        # event
        self.state_ids_for_events = {}

    def add_event(self, event):
        self.events[event.event_id] = event

    def set_state_ids_for_event(self, event, state):
        self.state_ids_for_events[event.event_id] = state

    def get_state_ids_for_events(self, events, types):
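        """Return the state at each of the given events, as a map from event_id
        to a map from (type, state_key) to event_id.

        Only supports the queries this test needs: m.room.history_visibility on
        its own, or together with all m.room.member events.
        """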
        res = {}
        include_memberships = False
        for (type, state_key) in types:
            if type == "m.room.history_visibility":
                continue
            if type != "m.room.member" or state_key is not None:
                raise RuntimeError(
                    "Unimplemented: get_state_ids with type (%s, %s)"
                    % (type, state_key)
                )
            include_memberships = True

        if include_memberships:
            for event_id in events:
                res[event_id] = self.state_ids_for_events[event_id]

        else:
            k = ("m.room.history_visibility", "")
            for event_id in events:
                hve = self.state_ids_for_events[event_id][k]
                res[event_id] = {k: hve}

        return succeed(res)

    def get_events(self, events):
        return succeed({event_id: self.events[event_id] for event_id in events})

    def are_users_erased(self, users):
        return succeed({u: False for u in users})