anonymousland-synapse/synapse/handlers/pagination.py

# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2017 - 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Set

import attr

from twisted.python.failure import Failure

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.handlers.room import ShutdownRoomResponse
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@attr.s(slots=True, auto_attribs=True)
class PurgeStatus:
    """Object tracking the status of a purge request

    This class contains information on the progress of a purge request, for
    return by get_purge_status.
    """

    STATUS_ACTIVE = 0
    STATUS_COMPLETE = 1
    STATUS_FAILED = 2

    STATUS_TEXT = {
        STATUS_ACTIVE: "active",
        STATUS_COMPLETE: "complete",
        STATUS_FAILED: "failed",
    }

    # Save the error message if an error occurs
    error: str = ""

    # Tracks whether this request has completed. One of STATUS_{ACTIVE,COMPLETE,FAILED}.
    status: int = STATUS_ACTIVE

    def asdict(self) -> JsonDict:
        ret = {"status": PurgeStatus.STATUS_TEXT[self.status]}
        if self.error:
            ret["error"] = self.error
        return ret


@attr.s(slots=True, auto_attribs=True)
class DeleteStatus:
    """Object tracking the status of a delete room request

    This class contains information on the progress of a delete room request, for
    return by get_delete_status.
    """

    STATUS_PURGING = 0
    STATUS_COMPLETE = 1
    STATUS_FAILED = 2
    STATUS_SHUTTING_DOWN = 3

    STATUS_TEXT = {
        STATUS_PURGING: "purging",
        STATUS_COMPLETE: "complete",
        STATUS_FAILED: "failed",
        STATUS_SHUTTING_DOWN: "shutting_down",
    }

    # Tracks whether this request has completed.
    # One of STATUS_{PURGING,COMPLETE,FAILED,SHUTTING_DOWN}.
    status: int = STATUS_PURGING

    # Save the error message if an error occurs
    error: str = ""

    # Saves the result of the shutdown so it can be returned by the REST API
    shutdown_room: ShutdownRoomResponse = {
        "kicked_users": [],
        "failed_to_kick_users": [],
        "local_aliases": [],
        "new_room_id": None,
    }

    def asdict(self) -> JsonDict:
        ret = {
            "status": DeleteStatus.STATUS_TEXT[self.status],
            "shutdown_room": self.shutdown_room,
        }
        if self.error:
            ret["error"] = self.error
        return ret


class PaginationHandler:
    """Handles pagination and purge history requests.

    These are in the same handler because we need to block clients from
    paginating while a purge is in progress.
    """

    # when to remove a completed deletion/purge from the results map
    CLEAR_PURGE_AFTER_MS = 1000 * 3600 * 24  # 24 hours

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state
        self.clock = hs.get_clock()
        self._server_name = hs.hostname
        self._room_shutdown_handler = hs.get_room_shutdown_handler()

        # Pagination takes the read lock on a room; purges and deletes take the
        # write lock, so a purge blocks pagination in that room until it is done.
        self.pagination_lock = ReadWriteLock()
        # IDs of rooms in which there is currently an active purge *or delete* operation.
        self._purges_in_progress_by_room: Set[str] = set()

        # map from purge id to PurgeStatus
        self._purges_by_id: Dict[str, PurgeStatus] = {}
        # map from delete id to DeleteStatus
        self._delete_by_id: Dict[str, DeleteStatus] = {}
        # map from room id to delete ids
        # Dict[`room_id`, List[`delete_id`]]
        self._delete_by_room: Dict[str, List[str]] = {}

        self._event_serializer = hs.get_event_client_serializer()

self._retention_default_max_lifetime = (
hs.config.retention.retention_default_max_lifetime
)
self._retention_allowed_lifetime_min = (
hs.config.retention.retention_allowed_lifetime_min
)
self._retention_allowed_lifetime_max = (
hs.config.retention.retention_allowed_lifetime_max
)
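
        # For reference, a homeserver retention config of roughly this shape
        # drives the purge jobs set up below (an illustrative sketch; see the
        # message retention documentation for the authoritative schema):
        #
        #   retention:
        #     enabled: true
        #     default_policy:
        #       max_lifetime: 1y
        #     allowed_lifetime_min: 1d
        #     allowed_lifetime_max: 1y
        #     purge_jobs:
        #       - shortest_max_lifetime: 1d
        #         longest_max_lifetime: 3d
        #         interval: 12h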
if (
hs.config.worker.run_background_tasks
and hs.config.retention.retention_enabled
):
# Run the purge jobs described in the configuration file.
for job in hs.config.retention.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
self.clock.looping_call(
run_as_background_process,
job.interval,
"purge_history_for_rooms_in_range",
self.purge_history_for_rooms_in_range,
job.shortest_max_lifetime,
job.longest_max_lifetime,
)

    async def purge_history_for_rooms_in_range(
self, min_ms: Optional[int], max_ms: Optional[int]
) -> None:
"""Purge outdated events from rooms within the given retention range.
If a default retention policy is defined in the server's configuration and its
'max_lifetime' is within this range, also targets rooms which don't have a
retention policy.
Args:
min_ms: Duration in milliseconds that define the lower limit of
the range to handle (exclusive). If None, it means that the range has no
lower limit.
max_ms: Duration in milliseconds that define the upper limit of
the range to handle (inclusive). If None, it means that the range has no
upper limit.
"""
        # We want the storage layer to include rooms with no retention policy in its
        # return value only if a default retention policy is defined in the server's
        # configuration and that policy's 'max_lifetime' is both higher than min_ms
        # and lower than (or equal to) max_ms.
if self._retention_default_max_lifetime is not None:
include_null = True
if min_ms is not None and min_ms >= self._retention_default_max_lifetime:
# The default max_lifetime is lower than (or equal to) min_ms.
include_null = False
if max_ms is not None and max_ms < self._retention_default_max_lifetime:
# The default max_lifetime is higher than max_ms.
include_null = False
else:
include_null = False
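
        # e.g. with a default retention max_lifetime of three days, a job
        # covering the range (one day, one week] includes rooms without a
        # policy, while a job covering (three days, one week] does not.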
logger.info(
"[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)",
min_ms,
max_ms,
include_null,
)
rooms = await self.store.get_rooms_for_retention_period_in_range(
min_ms, max_ms, include_null
)
logger.debug("[purge] Rooms to purge: %s", rooms)
for room_id, retention_policy in rooms.items():
logger.info("[purge] Attempting to purge messages in room %s", room_id)
if room_id in self._purges_in_progress_by_room:
logger.warning(
"[purge] not purging room %s as there's an ongoing purge running"
" for this room",
room_id,
)
continue
# If max_lifetime is None, it means that the room has no retention policy.
# Given we only retrieve such rooms when there's a default retention policy
# defined in the server's configuration, we can safely assume that's the
# case and use it for this room.
max_lifetime = (
retention_policy["max_lifetime"] or self._retention_default_max_lifetime
)
# Cap the effective max_lifetime to be within the range allowed in the
# config.
# We do this in two steps:
# 1. Make sure it's higher or equal to the minimum allowed value, and if
# it's not replace it with that value. This is because the server
# operator can be required to not delete information before a given
# time, e.g. to comply with freedom of information laws.
# 2. Make sure the resulting value is lower or equal to the maximum allowed
# value, and if it's not replace it with that value. This is because the
# server operator can be required to delete any data after a specific
# amount of time.
if self._retention_allowed_lifetime_min is not None:
max_lifetime = max(self._retention_allowed_lifetime_min, max_lifetime)
if self._retention_allowed_lifetime_max is not None:
max_lifetime = min(max_lifetime, self._retention_allowed_lifetime_max)
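            # e.g. with allowed_lifetime_min of one day and allowed_lifetime_max
            # of one year, a per-room policy of one hour is raised to one day,
            # and a policy of five years is lowered to one year.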
logger.debug("[purge] max_lifetime for room %s: %s", room_id, max_lifetime)
# Figure out what token we should start purging at.
ts = self.clock.time_msec() - max_lifetime
stream_ordering = await self.store.find_first_stream_ordering_after_ts(ts)
r = await self.store.get_room_event_before_stream_ordering(
room_id,
stream_ordering,
)
if not r:
logger.warning(
"[purge] purging events not possible: No event found "
"(ts %i => stream_ordering %i)",
ts,
stream_ordering,
)
continue
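            # r is a (stream_ordering, topological_ordering, event_id) tuple;
            # a topological pagination token serialises as "t<topo>-<stream>",
            # e.g. "t123-456", which is what the next two lines build.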
(stream, topo, _event_id) = r
token = "t%d-%d" % (topo, stream)
purge_id = random_string(16)
self._purges_by_id[purge_id] = PurgeStatus()
            logger.info(
                "Starting purging events in room %s (purge_id %s)", room_id, purge_id
            )
# We want to purge everything, including local events, and to run the purge in
# the background so that it's not blocking any other operation apart from
# other purges in the same room.
run_as_background_process(
"_purge_history",
self._purge_history,
purge_id,
room_id,
token,
True,
)

    def start_purge_history(
self, room_id: str, token: str, delete_local_events: bool = False
) -> str:
        """Start off a history purge on a room.

        Args:
            room_id: The room to purge from
            token: topological token to delete events before
            delete_local_events: True to delete local events as well as
                remote ones

        Returns:
            unique ID for this purge transaction.
        """
if room_id in self._purges_in_progress_by_room:
raise SynapseError(
400, "History purge already in progress for %s" % (room_id,)
)
purge_id = random_string(16)
# we log the purge_id here so that it can be tied back to the
# request id in the log lines.
logger.info("[purge] starting purge_id %s", purge_id)
self._purges_by_id[purge_id] = PurgeStatus()
run_as_background_process(
"purge_history",
self._purge_history,
purge_id,
room_id,
token,
delete_local_events,
)
return purge_id
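
    # For context, the v1 purge-history admin API drives the two methods above
    # and get_purge_status below (an illustrative sketch; see the admin API
    # docs for the authoritative endpoints):
    #
    #   POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
    #       -> start_purge_history, returns {"purge_id": "<opaque id>"}
    #   GET /_synapse/admin/v1/purge_history_status/<purge_id>
    #       -> get_purge_status, returns PurgeStatus.asdict()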

    async def _purge_history(
self, purge_id: str, room_id: str, token: str, delete_local_events: bool
) -> None:
        """Carry out a history purge on a room.

        Args:
            purge_id: The ID for this purge.
            room_id: The room to purge from
            token: topological token to delete events before
            delete_local_events: True to delete local events as well as remote ones
        """
self._purges_in_progress_by_room.add(room_id)
try:
with await self.pagination_lock.write(room_id):
await self.storage.purge_events.purge_history(
room_id, token, delete_local_events
)
logger.info("[purge] complete")
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
except Exception:
f = Failure()
logger.error(
"[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) # type: ignore
)
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
self._purges_by_id[purge_id].error = f.getErrorMessage()
finally:
self._purges_in_progress_by_room.discard(room_id)

        # remove the purge from the list 24 hours after it completes
def clear_purge() -> None:
del self._purges_by_id[purge_id]

        self.hs.get_reactor().callLater(
PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_purge
)

    def get_purge_status(self, purge_id: str) -> Optional[PurgeStatus]:
        """Get the current status of an active purge

        Args:
            purge_id: purge_id returned by start_purge_history
        """
        return self._purges_by_id.get(purge_id)

    def get_delete_status(self, delete_id: str) -> Optional[DeleteStatus]:
        """Get the current status of an active room deletion

        Args:
            delete_id: delete_id returned by start_shutdown_and_purge_room
        """
        return self._delete_by_id.get(delete_id)

    def get_delete_ids_by_room(self, room_id: str) -> Optional[Collection[str]]:
        """Get all active delete ids for a given room

        Args:
            room_id: the room that is being deleted
        """
        return self._delete_by_room.get(room_id)

    async def purge_room(self, room_id: str, force: bool = False) -> None:
        """Purge the given room from the database.

        This function is part of the delete room v1 API.

        Args:
            room_id: room to be purged
            force: set true to skip checking for joined users.
        """
        with await self.pagination_lock.write(room_id):
            # check we know about the room
            await self.store.get_room_version_id(room_id)
# first check that we have no users in this room
if not force:
joined = await self.store.is_host_joined(room_id, self._server_name)
if joined:
raise SynapseError(400, "Users are still joined to this room")

        await self.storage.purge_events.purge_room(room_id)

    async def get_messages(
self,
requester: Requester,
room_id: str,
pagin_config: PaginationConfig,
as_client_event: bool = True,
event_filter: Optional[Filter] = None,
) -> Dict[str, Any]:
        """Get messages in a room.

        Args:
            requester: The user requesting messages.
            room_id: The room they want messages from.
            pagin_config: The pagination config rules to apply, if any.
            as_client_event: True to get events in client-server format.
            event_filter: Filter to apply to results or None

        Returns:
            Pagination API results
        """
user_id = requester.user.to_string()
if pagin_config.from_token:
from_token = pagin_config.from_token
else:
from_token = self.hs.get_event_sources().get_current_token_for_pagination()

        if pagin_config.limit is None:
# This shouldn't happen as we've set a default limit before this
# gets called.
raise Exception("limit not set")

        room_token = from_token.room_key

        with await self.pagination_lock.read(room_id):
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)

            if pagin_config.direction == "b":
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
curr_topo = room_token.topological
else:
curr_topo = await self.store.get_current_topological_token(
room_id, room_token.stream
)
if membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
# This is only None if the room is world_readable, in which
# case "JOIN" would have been returned.
assert member_event_id
leave_token = await self.store.get_topological_token_for_event(
member_event_id
)
assert leave_token.topological is not None
if leave_token.topological < curr_topo:
from_token = from_token.copy_and_replace(
"room_key", leave_token
)

                await self.hs.get_federation_handler().maybe_backfill(
room_id,
curr_topo,
limit=pagin_config.limit,
)
to_room_key = None
if pagin_config.to_token:
to_room_key = pagin_config.to_token.room_key
events, next_key = await self.store.paginate_room_events(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
direction=pagin_config.direction,
limit=pagin_config.limit,
event_filter=event_filter,
)
next_token = from_token.copy_and_replace("room_key", next_key)

        if events:
if event_filter:
events = await event_filter.filter(events)
events = await filter_events_for_client(
self.storage, user_id, events, is_peeking=(member_event_id is None)
)

        if not events:
return {
"chunk": [],
"start": await from_token.to_string(self.store),
"end": await next_token.to_string(self.store),
}

        state = None
if event_filter and event_filter.lazy_load_members and len(events) > 0:
# TODO: remove redundant members
# FIXME: we also care about invite targets etc.
state_filter = StateFilter.from_types(
(EventTypes.Member, event.sender) for event in events
)
state_ids = await self.state_store.get_state_ids_for_event(
events[0].event_id, state_filter=state_filter
)
if state_ids:
state_dict = await self.store.get_events(list(state_ids.values()))
state = state_dict.values()

        time_now = self.clock.time_msec()
chunk = {
"chunk": (
await self._event_serializer.serialize_events(
events, time_now, as_client_event=as_client_event
)
),
"start": await from_token.to_string(self.store),
"end": await next_token.to_string(self.store),
}

        if state:
chunk["state"] = await self._event_serializer.serialize_events(
state, time_now, as_client_event=as_client_event
)

        return chunk

    async def _shutdown_and_purge_room(
self,
delete_id: str,
room_id: str,
requester_user_id: str,
new_room_user_id: Optional[str] = None,
new_room_name: Optional[str] = None,
message: Optional[str] = None,
block: bool = False,
purge: bool = True,
force_purge: bool = False,
) -> None:
"""
Shuts down and purges a room.
See `RoomShutdownHandler.shutdown_room` for details of creation of the new room
Args:
delete_id: The ID for this delete.
room_id: The ID of the room to shut down.
requester_user_id:
User who requested the action. Will be recorded as putting the room on the
blocking list.
new_room_user_id:
If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be
moved into that room. If not set, no new room will be created
and the users will just be removed from the old room.
new_room_name:
A string representing the name of the room that new users will
be invited to. Defaults to `Content Violation Notification`
message:
A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly
convey why the original room was shut down.
Defaults to `Sharing illegal content on this server is not
permitted and rooms in violation will be blocked.`
block:
If set to `true`, this room will be added to a blocking list,
preventing future attempts to join the room. Defaults to `false`.
purge:
If set to `true`, purge the given room from the database.
force_purge:
If set to `true`, the room will be purged from database
also if it fails to remove some users from room.
Saves a `RoomShutdownHandler.ShutdownRoomResponse` in `DeleteStatus`:
"""
self._purges_in_progress_by_room.add(room_id)
try:
with await self.pagination_lock.write(room_id):
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
self._delete_by_id[
delete_id
].shutdown_room = await self._room_shutdown_handler.shutdown_room(
room_id=room_id,
requester_user_id=requester_user_id,
new_room_user_id=new_room_user_id,
new_room_name=new_room_name,
message=message,
block=block,
)
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_PURGING
if purge:
logger.info("starting purge room_id %s", room_id)
# first check that we have no users in this room
if not force_purge:
joined = await self.store.is_host_joined(
room_id, self._server_name
)
if joined:
raise SynapseError(
400, "Users are still joined to this room"
)
await self.storage.purge_events.purge_room(room_id)
logger.info("complete")
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
except Exception:
f = Failure()
logger.error(
"failed",
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
)
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED
self._delete_by_id[delete_id].error = f.getErrorMessage()
finally:
self._purges_in_progress_by_room.discard(room_id)

        # remove the delete from the list 24 hours after it completes
def clear_delete() -> None:
del self._delete_by_id[delete_id]
self._delete_by_room[room_id].remove(delete_id)
if not self._delete_by_room[room_id]:
del self._delete_by_room[room_id]

        self.hs.get_reactor().callLater(
PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_delete
)

    def start_shutdown_and_purge_room(
self,
room_id: str,
requester_user_id: str,
new_room_user_id: Optional[str] = None,
new_room_name: Optional[str] = None,
message: Optional[str] = None,
block: bool = False,
purge: bool = True,
force_purge: bool = False,
) -> str:
"""Start off shut down and purge on a room.
Args:
room_id: The ID of the room to shut down.
requester_user_id:
User who requested the action and put the room on the
blocking list.
new_room_user_id:
If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be
moved into that room. If not set, no new room will be created
and the users will just be removed from the old room.
new_room_name:
A string representing the name of the room that new users will
be invited to. Defaults to `Content Violation Notification`
message:
A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly
convey why the original room was shut down.
Defaults to `Sharing illegal content on this server is not
permitted and rooms in violation will be blocked.`
block:
If set to `true`, this room will be added to a blocking list,
preventing future attempts to join the room. Defaults to `false`.
purge:
If set to `true`, purge the given room from the database.
force_purge:
If set to `true`, the room will be purged from database
also if it fails to remove some users from room.
Returns:
unique ID for this delete transaction.
"""
if room_id in self._purges_in_progress_by_room:
raise SynapseError(
400, "History purge already in progress for %s" % (room_id,)
)
        # This check duplicates the one in `RoomShutdownHandler.shutdown_room`,
        # but doing it here means the requester gets a direct error response to
        # the HTTP request instead of having to poll the purge status.
if new_room_user_id is not None:
if not self.hs.is_mine_id(new_room_user_id):
raise SynapseError(
400, "User must be our own: %s" % (new_room_user_id,)
)
delete_id = random_string(16)
# we log the delete_id here so that it can be tied back to the
# request id in the log lines.
logger.info(
"starting shutdown room_id %s with delete_id %s",
room_id,
delete_id,
)
self._delete_by_id[delete_id] = DeleteStatus()
self._delete_by_room.setdefault(room_id, []).append(delete_id)
run_as_background_process(
"shutdown_and_purge_room",
self._shutdown_and_purge_room,
delete_id,
room_id,
requester_user_id,
new_room_user_id,
new_room_name,
message,
block,
purge,
force_purge,
)
return delete_id
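
    # For context, the v2 delete-room admin API drives the methods above (an
    # illustrative sketch; see the admin API docs for the authoritative
    # endpoints):
    #
    #   DELETE /_synapse/admin/v2/rooms/<room_id>
    #       -> start_shutdown_and_purge_room, returns {"delete_id": "<opaque id>"}
    #   GET /_synapse/admin/v2/rooms/<room_id>/delete_status
    #       -> get_delete_ids_by_room + get_delete_status, returning the
    #          statuses as DeleteStatus.asdict() objects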