Merge remote-tracking branch 'upstream/release-v1.46'

Tulir Asokan 2021-10-27 15:42:34 +03:00
commit cf45cfd314
172 changed files with 5549 additions and 2350 deletions

synapse/rest/client/relations.py

@@ -128,9 +128,10 @@ class RelationSendServlet(RestServlet):
         content["m.relates_to"] = {
             "event_id": parent_id,
-            "key": aggregation_key,
             "rel_type": relation_type,
         }
+        if aggregation_key is not None:
+            content["m.relates_to"]["key"] = aggregation_key
 
         event_dict = {
             "type": event_type,
@@ -232,12 +233,12 @@ class RelationPaginationServlet(RestServlet):
         # Similarly, we don't allow relations to be applied to relations, so we
         # return the original relations without any aggregations on top of them
         # here.
-        events = await self._event_serializer.serialize_events(
+        serialized_events = await self._event_serializer.serialize_events(
             events, now, bundle_aggregations=False
         )
 
         return_value = pagination_chunk.to_dict()
-        return_value["chunk"] = events
+        return_value["chunk"] = serialized_events
         return_value["original_event"] = original_event
 
         return 200, return_value
@@ -416,10 +417,10 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         )
 
         now = self.clock.time_msec()
-        events = await self._event_serializer.serialize_events(events, now)
+        serialized_events = await self._event_serializer.serialize_events(events, now)
 
         return_value = result.to_dict()
-        return_value["chunk"] = events
+        return_value["chunk"] = serialized_events
 
         return 200, return_value
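
Both pagination hunks are the same rename: the serialized result no longer rebinds the events name, so the raw event objects and the JSON-ready dicts stay distinct. A contrived sketch of the shadowing hazard the rename avoids (all names here are hypothetical stand-ins, not Synapse internals):

def serialize_events(events):
    # Stand-in for EventClientSerializer.serialize_events: raw events in,
    # JSON-ready dicts out.
    return [{"event_id": event_id} for event_id in events]

def paginate(events):
    # Binding the result to a new name keeps the raw list available and the
    # types unambiguous; rebinding `events` would shadow it for the rest of
    # the function.
    serialized_events = serialize_events(events)
    return {"chunk": serialized_events}

print(paginate(["$a", "$b"]))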

synapse/rest/client/room_batch.py

@@ -32,7 +32,6 @@ from synapse.http.servlet import (
 from synapse.http.site import SynapseRequest
 from synapse.rest.client.transactions import HttpTransactionCache
 from synapse.types import JsonDict
-from synapse.util.stringutils import random_string
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -164,11 +163,6 @@ class RoomBatchSendEventRestServlet(RestServlet):
         base_insertion_event = None
         if batch_id_from_query:
             batch_id_to_connect_to = batch_id_from_query
-            # All but the first base insertion event should point at a fake
-            # event, which causes the HS to ask for the state at the start of
-            # the batch later.
-            fake_prev_event_id = "$" + random_string(43)
-            prev_event_ids = [fake_prev_event_id]
         # Otherwise, create an insertion event to act as a starting point.
         #
         # We don't always have an insertion event to start hanging more history
@@ -177,8 +171,6 @@ class RoomBatchSendEventRestServlet(RestServlet):
         # an insertion event), in which case we just create a new insertion event
         # that can then get pointed to by a "marker" event later.
         else:
-            prev_event_ids = prev_event_ids_from_query
-
             base_insertion_event_dict = (
                 self.room_batch_handler.create_insertion_event_dict(
                     sender=requester.user.to_string(),
@@ -186,7 +178,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
                     origin_server_ts=last_event_in_batch["origin_server_ts"],
                 )
             )
-            base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
+            base_insertion_event_dict["prev_events"] = prev_event_ids_from_query.copy()
 
             (
                 base_insertion_event,
@@ -207,6 +199,11 @@ class RoomBatchSendEventRestServlet(RestServlet):
                 EventContentFields.MSC2716_NEXT_BATCH_ID
             ]
 
+        # Also connect the historical event chain to the end of the floating
+        # state chain, which causes the HS to ask for the state at the start of
+        # the batch later.
+        prev_event_ids = [state_event_ids_at_start[-1]]
+
         # Create and persist all of the historical events as well as insertion
         # and batch meta events to make the batch navigable in the DAG.
         event_ids, next_batch_id = await self.room_batch_handler.handle_batch_of_events(
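
The net effect of the room_batch.py hunks: instead of minting a fake prev event with random_string(43), the historical chain now hangs off the last event of the floating state chain (state_event_ids_at_start). A rough, self-contained sketch of that chaining, using plain dicts and made-up event IDs rather than Synapse internals:

# Hypothetical illustration of wiring a batch into the DAG.
state_event_ids_at_start = ["$state-member", "$state-power-levels"]

# Anchor the batch on the real end of the floating state chain, not a
# fabricated prev event id.
prev_event_ids = [state_event_ids_at_start[-1]]

historical_events = []
for body in ["hello", "world"]:
    event_id = f"$historical-{body}"
    historical_events.append(
        {"event_id": event_id, "prev_events": list(prev_event_ids)}
    )
    prev_event_ids = [event_id]  # each event points at the one before it

for event in historical_events:
    print(event)

Because the first historical event's prev_events now references a real state event, a homeserver walking the chain backwards is led to the batch's starting state rather than to an event that never existed.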