Merge pull request #5826 from matrix-org/erikj/reduce_event_pauses
Don't unnecessarily block notifying of new events.
Commit: 2546f32b90
changelog.d/5826.misc (new file)

@@ -0,0 +1 @@
+Reduce global pauses in the events stream caused by expensive state resolution during persistence.
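The changelog entry summarises the problem the events-store diff below addresses: stream orderings for an entire batch were reserved before any of the expensive per-chunk state resolution ran, and while those orderings were outstanding the events stream could not advance, delaying notification of unrelated events. The sketch below is only a minimal illustration of the before/after shape of the restructuring; `chunked`, `resolve_state`, `write_to_db` and the dict-based events are made-up stand-ins for this sketch, not Synapse APIs.

```python
# Simplified sketch of the restructuring in this PR, with made-up helper
# names; not the actual Synapse code.

def chunked(items, size):
    """Split a list into consecutive chunks of at most `size` items."""
    return [items[i : i + size] for i in range(0, len(items), size)]

def persist_before(events, id_gen, resolve_state, write_to_db):
    # Old shape: reserve stream orderings for the whole batch up front and
    # hold them "in flight" across all of the expensive per-chunk work.
    with id_gen.get_next_mult(len(events)) as orderings:
        for event, ordering in zip(events, orderings):
            event["stream_ordering"] = ordering
        for chunk in chunked(events, 100):
            resolve_state(chunk)   # potentially seconds of state resolution
            write_to_db(chunk)

def persist_after(events, id_gen, resolve_state, write_to_db):
    # New shape: do the expensive work first, then reserve orderings for a
    # single chunk immediately before writing it, so the in-flight window
    # only covers the actual database write.
    for chunk in chunked(events, 100):
        resolve_state(chunk)
        with id_gen.get_next_mult(len(chunk)) as orderings:
            for event, ordering in zip(chunk, orderings):
                event["stream_ordering"] = ordering
            write_to_db(chunk)
```

Allocating per chunk right before the write keeps the in-flight window roughly as short as the database transaction itself, which is the point of the change.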
@@ -364,19 +364,6 @@ class EventsStore(
         if not events_and_contexts:
             return

-        if backfilled:
-            stream_ordering_manager = self._backfill_id_gen.get_next_mult(
-                len(events_and_contexts)
-            )
-        else:
-            stream_ordering_manager = self._stream_id_gen.get_next_mult(
-                len(events_and_contexts)
-            )
-
-        with stream_ordering_manager as stream_orderings:
-            for (event, context), stream in zip(events_and_contexts, stream_orderings):
-                event.internal_metadata.stream_ordering = stream
-
         chunks = [
             events_and_contexts[x : x + 100]
             for x in range(0, len(events_and_contexts), 100)
@@ -506,6 +493,33 @@ class EventsStore(
                         if current_state is not None:
                             current_state_for_room[room_id] = current_state

+            # We want to calculate the stream orderings as late as possible, as
+            # we only notify after all events with a lesser stream ordering have
+            # been persisted. I.e. if we spend 10s inside the with block then
+            # that will delay all subsequent events from being notified about.
+            # Hence why we do it down here rather than wrapping the entire
+            # function.
+            #
+            # Its safe to do this after calculating the state deltas etc as we
+            # only need to protect the *persistence* of the events. This is to
+            # ensure that queries of the form "fetch events since X" don't
+            # return events and stream positions after events that are still in
+            # flight, as otherwise subsequent requests "fetch event since Y"
+            # will not return those events.
+            #
+            # Note: Multiple instances of this function cannot be in flight at
+            # the same time for the same room.
+            if backfilled:
+                stream_ordering_manager = self._backfill_id_gen.get_next_mult(
+                    len(chunk)
+                )
+            else:
+                stream_ordering_manager = self._stream_id_gen.get_next_mult(len(chunk))
+
+            with stream_ordering_manager as stream_orderings:
+                for (event, context), stream in zip(chunk, stream_orderings):
+                    event.internal_metadata.stream_ordering = stream
+
                 yield self.runInteraction(
                     "persist_events",
                     self._persist_events_txn,
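The first hunk removes the up-front reservation over the whole batch; the second hunk re-adds it per chunk, after the state deltas have been worked out. The comment block added there leans on the contract of `get_next_mult`: reserved orderings stay "in flight" until the `with` block exits, and the stream token that the notifier and "fetch events since X" queries observe never advances past an unfinished ordering. That is why the `runInteraction` persistence call sits inside the `with`, and why moving the allocation after state resolution shrinks the window during which other events are blocked. The class below is a toy model of that contract for illustration only; it is not Synapse's real `StreamIdGenerator`.

```python
import contextlib
import threading

class ToyStreamIdGenerator:
    """Toy model of the 'in flight' contract assumed by the diff above;
    Synapse's real generator is more involved."""

    def __init__(self, current=0):
        self._lock = threading.Lock()
        self._current = current          # highest id ever handed out
        self._unfinished = set()         # ids handed out but not yet finished

    @contextlib.contextmanager
    def get_next_mult(self, n):
        # Reserve n consecutive ids and keep them "in flight" until the
        # caller's with-block exits (i.e. until persistence has completed).
        with self._lock:
            ids = list(range(self._current + 1, self._current + n + 1))
            self._current += n
            self._unfinished.update(ids)
        try:
            yield ids                    # caller persists events in here
        finally:
            with self._lock:
                self._unfinished.difference_update(ids)

    def get_current_token(self):
        # Readers ("fetch events since X") and the notifier only ever see a
        # token strictly below the smallest unfinished id, so nothing is
        # advertised before it has actually been persisted.
        with self._lock:
            if self._unfinished:
                return min(self._unfinished) - 1
            return self._current

gen = ToyStreamIdGenerator()
with gen.get_next_mult(3) as ids:
    # ids 1-3 are reserved but unfinished; a real caller would assign them
    # to events and write the events to the database here.
    assert gen.get_current_token() == 0  # token cannot advance past them yet
assert gen.get_current_token() == 3      # advances once the block exits
```

Under this contract, holding a reservation open while state resolution runs (the old shape) pins the token for everyone; holding it only around the write (the new shape) does not.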