incorporate review

This commit is contained in:
Matthew Hodgson 2018-07-19 19:03:50 +01:00
parent 924eb34d94
commit bcaec2915a
2 changed files with 31 additions and 20 deletions

View File

@ -435,7 +435,7 @@ class SyncHandler(object):
A Deferred map from ((type, state_key)->Event) A Deferred map from ((type, state_key)->Event)
""" """
state_ids = yield self.store.get_state_ids_for_event( state_ids = yield self.store.get_state_ids_for_event(
event.event_id, types, filtered_types=filtered_types event.event_id, types, filtered_types=filtered_types,
) )
if event.is_state(): if event.is_state():
state_ids = state_ids.copy() state_ids = state_ids.copy()
@ -470,7 +470,7 @@ class SyncHandler(object):
if last_events: if last_events:
last_event = last_events[-1] last_event = last_events[-1]
state = yield self.get_state_after_event( state = yield self.get_state_after_event(
last_event, types, filtered_types=filtered_types last_event, types, filtered_types=filtered_types,
) )
else: else:
@ -505,7 +505,6 @@ class SyncHandler(object):
with Measure(self.clock, "compute_state_delta"): with Measure(self.clock, "compute_state_delta"):
types = None types = None
member_state_ids = {}
lazy_load_members = sync_config.filter_collection.lazy_load_members() lazy_load_members = sync_config.filter_collection.lazy_load_members()
filtered_types = None filtered_types = None
@ -521,10 +520,6 @@ class SyncHandler(object):
) )
] ]
# We can't remove redundant member types at this stage as it has
# to be done based on event_id, and we don't have the member
# event ids until we've pulled them out of the DB.
# only apply the filtering to room members # only apply the filtering to room members
filtered_types = [EventTypes.Member] filtered_types = [EventTypes.Member]
@ -532,27 +527,32 @@ class SyncHandler(object):
if batch: if batch:
current_state_ids = yield self.store.get_state_ids_for_event( current_state_ids = yield self.store.get_state_ids_for_event(
batch.events[-1].event_id, types=types, batch.events[-1].event_id, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
state_ids = yield self.store.get_state_ids_for_event( state_ids = yield self.store.get_state_ids_for_event(
batch.events[0].event_id, types=types, batch.events[0].event_id, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
else: else:
current_state_ids = yield self.get_state_at( current_state_ids = yield self.get_state_at(
room_id, stream_position=now_token, types=types, room_id, stream_position=now_token, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
state_ids = current_state_ids state_ids = current_state_ids
# track the membership state events as of the beginning of this
# timeline sequence, so they can be filtered out of the state
# if we are lazy loading members.
if lazy_load_members: if lazy_load_members:
member_state_ids = { member_state_ids = {
t: state_ids[t] t: state_ids[t]
for t in state_ids if t[0] == EventTypes.Member for t in state_ids if t[0] == EventTypes.Member
} }
else:
member_state_ids = {}
timeline_state = { timeline_state = {
(event.type, event.state_key): event.event_id (event.type, event.state_key): event.event_id
@ -569,28 +569,38 @@ class SyncHandler(object):
elif batch.limited: elif batch.limited:
state_at_previous_sync = yield self.get_state_at( state_at_previous_sync = yield self.get_state_at(
room_id, stream_position=since_token, types=types, room_id, stream_position=since_token, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
current_state_ids = yield self.store.get_state_ids_for_event( current_state_ids = yield self.store.get_state_ids_for_event(
batch.events[-1].event_id, types=types, batch.events[-1].event_id, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
state_at_timeline_start = yield self.store.get_state_ids_for_event( state_at_timeline_start = yield self.store.get_state_ids_for_event(
batch.events[0].event_id, types=types, batch.events[0].event_id, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
# track the membership state events as of the beginning of this
# timeline sequence, so they can be filtered out of the state
# if we are lazy loading members.
if lazy_load_members: if lazy_load_members:
# TODO: filter out redundant members based on their event_ids # TODO: optionally filter out redundant membership events at this
# (not mxids) at this point. In practice, limited syncs are # point, to stop repeatedly sending members in every /sync as if
# the client isn't tracking them.
# When implemented, this should filter using event_ids (not mxids).
# In practice, limited syncs are
# relatively rare so it's not a total disaster to send redundant # relatively rare so it's not a total disaster to send redundant
# members down at this point. # members down at this point. Redundant members are ones which
# repeatedly get sent down /sync because we don't know if the client
# is caching them or not.
member_state_ids = { member_state_ids = {
t: state_at_timeline_start[t] t: state_at_timeline_start[t]
for t in state_at_timeline_start if t[0] == EventTypes.Member for t in state_at_timeline_start if t[0] == EventTypes.Member
} }
else:
member_state_ids = {}
timeline_state = { timeline_state = {
(event.type, event.state_key): event.event_id (event.type, event.state_key): event.event_id
@ -614,7 +624,7 @@ class SyncHandler(object):
if types: if types:
state_ids = yield self.store.get_state_ids_for_event( state_ids = yield self.store.get_state_ids_for_event(
batch.events[0].event_id, types=types, batch.events[0].event_id, types=types,
filtered_types=filtered_types filtered_types=filtered_types,
) )
state = {} state = {}

View File

@ -545,9 +545,10 @@ class StateGroupWorkerStore(SQLBaseStore):
if state_key is None: if state_key is None:
type_to_key[typ] = None type_to_key[typ] = None
# XXX: why do we mark the type as missing from our cache just # we mark the type as missing from the cache because
# because we weren't filtering on a specific value of state_key? # when the cache was populated it might have been done with a
# is it because the cache doesn't handle wildcards? # restricted set of state_keys, so the wildcard will not work
# and the cache may be incomplete.
missing_types.add(key) missing_types.add(key)
else: else:
if type_to_key.get(typ, object()) is not None: if type_to_key.get(typ, object()) is not None: