# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from synapse.federation.pdu_codec import encode_event_id, decode_event_id
from synapse.util.logutils import log_function

from collections import namedtuple

import logging
import hashlib

logger = logging.getLogger(__name__)


def _get_state_key_from_event(event):
    return event.state_key


KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))


class StateHandler(object):
    """ Responsible for doing state conflict resolution.
    """

    def __init__(self, hs):
        self.store = hs.get_datastore()
        self._replication = hs.get_replication_layer()
        self.server_name = hs.hostname

    @defer.inlineCallbacks
    @log_function
    def handle_new_event(self, event, snapshot):
        """ Given an event, this works out (a) whether we have sufficient
        power level to update the state and (b) what the prev_state should
        be.

        Returns:
            Deferred: Resolved with a boolean indicating whether we
            successfully updated the state.

        Raises:
            AuthError
        """

        # This needs to be done in a transaction.

        if not hasattr(event, "state_key"):
            return

        key = KeyStateTuple(
            event.room_id,
            event.type,
            _get_state_key_from_event(event)
        )

        # Now I need to fill out the prev state and work out if it has auth
        # (w.r.t. power levels)

        snapshot.fill_out_prev_events(event)
        yield self.annotate_state_groups(event)

        event.prev_events = [
            e for e in event.prev_events if e != event.event_id
        ]

        current_state = snapshot.prev_state_pdu

        if current_state:
            event.prev_state = encode_event_id(
                current_state.pdu_id, current_state.origin
            )

        # TODO check current_state to see if the min power level is less
        # than the power level of the user
        # power_level = self._get_power_level_for_event(event)

        pdu_id, origin = decode_event_id(event.event_id, self.server_name)

        yield self.store.update_current_state(
            pdu_id=pdu_id,
            origin=origin,
            context=key.context,
            pdu_type=key.type,
            state_key=key.state_key
        )

        defer.returnValue(True)

    @defer.inlineCallbacks
    @log_function
    def handle_new_state(self, new_pdu):
        """ Apply conflict resolution to `new_pdu`.

        This should be called on every new state pdu, regardless of whether or
        not there is a conflict.

        This function is safe against the race of it getting called with two
        `PDU`s trying to update the same state.
        """

        # This needs to be done in a transaction.

        is_new = yield self._handle_new_state(new_pdu)

        logger.debug("is_new: %s %s %s", is_new, new_pdu.pdu_id, new_pdu.origin)

        if is_new:
            yield self.store.update_current_state(
                pdu_id=new_pdu.pdu_id,
                origin=new_pdu.origin,
                context=new_pdu.context,
                pdu_type=new_pdu.pdu_type,
                state_key=new_pdu.state_key
            )

        defer.returnValue(is_new)

    @defer.inlineCallbacks
    def annotate_state_groups(self, event, state=None):
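        """ Works out the state at `event` and annotates the event with it.

        If `state` is given it is used as the event's `state_events` directly.
        Otherwise the state is derived from the state groups of the event's
        prev_events: keys that every group agrees on are taken as-is, and
        conflicting keys are settled by `resolve`. Also sets
        `old_state_events` and resets `state_group` to None.
        """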
        if state:
            event.state_group = None
            event.old_state_events = None
            event.state_events = state
            return
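
        # Otherwise, derive the state from the state groups of the event's
        # prev_events.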
        state_groups = yield self.store.get_state_groups(
            event.prev_events
        )

        state = {}
        state_sets = {}
        for group in state_groups:
            for s in group.state:
                state.setdefault((s.type, s.state_key), []).append(s)

                state_sets.setdefault(
                    (s.type, s.state_key),
                    set()
                ).add(s.event_id)
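
        # A state key is unconflicted if every state group agrees on a single
        # event for it; conflicted keys are settled by resolve() below.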
        unconflicted_state = {
            k: state[k].pop() for k, v in state_sets.items()
            if len(v) == 1
        }

        conflicted_state = {
            k: state[k]
            for k, v in state_sets.items()
            if len(v) > 1
        }

        new_state = {}
        new_state.update(unconflicted_state)
        for key, events in conflicted_state.items():
            new_state[key] = yield self.resolve(events)

        event.old_state_events = new_state

        if hasattr(event, "state_key"):
            new_state[(event.type, event.state_key)] = event

        event.state_group = None
        event.state_events = new_state

    @defer.inlineCallbacks
    def resolve(self, events):
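        """ Pick a single event out of a list of conflicting state events.

        Keeps the events whose senders have the highest power level and, if
        several remain, breaks the tie deterministically by hashing the event
        fields.
        """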
        curr_events = events

        new_powers_deferreds = []
        for e in curr_events:
            new_powers_deferreds.append(
                self.store.get_power_level(e.room_id, e.user_id)
            )

        new_powers = yield defer.gatherResults(
            new_powers_deferreds,
            consumeErrors=True
        )

        max_power = max([int(p) for p in new_powers])

        curr_events = [
            z[0] for z in zip(curr_events, new_powers)
            if int(z[1]) == max_power
        ]

        if not curr_events:
            raise RuntimeError("Max didn't get a max?")
        elif len(curr_events) == 1:
            defer.returnValue(curr_events[0])

        # TODO: For now, just break the tie deterministically by hashing the
        # events.
        defer.returnValue(
            sorted(
                curr_events,
                key=lambda e: hashlib.sha1(
                    e.event_id + e.user_id + e.room_id + e.type
                ).hexdigest()
            )[0]
        )

    def _get_power_level_for_event(self, event):
        # return self._persistence.get_power_level_for_user(event.room_id,
        #                                                    event.sender)
        return event.power_level

    @defer.inlineCallbacks
    @log_function
    def _handle_new_state(self, new_pdu):
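        """ Work out whether `new_pdu` should replace the current state.

        Fetches the unresolved state tree from the store, pulls in any PDU we
        are missing from the remote home server, and then compares the new
        branch against the current branch.

        Returns:
            Deferred: Resolved with True if `new_pdu` clobbers the current
            state.
        """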
        tree, missing_branch = yield self.store.get_unresolved_state_tree(
            new_pdu
        )
        new_branch, current_branch = tree

        logger.debug(
            "_handle_new_state new=%s, current=%s",
            new_branch, current_branch
        )

        if missing_branch is not None:
            # We're missing some PDUs. Fetch them.
            # TODO (erikj): Limit this.
            missing_prev = tree[missing_branch][-1]

            pdu_id = missing_prev.prev_state_id
            origin = missing_prev.prev_state_origin

            is_missing = (yield self.store.get_pdu(pdu_id, origin)) is None
            if not is_missing:
                raise Exception("Conflict resolution failed")

            yield self._replication.get_pdu(
                destination=missing_prev.origin,
                pdu_origin=origin,
                pdu_id=pdu_id,
                outlier=True
            )

            updated_current = yield self._handle_new_state(new_pdu)
            defer.returnValue(updated_current)

        if not current_branch:
            # There is no current state
            defer.returnValue(True)
            return

        n = new_branch[-1]
        c = current_branch[-1]

        common_ancestor = n.pdu_id == c.pdu_id and n.origin == c.origin

        if common_ancestor:
            # We found a common ancestor!

            if len(current_branch) == 1:
                # This is a direct clobber so we can just...
                defer.returnValue(True)

        else:
            # We didn't find a common ancestor. This is probably fine.
            pass

        result = yield self._do_conflict_res(
            new_branch, current_branch, common_ancestor
        )
        defer.returnValue(result)

    @defer.inlineCallbacks
    def _do_conflict_res(self, new_branch, current_branch, common_ancestor):
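        """ Apply each conflict resolution algorithm in turn until one of
        them produces a strict ordering between the two branches.

        Returns:
            Deferred: Resolved with True if the new branch wins, False if
            the current branch wins.
        """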
        conflict_res = [
            self._do_power_level_conflict_res,
            self._do_chain_length_conflict_res,
            self._do_hash_conflict_res,
        ]

        for algo in conflict_res:
            new_res, curr_res = yield defer.maybeDeferred(
                algo,
                new_branch, current_branch, common_ancestor
            )

            if new_res < curr_res:
                defer.returnValue(False)
            elif new_res > curr_res:
                defer.returnValue(True)

        raise Exception("Conflict resolution failed.")

    @defer.inlineCallbacks
    def _do_power_level_conflict_res(self, new_branch, current_branch,
                                     common_ancestor):
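        """ Compare branches by the maximum power level of the senders along
        each branch, ignoring the common ancestor if there is one.
        """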
        new_powers_deferreds = []
        for e in new_branch[:-1] if common_ancestor else new_branch:
            if hasattr(e, "user_id"):
                new_powers_deferreds.append(
                    self.store.get_power_level(e.context, e.user_id)
                )

        current_powers_deferreds = []
        for e in current_branch[:-1] if common_ancestor else current_branch:
            if hasattr(e, "user_id"):
                current_powers_deferreds.append(
                    self.store.get_power_level(e.context, e.user_id)
                )

        new_powers = yield defer.gatherResults(
            new_powers_deferreds,
            consumeErrors=True
        )

        current_powers = yield defer.gatherResults(
            current_powers_deferreds,
            consumeErrors=True
        )

        max_power_new = max(new_powers)
        max_power_current = max(current_powers)

        defer.returnValue(
            (max_power_new, max_power_current)
        )

    def _do_chain_length_conflict_res(self, new_branch, current_branch,
                                      common_ancestor):
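        """ Compare branches by their length; the longer branch wins. """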
        return (len(new_branch), len(current_branch))

    def _do_hash_conflict_res(self, new_branch, current_branch,
                              common_ancestor):
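        """ Last-resort, deterministic tie-break: compare SHA-1 digests of
        the concatenated pdu ids and origins of each branch.
        """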
        new_str = "".join([p.pdu_id + p.origin for p in new_branch])
        c_str = "".join([p.pdu_id + p.origin for p in current_branch])

        return (
            hashlib.sha1(new_str).hexdigest(),
            hashlib.sha1(c_str).hexdigest()
        )