Merge pull request #675 from matrix-org/markjh/replicate_stateII

Add a replication stream for state groups

commit 98c460cecd
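The hunks below extend the HTTP replication resource with a "state" stream: the replication token gains a state position, the response gains "state_groups" and "state_group_state" streams, and the events/state stores learn to return the new rows. As a minimal, hypothetical client-side sketch of polling the new stream: the endpoint URL and port are assumptions (where the replication resource is mounted depends on the homeserver setup); only the "state" query parameter and the -1/limit/timeout conventions come from the diff and its tests.

    import json
    import urllib.request

    # Assumed mount point and port; not specified in this commit.
    REPLICATION_URL = "http://localhost:8008/_synapse/replication"

    def poll_state_stream(from_token=-1, limit=100, timeout_ms=0):
        # "state" is read via parse_integer() in the new handler below;
        # "state=-1" mirrors the updated test's request.
        query = "?state=%d&limit=%d&timeout=%d" % (from_token, limit, timeout_ms)
        with urllib.request.urlopen(REPLICATION_URL + query) as resp:
            body = json.loads(resp.read())
        # Per the test assertions, each stream carries "field_names" and "rows".
        return body.get("state_groups"), body.get("state_group_state")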
@@ -38,6 +38,7 @@ STREAM_NAMES = (
     ("backfill",),
     ("push_rules",),
     ("pushers",),
+    ("state",),
 )
 
 
@@ -123,6 +124,7 @@ class ReplicationResource(Resource):
         backfill_token = yield self.store.get_current_backfill_token()
         push_rules_token, room_stream_token = self.store.get_push_rules_stream_token()
         pushers_token = self.store.get_pushers_stream_token()
+        state_token = self.store.get_state_stream_token()
 
         defer.returnValue(_ReplicationToken(
             room_stream_token,
@@ -133,6 +135,7 @@ class ReplicationResource(Resource):
             backfill_token,
             push_rules_token,
             pushers_token,
+            state_token,
         ))
 
     @request_handler
@@ -156,6 +159,7 @@ class ReplicationResource(Resource):
             yield self.receipts(writer, current_token, limit)
             yield self.push_rules(writer, current_token, limit)
             yield self.pushers(writer, current_token, limit)
+            yield self.state(writer, current_token, limit)
             self.streams(writer, current_token)
 
             logger.info("Replicated %d rows", writer.total)
@@ -205,12 +209,12 @@ class ReplicationResource(Resource):
                 current_token.backfill, current_token.events,
                 limit
             )
-            writer.write_header_and_rows(
-                "events", events_rows, ("position", "internal", "json")
-            )
-            writer.write_header_and_rows(
-                "backfill", backfill_rows, ("position", "internal", "json")
-            )
+            writer.write_header_and_rows("events", events_rows, (
+                "position", "internal", "json", "state_group"
+            ))
+            writer.write_header_and_rows("backfill", backfill_rows, (
+                "position", "internal", "json", "state_group"
+            ))
 
     @defer.inlineCallbacks
     def presence(self, writer, current_token):
@@ -320,6 +324,24 @@ class ReplicationResource(Resource):
             "position", "user_id", "app_id", "pushkey"
         ))
 
+    @defer.inlineCallbacks
+    def state(self, writer, current_token, limit):
+        current_position = current_token.state
+
+        state = parse_integer(writer.request, "state")
+        if state is not None:
+            state_groups, state_group_state = (
+                yield self.store.get_all_new_state_groups(
+                    state, current_position, limit
+                )
+            )
+            writer.write_header_and_rows("state_groups", state_groups, (
+                "position", "room_id", "event_id"
+            ))
+            writer.write_header_and_rows("state_group_state", state_group_state, (
+                "position", "type", "state_key", "event_id"
+            ))
+
 
 class _Writer(object):
     """Writes the streams as a JSON object as the response to the request"""
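For illustration, the two new streams written above would appear in the replication response with shapes along these lines; the field names come from the diff, while the row values here are invented:

    # Illustrative only: example response fragment for the new streams.
    example_state_streams = {
        "state_groups": {
            "field_names": ["position", "room_id", "event_id"],
            "rows": [
                [42, "!room:example.com", "$event:example.com"],
            ],
        },
        "state_group_state": {
            "field_names": ["position", "type", "state_key", "event_id"],
            "rows": [
                [42, "m.room.member", "@user:example.com", "$event:example.com"],
            ],
        },
    }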
@@ -350,7 +372,7 @@ class _Writer(object):
 
 class _ReplicationToken(collections.namedtuple("_ReplicationToken", (
     "events", "presence", "typing", "receipts", "account_data", "backfill",
-    "push_rules", "pushers"
+    "push_rules", "pushers", "state"
 ))):
     __slots__ = []
 
@@ -1097,10 +1097,12 @@ class EventsStore(SQLBaseStore):
         new events or as backfilled events"""
         def get_all_new_events_txn(txn):
             sql = (
-                "SELECT e.stream_ordering, ej.internal_metadata, ej.json"
+                "SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group"
                 " FROM events as e"
                 " JOIN event_json as ej"
                 " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " LEFT JOIN event_to_state_groups as eg"
+                " ON e.event_id = eg.event_id"
                 " WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
                 " ORDER BY e.stream_ordering ASC"
                 " LIMIT ?"
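Because the query is built from adjacent string literals, it can be hard to read at a glance. Written out as a single string (a reformatting for illustration only, not part of the commit), the forward-stream statement above is:

    # Same statement as the concatenated literals above, reflowed for readability.
    FORWARD_EVENTS_SQL = """
        SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group
        FROM events as e
        JOIN event_json as ej
            ON e.event_id = ej.event_id AND e.room_id = ej.room_id
        LEFT JOIN event_to_state_groups as eg
            ON e.event_id = eg.event_id
        WHERE ? < e.stream_ordering AND e.stream_ordering <= ?
        ORDER BY e.stream_ordering ASC
        LIMIT ?
    """
    # The LEFT JOIN means state_group comes back as NULL (None in Python) for
    # events that have no row in event_to_state_groups.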
@@ -1112,10 +1114,13 @@ class EventsStore(SQLBaseStore):
                 new_forward_events = []
 
             sql = (
-                "SELECT -e.stream_ordering, ej.internal_metadata, ej.json"
+                "SELECT -e.stream_ordering, ej.internal_metadata, ej.json,"
+                " eg.state_group"
                 " FROM events as e"
                 " JOIN event_json as ej"
                 " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " LEFT JOIN event_to_state_groups as eg"
+                " ON e.event_id = eg.event_id"
                 " WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
                 " ORDER BY e.stream_ordering DESC"
                 " LIMIT ?"
@@ -429,3 +429,33 @@ class StateStore(SQLBaseStore):
             }
 
         defer.returnValue(results)
+
+    def get_all_new_state_groups(self, last_id, current_id, limit):
+        def get_all_new_state_groups_txn(txn):
+            sql = (
+                "SELECT id, room_id, event_id FROM state_groups"
+                " WHERE ? < id AND id <= ? ORDER BY id LIMIT ?"
+            )
+            txn.execute(sql, (last_id, current_id, limit))
+            groups = txn.fetchall()
+
+            if not groups:
+                return ([], [])
+
+            lower_bound = groups[0][0]
+            upper_bound = groups[-1][0]
+            sql = (
+                "SELECT state_group, type, state_key, event_id"
+                " FROM state_groups_state"
+                " WHERE ? <= state_group AND state_group <= ?"
+            )
+
+            txn.execute(sql, (lower_bound, upper_bound))
+            state_group_state = txn.fetchall()
+            return (groups, state_group_state)
+        return self.runInteraction(
+            "get_all_new_state_groups", get_all_new_state_groups_txn
+        )
+
+    def get_state_stream_token(self):
+        return self._state_groups_id_gen.get_max_token()
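The new storage method returns a pair: a page of up to `limit` new state group rows (id, room_id, event_id), plus every (state_group, type, state_key, event_id) entry whose state_group id falls within that page's id range. A small consumer-side sketch (illustrative only, not part of the commit) that indexes the second list by its owning group:

    from collections import defaultdict

    def index_state_rows(groups, state_group_state):
        # groups: [(id, room_id, event_id), ...]
        # state_group_state: [(state_group, type, state_key, event_id), ...]
        by_group = defaultdict(dict)
        for state_group, etype, state_key, event_id in state_group_state:
            by_group[state_group][(etype, state_key)] = event_id
        return {group_id: by_group.get(group_id, {}) for group_id, _, _ in groups}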
@@ -58,15 +58,21 @@ class ReplicationResourceCase(unittest.TestCase):
         self.assertEquals(body, {})
 
     @defer.inlineCallbacks
-    def test_events(self):
-        get = self.get(events="-1", timeout="0")
+    def test_events_and_state(self):
+        get = self.get(events="-1", state="-1", timeout="0")
         yield self.hs.get_handlers().room_creation_handler.create_room(
             Requester(self.user, "", False), {}
         )
         code, body = yield get
         self.assertEquals(code, 200)
         self.assertEquals(body["events"]["field_names"], [
-            "position", "internal", "json"
+            "position", "internal", "json", "state_group"
+        ])
+        self.assertEquals(body["state_groups"]["field_names"], [
+            "position", "room_id", "event_id"
+        ])
+        self.assertEquals(body["state_group_state"]["field_names"], [
+            "position", "type", "state_key", "event_id"
         ])
 
     @defer.inlineCallbacks
@@ -132,6 +138,7 @@ class ReplicationResourceCase(unittest.TestCase):
     test_timeout_backfill = _test_timeout("backfill")
     test_timeout_push_rules = _test_timeout("push_rules")
     test_timeout_pushers = _test_timeout("pushers")
+    test_timeout_state = _test_timeout("state")
 
     @defer.inlineCallbacks
     def send_text_message(self, room_id, message):
@@ -182,4 +189,21 @@ class ReplicationResourceCase(unittest.TestCase):
         )
         response_body = json.loads(response_json)
 
+        if response_code == 200:
+            self.check_response(response_body)
+
         defer.returnValue((response_code, response_body))
+
+    def check_response(self, response_body):
+        for name, stream in response_body.items():
+            self.assertIn("field_names", stream)
+            field_names = stream["field_names"]
+            self.assertIn("rows", stream)
+            self.assertTrue(stream["rows"])
+            for row in stream["rows"]:
+                self.assertEquals(
+                    len(row), len(field_names),
+                    "%s: len(row = %r) == len(field_names = %r)" % (
+                        name, row, field_names
+                    )
+                )
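check_response only asserts that every row has as many values as the stream declares field names; pairing the two on the consumer side is then a one-liner. A hypothetical helper (not part of the test) for readability:

    # Illustrative only: turn a stream's positional rows into dicts keyed by field name.
    def rows_as_dicts(stream):
        field_names = stream["field_names"]
        return [dict(zip(field_names, row)) for row in stream["rows"]]

    # e.g. a "state_groups" row [7, "!room:example.com", "$event:example.com"] becomes
    # {"position": 7, "room_id": "!room:example.com", "event_id": "$event:example.com"}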