Merge pull request #570 from matrix-org/erikj/events_fixes

Return events in correct order for /events
Erik Johnston 2016-02-11 10:13:26 +00:00
commit abc1b22193
2 changed files with 12 additions and 8 deletions

@@ -1061,6 +1061,7 @@ class RoomEventSource(object):
                 from_key=from_key,
                 to_key=to_key,
                 limit=limit or 10,
+                order='ASC',
             )
 
             events = list(room_events)

@@ -157,7 +157,8 @@ class StreamStore(SQLBaseStore):
         defer.returnValue(results)
 
     @defer.inlineCallbacks
-    def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0):
+    def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
+                                         order='DESC'):
         from_id = RoomStreamToken.parse_stream_token(from_key).stream
 
         room_ids = yield self._events_stream_cache.get_entities_changed(
@@ -172,7 +173,7 @@ class StreamStore(SQLBaseStore):
         for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)):
             res = yield defer.gatherResults([
                 preserve_fn(self.get_room_events_stream_for_room)(
-                    room_id, from_key, to_key, limit,
+                    room_id, from_key, to_key, limit, order=order,
                 )
                 for room_id in room_ids
             ])
@@ -181,7 +182,8 @@ class StreamStore(SQLBaseStore):
         defer.returnValue(results)
 
     @defer.inlineCallbacks
-    def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0):
+    def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0,
+                                        order='DESC'):
         if from_key is not None:
             from_id = RoomStreamToken.parse_stream_token(from_key).stream
         else:
@@ -206,8 +208,8 @@ class StreamStore(SQLBaseStore):
                 " room_id = ?"
                 " AND not outlier"
                 " AND stream_ordering > ? AND stream_ordering <= ?"
-                " ORDER BY stream_ordering DESC LIMIT ?"
-            )
+                " ORDER BY stream_ordering %s LIMIT ?"
+            ) % (order,)
             txn.execute(sql, (room_id, from_id, to_id, limit))
         else:
             sql = (
@@ -215,8 +217,8 @@ class StreamStore(SQLBaseStore):
                 " room_id = ?"
                 " AND not outlier"
                 " AND stream_ordering <= ?"
-                " ORDER BY stream_ordering DESC LIMIT ?"
-            )
+                " ORDER BY stream_ordering %s LIMIT ?"
+            ) % (order,)
             txn.execute(sql, (room_id, to_id, limit))
 
         rows = self.cursor_to_dict(txn)
@@ -232,7 +234,8 @@ class StreamStore(SQLBaseStore):
         self._set_before_and_after(ret, rows, topo_order=False)
 
-        ret.reverse()
+        if order.lower() == "desc":
+            ret.reverse()
 
         if rows:
             key = "s%d" % min(r["stream_ordering"] for r in rows)