Merge branch 'release-v0.13.2' of github.com:matrix-org/synapse

Erik Johnston 2016-02-11 10:55:16 +00:00
commit 88a973cde5
5 changed files with 29 additions and 9 deletions

View File

@@ -1,3 +1,9 @@
+Changes in synapse v0.13.2 (2016-02-11)
+=======================================
+
+* Fix bug where ``/events`` would fail to skip some events if there had been
+  more events than the limit specified since the last request (PR #570)
+
 Changes in synapse v0.13.1 (2016-02-10)
 =======================================

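The change below passes order='ASC' when the events stream source fetches new events, so the limit keeps the oldest events after the client's token rather than the newest. A minimal, self-contained sketch (toy stream-ordering values only, no Synapse code) of how the sort direction interacts with the limit when paginating from a token:

# Toy model: events are represented by their stream_ordering values.
events = list(range(1, 21))   # orderings 1..20 exist in the room
from_token, limit = 5, 10     # client last saw ordering 5; page size 10

# Newest-first (DESC) then truncate: the window 11..20 comes back, and if
# the next token is taken from the end of that window, 6..10 are skipped.
desc_page = sorted((e for e in events if e > from_token), reverse=True)[:limit]

# Oldest-first (ASC) then truncate: 6..15 comes back, and the next poll,
# starting just after 15, picks up 16..20.
asc_page = sorted(e for e in events if e > from_token)[:limit]

print(sorted(desc_page))  # [11, 12, ..., 20]
print(asc_page)           # [6, 7, ..., 15]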
View File

@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
-__version__ = "0.13.1"
+__version__ = "0.13.2"

View File

@@ -1061,6 +1061,7 @@ class RoomEventSource(object):
                from_key=from_key,
                to_key=to_key,
                limit=limit or 10,
+                order='ASC',
            )

            events = list(room_events)

View File

@@ -28,6 +28,7 @@ from synapse.api.errors import SynapseError
 from ._base import client_v2_patterns
 
 import copy
 import itertools
+import logging
 import ujson as json
@@ -288,6 +289,15 @@ class SyncRestServlet(RestServlet):
         state_events = state_dict.values()
 
+        for event in itertools.chain(state_events, timeline_events):
+            # We've had bug reports that events were coming down under the
+            # wrong room.
+            if event.room_id != room.room_id:
+                logger.warn(
+                    "Event %r is under room %r instead of %r",
+                    event.event_id, room.room_id, event.room_id,
+                )
+
         serialized_state = [serialize(e) for e in state_events]
         serialized_timeline = [serialize(e) for e in timeline_events]

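The loop above is defensive logging for reports of events turning up under the wrong room in /sync responses. Only the added import logging line is visible in these hunks; the logger it calls is assumed to be the conventional module-level logger, roughly:

import logging

# Assumed module-level logger, not shown in the hunks above; logger.warn
# is a long-standing alias for logger.warning in this Python 2-era code.
logger = logging.getLogger(__name__)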
View File

@@ -157,7 +157,8 @@ class StreamStore(SQLBaseStore):
         defer.returnValue(results)
 
     @defer.inlineCallbacks
-    def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0):
+    def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
+                                         order='DESC'):
         from_id = RoomStreamToken.parse_stream_token(from_key).stream
 
         room_ids = yield self._events_stream_cache.get_entities_changed(
@@ -172,7 +173,7 @@
         for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)):
             res = yield defer.gatherResults([
                 preserve_fn(self.get_room_events_stream_for_room)(
-                    room_id, from_key, to_key, limit,
+                    room_id, from_key, to_key, limit, order=order,
                 )
                 for room_id in room_ids
             ])
@@ -181,7 +182,8 @@
         defer.returnValue(results)
 
     @defer.inlineCallbacks
-    def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0):
+    def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0,
+                                        order='DESC'):
         if from_key is not None:
             from_id = RoomStreamToken.parse_stream_token(from_key).stream
         else:
@@ -206,8 +208,8 @@
                     " room_id = ?"
                     " AND not outlier"
                     " AND stream_ordering > ? AND stream_ordering <= ?"
-                    " ORDER BY stream_ordering DESC LIMIT ?"
-                )
+                    " ORDER BY stream_ordering %s LIMIT ?"
+                ) % (order,)
                 txn.execute(sql, (room_id, from_id, to_id, limit))
             else:
                 sql = (
@@ -215,8 +217,8 @@
                     " room_id = ?"
                     " AND not outlier"
                     " AND stream_ordering <= ?"
-                    " ORDER BY stream_ordering DESC LIMIT ?"
-                )
+                    " ORDER BY stream_ordering %s LIMIT ?"
+                ) % (order,)
                 txn.execute(sql, (room_id, to_id, limit))
 
             rows = self.cursor_to_dict(txn)
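In both branches the ORDER BY direction is spliced into the SQL with %, not bound as a parameter: DB-API placeholders can only carry values, not keywords like ASC/DESC, and here order only ever comes from the internal callers changed above. A standalone sketch of the same pattern (sqlite3 with a cut-down, made-up events table, plus an explicit whitelist guard added only for this sketch):

import sqlite3

def fetch_events(conn, room_id, to_id, limit, order="DESC"):
    # The direction cannot be a ? placeholder, so validate it before
    # formatting it into the query text.
    if order.upper() not in ("ASC", "DESC"):
        raise ValueError("order must be ASC or DESC")
    sql = (
        "SELECT event_id, stream_ordering FROM events"
        " WHERE room_id = ? AND stream_ordering <= ?"
        " ORDER BY stream_ordering %s LIMIT ?"
    ) % (order,)
    return conn.execute(sql, (room_id, to_id, limit)).fetchall()

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (event_id TEXT, room_id TEXT, stream_ordering INTEGER)")
conn.executemany(
    "INSERT INTO events VALUES (?, ?, ?)",
    [("$ev%d" % i, "!room:example.org", i) for i in range(1, 6)],
)
print(fetch_events(conn, "!room:example.org", to_id=5, limit=3, order="ASC"))
# -> the three oldest matching rows; order="DESC" would return the three newest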
@@ -232,7 +234,8 @@
 
         self._set_before_and_after(ret, rows, topo_order=False)
 
-        ret.reverse()
+        if order.lower() == "desc":
+            ret.reverse()
 
         if rows:
             key = "s%d" % min(r["stream_ordering"] for r in rows)
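The reverse becomes conditional because the rows come back from SQL already sorted in the requested direction: with DESC they are newest-first and reversing restores chronological order, while with ASC they are already chronological and an unconditional reverse would flip them. A toy illustration of that normalisation step (plain Python, not the store code):

def normalise(rows, order="DESC"):
    # rows arrive sorted by stream_ordering in `order`; always hand back
    # ascending (chronological) order to the caller.
    ret = list(rows)
    if order.lower() == "desc":
        ret.reverse()
    return ret

print(normalise([3, 2, 1], order="DESC"))  # [1, 2, 3]
print(normalise([1, 2, 3], order="ASC"))   # [1, 2, 3]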