2014-08-12 10:10:52 -04:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# Copyright 2014 matrix.org
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2014-08-19 11:40:25 -04:00
|
|
|
""" This module is responsible for getting events from the DB for pagination
|
|
|
|
and event streaming.
|
|
|
|
|
|
|
|
The order it returns events in depends on whether we are streaming forwards or
|
|
|
|
are paginating backwards. We do this because we want to handle out of order
|
|
|
|
messages nicely, while still returning them in the correct order when we
|
|
|
|
paginate backwards.
|
|
|
|
|
|
|
|
This is implemented by keeping two ordering columns: stream_ordering and
|
|
|
|
topological_ordering. Stream ordering is basically insertion/received order
|
|
|
|
(except for events from backfill requests). The topological_ordering is a
|
|
|
|
weak ordering of events based on the pdu graph.
|
|
|
|
|
|
|
|
This means that we have to have two different types of tokens, depending on
|
|
|
|
what sort order was used:
|
|
|
|
- stream tokens are of the form: "s%d", which maps directly to the column
|
|
|
|
    - topological tokens: "t%d-%d", where the integers map to the topological
|
|
|
|
and stream ordering columns respectively.
|
|
|
|
"""
|
|
|
|
|
2014-08-14 13:40:50 -04:00
|
|
|
from twisted.internet import defer
|
2014-08-12 22:14:34 -04:00
|
|
|
|
2014-08-12 10:10:52 -04:00
|
|
|
from ._base import SQLBaseStore
|
2014-08-19 09:19:48 -04:00
|
|
|
from synapse.api.errors import SynapseError
|
2014-08-14 13:01:39 -04:00
|
|
|
from synapse.api.constants import Membership
|
2014-08-19 09:19:48 -04:00
|
|
|
from synapse.util.logutils import log_function
|
2014-08-14 13:01:39 -04:00
|
|
|
|
2014-08-12 10:10:52 -04:00
|
|
|
import json
|
|
|
|
import logging
|
|
|
|
|
|
|
|
|
2014-08-14 13:01:39 -04:00
|
|
|
logger = logging.getLogger(__name__)


# Hard cap on the number of events returned by a single stream request.
MAX_STREAM_SIZE = 1000


# Token type names.
# NOTE(review): neither constant is referenced within this module; they may
# be used by callers elsewhere -- confirm before removing.
_STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
|
|
|
|
|
|
|
|
|
|
|
|
def _parse_stream_token(string):
|
|
|
|
try:
|
|
|
|
if string[0] != 's':
|
|
|
|
raise
|
|
|
|
return int(string[1:])
|
|
|
|
except:
|
|
|
|
raise SynapseError(400, "Invalid token")
|
|
|
|
|
|
|
|
|
|
|
|
def _parse_topological_token(string):
|
|
|
|
try:
|
|
|
|
if string[0] != 't':
|
|
|
|
raise
|
|
|
|
parts = string[1:].split('-', 1)
|
|
|
|
return (int(parts[0]), int(parts[1]))
|
|
|
|
except:
|
|
|
|
raise SynapseError(400, "Invalid token")
|
|
|
|
|
|
|
|
|
|
|
|
def is_stream_token(string):
    """Return True if `string` is a well-formed stream token ("s<n>").

    Only `SynapseError` is caught: `_parse_stream_token` converts every
    parse failure into one, so the previous bare `except:` (which also
    swallowed KeyboardInterrupt/SystemExit) was needlessly broad.
    """
    try:
        _parse_stream_token(string)
        return True
    except SynapseError:
        return False
|
|
|
|
|
|
|
|
|
|
|
|
def is_topological_token(string):
    """Return True if `string` is a well-formed topological token ("t<n>-<n>").

    Only `SynapseError` is caught: `_parse_topological_token` converts
    every parse failure into one, so the previous bare `except:` (which
    also swallowed KeyboardInterrupt/SystemExit) was needlessly broad.
    """
    try:
        _parse_topological_token(string)
        return True
    except SynapseError:
        return False
|
|
|
|
|
|
|
|
|
|
|
|
def _get_token_bound(token, comparison):
    """Turn a pagination token into a SQL WHERE-clause fragment.

    Args:
        token (str): either a stream token ("s<n>") or a topological
            token ("t<n>-<n>").
        comparison (str): a SQL comparison operator, e.g. "<" or ">".
            NOTE(review): interpolated directly into SQL -- callers must
            only pass trusted operator strings, never user input.

    Returns:
        str: a SQL boolean expression bounding the ordering column(s).
            The interpolated values are ints parsed from the token, so no
            user-controlled text reaches the SQL.

    Raises:
        SynapseError: (400) if the token is neither kind of token.
    """
    # Try it as a stream token first.  The parse helpers only ever raise
    # SynapseError, so catching exactly that replaces the old bare
    # `except:` without changing behaviour.
    try:
        s = _parse_stream_token(token)
        return "%s %s %d" % ("stream_ordering", comparison, s)
    except SynapseError:
        pass

    # Fall back to a topological token, which bounds both columns.
    try:
        top, stream = _parse_topological_token(token)
        return "%s %s %d AND %s %s %d" % (
            "topological_ordering", comparison, top,
            "stream_ordering", comparison, stream,
        )
    except SynapseError:
        pass

    raise SynapseError(400, "Invalid token")
|
|
|
|
|
|
|
|
|
2014-08-14 13:01:39 -04:00
|
|
|
class StreamStore(SQLBaseStore):
    """Storage methods for fetching room events for event streaming
    (forward, stream-ordered) and pagination (usually backward,
    topologically ordered).
    """

    @log_function
    def get_room_events(self, user_id, from_key, to_key, room_id, limit=0,
                        direction='f', with_feedback=False):
        """Fetch events for a room, dispatching on the request shape.

        We deal with events requests in two different ways depending on if
        this looks like an /events request (forward direction, stream
        tokens on both ends) or a pagination request.

        Returns:
            Deferred[(list[event], str)]: the events plus the token to use
            for the next request.
        """
        is_events = (
            direction == 'f'
            and user_id
            and is_stream_token(from_key)
            and to_key and is_stream_token(to_key)
        )

        if is_events:
            return self.get_room_events_stream(
                user_id=user_id,
                from_key=from_key,
                to_key=to_key,
                room_id=room_id,
                limit=limit,
                with_feedback=with_feedback,
            )
        else:
            return self.paginate_room_events(
                from_key=from_key,
                to_key=to_key,
                room_id=room_id,
                limit=limit,
                with_feedback=with_feedback,
            )

    @defer.inlineCallbacks
    @log_function
    def get_room_events_stream(self, user_id, from_key, to_key, room_id,
                               limit=0, with_feedback=False):
        """Return new events, in stream order, for the rooms the user is
        joined to, plus membership events about the user themselves
        (invites, leave notifications).

        Args:
            user_id (str): whose stream to fetch.
            from_key (str): stream token, exclusive lower bound.
            to_key (str): stream token, exclusive upper bound.
            room_id: unused here; accepted for interface parity with
                paginate_room_events.
            limit (int): max events; 0 means "use the default cap".

        Returns:
            Deferred[(list[event], str)]: events and the stream token to
            use as `from_key` next time.
        """
        # TODO (erikj): Handle compressed feedback

        # Sub-select: rooms the user is currently joined to.
        current_room_membership_sql = (
            "SELECT m.room_id FROM room_memberships as m "
            "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
            "WHERE m.user_id = ?"
        )

        # We also want to get any membership events about that user, e.g.
        # invites or leave notifications.
        membership_sql = (
            "SELECT m.event_id FROM room_memberships as m "
            "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
            "WHERE m.user_id = ? "
        )

        # Cap the page size at MAX_STREAM_SIZE.  BUGFIX: this previously
        # used max(), which *inflated* any smaller requested limit up to
        # 1000 instead of capping larger ones.
        if limit:
            limit = min(limit, MAX_STREAM_SIZE)
        else:
            limit = MAX_STREAM_SIZE

        # From and to keys should be integers from ordering.
        from_id = _parse_stream_token(from_key)
        to_id = _parse_stream_token(to_key)

        if from_key == to_key:
            # Identical bounds: nothing can possibly match.
            defer.returnValue(([], to_key))
            return

        # `limit` is interpolated via %d so only an int ever reaches the
        # SQL text; the user_id and bounds go through bind parameters.
        sql = (
            "SELECT * FROM events as e WHERE "
            "((room_id IN (%(current)s)) OR "
            "(event_id IN (%(invites)s))) "
            "AND e.stream_ordering > ? AND e.stream_ordering < ? "
            "ORDER BY stream_ordering ASC LIMIT %(limit)d "
        ) % {
            "current": current_room_membership_sql,
            "invites": membership_sql,
            "limit": limit
        }

        rows = yield self._execute_and_decode(
            sql,
            user_id, user_id, from_id, to_id
        )

        ret = [self._parse_event_from_row(r) for r in rows]

        if rows:
            # Next token starts just after the newest row we returned.
            key = "s%d" % max([r["stream_ordering"] for r in rows])
        else:
            # Assume we didn't get anything because there was nothing to
            # get, so the caller can reuse the upper bound as their next
            # starting point.
            key = to_key

        defer.returnValue((ret, key))

    @defer.inlineCallbacks
    @log_function
    def paginate_room_events(self, room_id, from_key, to_key=None,
                             direction='b', limit=-1,
                             with_feedback=False):
        """Return a page of events for a room, ordered topologically.

        Args:
            room_id (str): the room to paginate.
            from_key (str): token (stream or topological) to start from.
            to_key (str|None): optional token bounding the other end.
            direction (str): 'b' to page backwards (default), 'f' forwards.
            limit (int): max events; <= 0 means unlimited.

        Returns:
            Deferred[(list[event], str)]: the events and the topological
            token for the next page.
        """
        # TODO (erikj): Handle compressed feedback

        # Paginating backwards flips both the comparison operators and the
        # sort direction.
        from_comp = '<' if direction =='b' else '>'
        to_comp = '>' if direction =='b' else '<'
        order = "DESC" if direction == 'b' else "ASC"

        args = [room_id]

        # _get_token_bound emits int-only SQL fragments, so interpolating
        # `bounds` below does not expose user-controlled text to the SQL.
        bounds = _get_token_bound(from_key, from_comp)
        if to_key:
            bounds = "%s AND %s" % (bounds, _get_token_bound(to_key, to_comp))

        if int(limit) > 0:
            args.append(int(limit))
            limit_str = " LIMIT ?"
        else:
            limit_str = ""

        sql = (
            "SELECT * FROM events "
            "WHERE room_id = ? AND %(bounds)s "
            "ORDER BY topological_ordering %(order)s, stream_ordering %(order)s %(limit)s "
        ) % {"bounds": bounds, "order": order, "limit": limit_str}

        rows = yield self._execute_and_decode(
            sql,
            *args
        )

        if rows:
            # The next page continues from the last row returned.
            topo = rows[-1]["topological_ordering"]
            toke = rows[-1]["stream_ordering"]
            next_token = "t%s-%s" % (topo, toke)
        else:
            # TODO (erikj): We should work out what to do here instead.
            next_token = to_key if to_key else from_key

        defer.returnValue(
            (
                [self._parse_event_from_row(r) for r in rows],
                next_token
            )
        )

    @defer.inlineCallbacks
    def get_recent_events_for_room(self, room_id, limit, with_feedback=False):
        """Return the most recent `limit` events for a room, oldest first.

        Returns:
            Deferred[(list[event], (str, str))]: the events plus a
            (start_token, end_token) pair; start_token is topological so
            it can be used to paginate further back.
        """
        # TODO (erikj): Handle compressed feedback

        end_token = yield self.get_room_events_max_id()

        # NOTE(review): SQL DESC binds only to stream_ordering here;
        # topological_ordering is implicitly ASC, which looks inconsistent
        # with the reverse() below -- confirm intended ordering.
        sql = (
            "SELECT * FROM events "
            "WHERE room_id = ? AND stream_ordering <= ? "
            "ORDER BY topological_ordering, stream_ordering DESC LIMIT ? "
        )

        rows = yield self._execute_and_decode(
            sql,
            room_id, end_token, limit
        )

        rows.reverse()  # As we selected with reverse ordering

        if rows:
            # Oldest returned row becomes the backward-pagination start.
            topo = rows[0]["topological_ordering"]
            toke = rows[0]["stream_ordering"]
            start_token = "t%s-%s" % (topo, toke)

            token = (start_token, end_token)
        else:
            token = (end_token, end_token)

        defer.returnValue(
            (
                [self._parse_event_from_row(r) for r in rows],
                token
            )
        )

    @defer.inlineCallbacks
    def get_room_events_max_id(self):
        """Return a stream token one past the newest event in the DB.

        Returns:
            Deferred[str]: "s<n>" where n is MAX(stream_ordering) + 1, or
            "s1" when the events table is empty.
        """
        res = yield self._execute_and_decode(
            "SELECT MAX(stream_ordering) as m FROM events"
        )

        logger.debug("get_room_events_max_id: %s", res)

        # NOTE(review): `not res[0]["m"]` also treats a legitimate ordering
        # of 0 as "no events"; presumably stream_ordering starts at 1 --
        # confirm against the insertion path.
        if not res or not res[0] or not res[0]["m"]:
            defer.returnValue("s1")
            return

        key = res[0]["m"] + 1
        defer.returnValue("s%d" % (key,))
|