Limit number of entries to prefill from cache

Some tables, like device_inbox, take a long time to query at startup for
the stream change cache prefills. This is likely because they are
slower-growing streams and so are more fragmented on disk. For now, let's
pull fewer entries out to make startup quicker.

In future, we should add a better index to make it even faster.
Erik Johnston 2017-01-10 14:34:50 +00:00
parent 9898bbd9dc
commit dd52d4de4c
2 changed files with 6 additions and 3 deletions
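
For context before the diff, here is a minimal standalone sketch of the
technique, assuming a plain sqlite3 connection in place of Synapse's
database engine abstraction (the helper name and return shape mirror the
_get_cache_dict method changed below, but this is an illustration, not
Synapse's code): the prefill query groups rows by entity and scans only
rows whose stream id lies within the last `limit` positions.

    import sqlite3

    def get_cache_dict(conn, table, entity_column, stream_column, max_value,
                       limit=100000):
        # Windowed GROUP BY: only rows whose stream id falls within the
        # last `limit` positions are scanned, so a smaller limit means a
        # smaller range scan at startup.
        sql = (
            "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s"
            " WHERE %(stream)s > ? - %(limit)s"
            " GROUP BY %(entity)s"
        ) % {
            "table": table,
            "entity": entity_column,
            "stream": stream_column,
            "limit": limit,
        }
        txn = conn.cursor()
        txn.execute(sql, (max_value,))  # sqlite3 already uses ? placeholders
        cache = {row[0]: int(row[1]) for row in txn}
        # Entities not seen inside the window must be assumed to have
        # changed at or before the smallest stream id we did see.
        min_val = min(cache.values()) if cache else max_value
        return cache, min_val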

synapse/storage/__init__.py

@@ -189,7 +189,8 @@ class DataStore(RoomMemberStore, RoomStore,
             db_conn, "device_inbox",
             entity_column="user_id",
             stream_column="stream_id",
-            max_value=max_device_inbox_id
+            max_value=max_device_inbox_id,
+            limit=1000,
         )
         self._device_inbox_stream_cache = StreamChangeCache(
             "DeviceInboxStreamChangeCache", min_device_inbox_id,
@@ -202,6 +203,7 @@ class DataStore(RoomMemberStore, RoomStore,
             entity_column="destination",
             stream_column="stream_id",
             max_value=max_device_inbox_id,
+            limit=1000,
         )
         self._device_federation_outbox_stream_cache = StreamChangeCache(
             "DeviceFederationOutboxStreamChangeCache", min_device_outbox_id,

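For concreteness, substituting the device_inbox prefill arguments above
into the query template in _get_cache_dict (changed in the next file)
gives the following, before convert_param_style rewrites the placeholder;
the single ? parameter is bound to max_device_inbox_id:

    sql = (
        "SELECT user_id, MAX(stream_id) FROM device_inbox"
        " WHERE stream_id > ? - 1000"
        " GROUP BY user_id"
    )
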
synapse/storage/_base.py

@@ -838,18 +838,19 @@ class SQLBaseStore(object):
         return txn.execute(sql, keyvalues.values())
 
     def _get_cache_dict(self, db_conn, table, entity_column, stream_column,
-                        max_value):
+                        max_value, limit=100000):
         # Fetch a mapping of room_id -> max stream position for "recent" rooms.
         # It doesn't really matter how many we get, the StreamChangeCache will
         # do the right thing to ensure it respects the max size of cache.
         sql = (
             "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s"
-            " WHERE %(stream)s > ? - 100000"
+            " WHERE %(stream)s > ? - %(limit)s"
             " GROUP BY %(entity)s"
         ) % {
             "table": table,
             "entity": entity_column,
             "stream": stream_column,
+            "limit": limit,
         }
 
         sql = self.database_engine.convert_param_style(sql)
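
A usage sketch, continuing the standalone example above (the database file
name is hypothetical, and we assume a local device_inbox table with
user_id and stream_id columns):

    conn = sqlite3.connect("homeserver.db")  # hypothetical file name
    row = conn.execute("SELECT MAX(stream_id) FROM device_inbox").fetchone()
    max_id = row[0] if row[0] is not None else 0
    prefill, min_id = get_cache_dict(
        conn, "device_inbox",
        entity_column="user_id",
        stream_column="stream_id",
        max_value=max_id,
        limit=1000,
    )
    # prefill: user_id -> latest stream_id within the last 1000 positions.
    # min_id: entities absent from prefill may have changed at or before it.

Note the design choice: bounding the scan with a stream-id window
(stream_id > ? - limit) rather than a LIMIT clause keeps the query a
single range scan on the stream column and yields a well-defined lower
bound for the cache, below which any entity must conservatively be
treated as possibly changed.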