Add a cache for initialSync responses that expires after 5 minutes

Mark Haines 2015-12-22 18:27:56 +00:00
parent 7df276d219
commit 9ac417fa88
2 changed files with 94 additions and 1 deletion

synapse/handlers/message.py

@@ -22,6 +22,7 @@ from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.types import UserID, RoomStreamToken, StreamToken
from ._base import BaseHandler
@@ -45,6 +46,7 @@ class MessageHandler(BaseHandler):
        self.state = hs.get_state_handler()
        self.clock = hs.get_clock()
        self.validator = EventValidator()
        self.snapshot_cache = SnapshotCache()

    @defer.inlineCallbacks
    def get_message(self, msg_id=None, room_id=None, sender_id=None,
@@ -326,9 +328,29 @@ class MessageHandler(BaseHandler):
            [serialize_event(c, now) for c in room_state.values()]
        )

    def snapshot_all_rooms(self, user_id=None, pagin_config=None,
                           as_client_event=True, include_archived=False):
        key = (
            user_id,
            pagin_config.from_token,
            pagin_config.to_token,
            pagin_config.direction,
            pagin_config.limit,
            as_client_event,
            include_archived,
        )
        now_ms = self.clock.time_msec()
        result = self.snapshot_cache.get(now_ms, key)
        if result is not None:
            return result

        return self.snapshot_cache.set(now_ms, key, self._snapshot_all_rooms(
            user_id, pagin_config, as_client_event, include_archived
        ))

    @defer.inlineCallbacks
    def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
                            as_client_event=True, include_archived=False):
        """Retrieve a snapshot of all rooms the user is invited to or has joined.

        This snapshot may include messages for all rooms where the user is
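
Note that the new public snapshot_all_rooms is deliberately not decorated with @defer.inlineCallbacks: it returns the cached Deferred directly rather than yielding inside a generator, while the renamed _snapshot_all_rooms keeps the decorator and does the actual work. As a rough sketch of the get-or-set pattern this introduces (deduplicated_fetch and compute are hypothetical names for illustration, not part of the commit):

def deduplicated_fetch(cache, clock, key, compute):
    # Serve an in-flight or recently finished result if the cache has one.
    now_ms = clock.time_msec()
    cached = cache.get(now_ms, key)  # a Deferred, or None on a miss
    if cached is not None:
        return cached
    # Otherwise start the computation once; set() wraps the pending
    # Deferred so that concurrent callers observe the same result.
    return cache.set(now_ms, key, compute())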

synapse/util/caches/snapshot_cache.py

@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.async import ObservableDeferred


class SnapshotCache(object):

    DURATION_MS = 5 * 60 * 1000  # Cache results for 5 minutes.

    def __init__(self):
        self.pending_result_cache = {}  # Requests that haven't finished yet.
        self.prev_result_cache = {}  # The older requests that have finished.
        self.next_result_cache = {}  # The newer requests that have finished.
        self.time_last_rotated_ms = 0

    def rotate(self, time_now_ms):
        # Rotate once if the cache duration has passed since the last rotation.
        if time_now_ms - self.time_last_rotated_ms > self.DURATION_MS:
            self.prev_result_cache = self.next_result_cache
            self.next_result_cache = {}
            self.time_last_rotated_ms += self.DURATION_MS

        # Rotate again if the cache duration has passed twice since the last
        # rotation.
        if time_now_ms - self.time_last_rotated_ms > self.DURATION_MS:
            self.prev_result_cache = self.next_result_cache
            self.next_result_cache = {}
            self.time_last_rotated_ms = time_now_ms

    def get(self, time_now_ms, key):
        self.rotate(time_now_ms)
        # This cache is intended to deduplicate requests, so we expect it to
        # be missed most of the time. So we just look up the key in all of
        # the dictionaries rather than trying to short-circuit the lookup if
        # the key is found.
        result = self.prev_result_cache.get(key)
        result = self.next_result_cache.get(key, result)
        result = self.pending_result_cache.get(key, result)
        if result is not None:
            return result.observe()

    def set(self, time_now_ms, key, deferred):
        self.rotate(time_now_ms)

        result = ObservableDeferred(deferred)

        self.pending_result_cache[key] = result

        def shuffle_along(r):
            # When the deferred completes we shuffle it along to the first
            # generation of the result cache. So that it will eventually
            # expire from the rotation of that cache.
            self.next_result_cache[key] = result
            self.pending_result_cache.pop(key, None)

        result.observe().addBoth(shuffle_along)

        return result.observe()
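
To make the expiry behaviour concrete, here is a rough, hypothetical usage sketch (not part of the commit). It passes millisecond timestamps directly where the handler above passes clock.time_msec(), and the key shape mirrors the tuple built in snapshot_all_rooms:

from twisted.internet import defer

from synapse.util.caches.snapshot_cache import SnapshotCache

cache = SnapshotCache()
key = ("@user:example.com", None, None, "f", 10, True, False)

# A miss populates the cache with the pending Deferred...
pending = defer.Deferred()
first = cache.set(0, key, pending)

# ...so a concurrent request for the same key shares the in-flight result.
assert cache.get(0, key) is not None

# Completing the request delivers the value to every observer and moves
# the entry from pending_result_cache to next_result_cache.
results = []
first.addCallback(results.append)
pending.callback({"rooms": []})
assert results == [{"rooms": []}]

# Within the 5 minute window the finished result is still served.
assert cache.get(4 * 60 * 1000, key) is not None

# After two full rotations (more than 10 minutes) it has expired.
assert cache.get(11 * 60 * 1000, key) is None

A finished entry therefore survives somewhere between DURATION_MS and twice that, depending on where its completion falls relative to the rotation boundary.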