From f683b5de47ae57a4fb6e9b80ad2f83c34c913486 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Thu, 7 May 2015 21:27:53 +0100 Subject: [PATCH 01/22] Store presence cachemap in an ordered dict, so that the newer serials will be at the end --- synapse/handlers/presence.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 9e1561040..6547e0434 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -26,6 +26,7 @@ import synapse.metrics from ._base import BaseHandler import logging +from collections import OrderedDict logger = logging.getLogger(__name__) @@ -143,7 +144,7 @@ class PresenceHandler(BaseHandler): self._remote_offline_serials = [] # map any user to a UserPresenceCache - self._user_cachemap = {} + self._user_cachemap = OrderedDict() # keep them sorted by serial self._user_cachemap_latest_serial = 0 metrics.register_callback( @@ -165,6 +166,14 @@ class PresenceHandler(BaseHandler): else: return UserPresenceCache() + def _bump_serial(self, user=None): + self._user_cachemap_latest_serial += 1 + + if user: + # Move to end + cache = self._user_cachemap.pop(user) + self._user_cachemap[user] = cache + def registered_user(self, user): return self.store.create_presence(user.localpart) @@ -301,7 +310,7 @@ class PresenceHandler(BaseHandler): def changed_presencelike_data(self, user, state): statuscache = self._get_or_make_usercache(user) - self._user_cachemap_latest_serial += 1 + self._bump_serial(user=user) statuscache.update(state, serial=self._user_cachemap_latest_serial) return self.push_presence(user, statuscache=statuscache) @@ -323,7 +332,7 @@ class PresenceHandler(BaseHandler): # No actual update but we need to bump the serial anyway for the # event source - self._user_cachemap_latest_serial += 1 + self._bump_serial() statuscache.update({}, serial=self._user_cachemap_latest_serial) self.push_update_to_local_and_remote( @@ -706,7 +715,7 @@ class PresenceHandler(BaseHandler): statuscache = self._get_or_make_usercache(user) - self._user_cachemap_latest_serial += 1 + self._bump_serial(user=user) statuscache.update(state, serial=self._user_cachemap_latest_serial) if not observers and not room_ids: From 45543028bbeb8395e8bbc5768680f6bf074d366f Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Thu, 7 May 2015 22:40:10 +0100 Subject: [PATCH 02/22] Use the presence cachemap ordering to early-abort the iteration loop --- synapse/handlers/presence.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6547e0434..601a4c6db 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -875,10 +875,15 @@ class PresenceEventSource(object): updates = [] # TODO(paul): use a DeferredList ? How to limit concurrency. 
-        for observed_user in cachemap.keys():
+        for observed_user in reversed(cachemap.keys()):
             cached = cachemap[observed_user]
 
-            if cached.serial <= from_key or cached.serial > max_serial:
+            # Since this is ordered in descending order of serial, we can just
+            # stop once we've seen enough
+            if cached.serial <= from_key:
+                break
+
+            if cached.serial > max_serial:
                 continue
 
             if not (yield self.is_visible(observer_user, observed_user)):
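
Taken together, these two patches implement a simple scheme: every update bumps a global serial and moves the updated entry to the end of an OrderedDict, so a reverse scan sees the newest serials first and can stop early. A minimal, self-contained sketch of the idea (illustrative only — SerialCache is not Synapse code):

    from collections import OrderedDict

    class SerialCache(object):
        """Toy model of the presence cachemap: entries are kept in
        ascending order of the serial at which they last changed."""

        def __init__(self):
            self._map = OrderedDict()
            self._serial = 0

        def update(self, user, value):
            self._serial += 1
            self._map.pop(user, None)                 # drop the old position, if any
            self._map[user] = (self._serial, value)   # re-insert at the end

        def since(self, from_key):
            out = []
            for user in reversed(list(self._map)):    # newest serial first
                serial, value = self._map[user]
                if serial <= from_key:
                    break                             # everything left is older still
                out.append((user, value))
            return out

    cache = SerialCache()
    cache.update("@a:example.org", "online")
    cache.update("@b:example.org", "unavailable")
    cache.update("@a:example.org", "offline")   # "@a" moves to the end with serial 3
    assert cache.since(1) == [
        ("@a:example.org", "offline"),
        ("@b:example.org", "unavailable"),
    ]
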
""" - d = defer.Deferred() - def callback(r): - d.callback(r) - return r + __slots__ = ["_deferred", "_observers", "_result"] - def errback(f): - d.errback(f) - return f + def __init__(self, deferred, consumeErrors=False): + object.__setattr__(self, "_deferred", deferred) + object.__setattr__(self, "_result", None) + object.__setattr__(self, "_observers", []) - deferred.addCallbacks(callback, errback) + def callback(r): + self._result = (True, r) + while self._observers: + try: + self._observers.pop().callback(r) + except: + pass + return r - return d + def errback(f): + self._result = (False, f) + while self._observers: + try: + self._observers.pop().errback(f) + except: + pass + + if consumeErrors: + return None + else: + return f + + deferred.addCallbacks(callback, errback) + + def observe(self): + if not self._result: + d = defer.Deferred() + self._observers.append(d) + return d + else: + success, res = self._result + return defer.succeed(res) if success else defer.fail(res) + + def __getattr__(self, name): + return getattr(self._deferred, name) + + def __setattr__(self, name, value): + setattr(self._deferred, name, value) From 476899295f5fd6cff64799bcbc84cd4bf9005e33 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 16:32:18 +0100 Subject: [PATCH 04/22] Change the way we do logging contexts so that they survive divergences --- demo/start.sh | 1 + synapse/crypto/keyclient.py | 17 +++++++---- synapse/federation/federation_server.py | 40 ++++++++++++------------- synapse/handlers/events.py | 8 ++--- synapse/handlers/presence.py | 34 ++++++++++----------- synapse/handlers/profile.py | 15 +++++----- synapse/http/client.py | 6 +++- synapse/http/matrixfederationclient.py | 32 ++++++++++---------- synapse/notifier.py | 16 +++++----- synapse/storage/_base.py | 11 +++---- synapse/util/__init__.py | 8 +++-- synapse/util/async.py | 6 ++-- synapse/util/logcontext.py | 31 +++++++++++++++++++ 13 files changed, 128 insertions(+), 97 deletions(-) diff --git a/demo/start.sh b/demo/start.sh index 5b3daef57..b9cc14b9d 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -31,6 +31,7 @@ for port in 8080 8081 8082; do #rm $DIR/etc/$port.config python -m synapse.app.homeserver \ --generate-config \ + --enable_registration \ -H "localhost:$https_port" \ --config-path "$DIR/etc/$port.config" \ diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 4911f0896..24f15f315 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -18,7 +18,9 @@ from twisted.web.http import HTTPClient from twisted.internet.protocol import Factory from twisted.internet import defer, reactor from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util.logcontext import PreserveLoggingContext +from synapse.util.logcontext import ( + preserve_context_over_fn, preserve_context_over_deferred +) import simplejson as json import logging @@ -40,11 +42,14 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1): for i in range(5): try: - with PreserveLoggingContext(): - protocol = yield endpoint.connect(factory) - server_response, server_certificate = yield protocol.remote_key - defer.returnValue((server_response, server_certificate)) - return + protocol = yield preserve_context_over_fn( + endpoint.connect, factory + ) + server_response, server_certificate = yield preserve_context_over_deferred( + protocol.remote_key + ) + defer.returnValue((server_response, server_certificate)) + return except SynapseKeyClientError as e: logger.exception("Error 
getting key for %r" % (server_name,)) if e.status.startswith("4"): diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2b46188c9..cd79e23f4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -20,7 +20,6 @@ from .federation_base import FederationBase from .units import Transaction, Edu from synapse.util.logutils import log_function -from synapse.util.logcontext import PreserveLoggingContext from synapse.events import FrozenEvent import synapse.metrics @@ -123,29 +122,28 @@ class FederationServer(FederationBase): logger.debug("[%s] Transaction is new", transaction.transaction_id) - with PreserveLoggingContext(): - results = [] + results = [] - for pdu in pdu_list: - d = self._handle_new_pdu(transaction.origin, pdu) + for pdu in pdu_list: + d = self._handle_new_pdu(transaction.origin, pdu) - try: - yield d - results.append({}) - except FederationError as e: - self.send_failure(e, transaction.origin) - results.append({"error": str(e)}) - except Exception as e: - results.append({"error": str(e)}) - logger.exception("Failed to handle PDU") + try: + yield d + results.append({}) + except FederationError as e: + self.send_failure(e, transaction.origin) + results.append({"error": str(e)}) + except Exception as e: + results.append({"error": str(e)}) + logger.exception("Failed to handle PDU") - if hasattr(transaction, "edus"): - for edu in [Edu(**x) for x in transaction.edus]: - self.received_edu( - transaction.origin, - edu.edu_type, - edu.content - ) + if hasattr(transaction, "edus"): + for edu in [Edu(**x) for x in transaction.edus]: + self.received_edu( + transaction.origin, + edu.edu_type, + edu.content + ) for failure in getattr(transaction, "pdu_failures", []): logger.info("Got failure %r", failure) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index f9f855213..993d33ba4 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -15,7 +15,6 @@ from twisted.internet import defer -from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function from synapse.types import UserID from synapse.events.utils import serialize_event @@ -81,10 +80,9 @@ class EventStreamHandler(BaseHandler): # thundering herds on restart. 
timeout = random.randint(int(timeout*0.9), int(timeout*1.1)) - with PreserveLoggingContext(): - events, tokens = yield self.notifier.get_events_for( - auth_user, room_ids, pagin_config, timeout - ) + events, tokens = yield self.notifier.get_events_for( + auth_user, room_ids, pagin_config, timeout + ) time_now = self.clock.time_msec() diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 9e1561040..6ae39a1d3 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -19,7 +19,6 @@ from synapse.api.errors import SynapseError, AuthError from synapse.api.constants import PresenceState from synapse.util.logutils import log_function -from synapse.util.logcontext import PreserveLoggingContext from synapse.types import UserID import synapse.metrics @@ -278,15 +277,14 @@ class PresenceHandler(BaseHandler): now_online = state["presence"] != PresenceState.OFFLINE was_polling = target_user in self._user_cachemap - with PreserveLoggingContext(): - if now_online and not was_polling: - self.start_polling_presence(target_user, state=state) - elif not now_online and was_polling: - self.stop_polling_presence(target_user) + if now_online and not was_polling: + self.start_polling_presence(target_user, state=state) + elif not now_online and was_polling: + self.stop_polling_presence(target_user) - # TODO(paul): perform a presence push as part of start/stop poll so - # we don't have to do this all the time - self.changed_presencelike_data(target_user, state) + # TODO(paul): perform a presence push as part of start/stop poll so + # we don't have to do this all the time + self.changed_presencelike_data(target_user, state) def bump_presence_active_time(self, user, now=None): if now is None: @@ -408,10 +406,10 @@ class PresenceHandler(BaseHandler): yield self.store.set_presence_list_accepted( observer_user.localpart, observed_user.to_string() ) - with PreserveLoggingContext(): - self.start_polling_presence( - observer_user, target_user=observed_user - ) + + self.start_polling_presence( + observer_user, target_user=observed_user + ) @defer.inlineCallbacks def deny_presence(self, observed_user, observer_user): @@ -430,10 +428,9 @@ class PresenceHandler(BaseHandler): observer_user.localpart, observed_user.to_string() ) - with PreserveLoggingContext(): - self.stop_polling_presence( - observer_user, target_user=observed_user - ) + self.stop_polling_presence( + observer_user, target_user=observed_user + ) @defer.inlineCallbacks def get_presence_list(self, observer_user, accepted=None): @@ -766,8 +763,7 @@ class PresenceHandler(BaseHandler): if not self._remote_sendmap[user]: del self._remote_sendmap[user] - with PreserveLoggingContext(): - yield defer.DeferredList(deferreds, consumeErrors=True) + yield defer.DeferredList(deferreds, consumeErrors=True) @defer.inlineCallbacks def push_update_to_local_and_remote(self, observed_user, statuscache, diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index ee2732b84..a7de7a80f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -154,14 +154,13 @@ class ProfileHandler(BaseHandler): if not self.hs.is_mine(user): defer.returnValue(None) - with PreserveLoggingContext(): - (displayname, avatar_url) = yield defer.gatherResults( - [ - self.store.get_profile_displayname(user.localpart), - self.store.get_profile_avatar_url(user.localpart), - ], - consumeErrors=True - ) + (displayname, avatar_url) = yield defer.gatherResults( + [ + self.store.get_profile_displayname(user.localpart), + 
self.store.get_profile_avatar_url(user.localpart), + ], + consumeErrors=True + ) state["displayname"] = displayname state["avatar_url"] = avatar_url diff --git a/synapse/http/client.py b/synapse/http/client.py index e8a5dedab..5b3cefb2d 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -14,6 +14,7 @@ # limitations under the License. from synapse.api.errors import CodeMessageException +from synapse.util.logcontext import preserve_context_over_fn from syutil.jsonutil import encode_canonical_json import synapse.metrics @@ -61,7 +62,10 @@ class SimpleHttpClient(object): # A small wrapper around self.agent.request() so we can easily attach # counters to it outgoing_requests_counter.inc(method) - d = self.agent.request(method, *args, **kwargs) + d = preserve_context_over_fn( + self.agent.request, + method, *args, **kwargs + ) def _cb(response): incoming_responses_counter.inc(method, response.code) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 7fa295cad..c99d237c7 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -22,7 +22,7 @@ from twisted.web._newclient import ResponseDone from synapse.http.endpoint import matrix_federation_endpoint from synapse.util.async import sleep -from synapse.util.logcontext import PreserveLoggingContext +from synapse.util.logcontext import preserve_context_over_fn import synapse.metrics from syutil.jsonutil import encode_canonical_json @@ -144,22 +144,22 @@ class MatrixFederationHttpClient(object): producer = body_callback(method, url_bytes, headers_dict) try: - with PreserveLoggingContext(): - request_deferred = self.agent.request( - destination, - endpoint, - method, - path_bytes, - param_bytes, - query_bytes, - Headers(headers_dict), - producer - ) + request_deferred = preserve_context_over_fn( + self.agent.request, + destination, + endpoint, + method, + path_bytes, + param_bytes, + query_bytes, + Headers(headers_dict), + producer + ) - response = yield self.clock.time_bound_deferred( - request_deferred, - time_out=60, - ) + response = yield self.clock.time_bound_deferred( + request_deferred, + time_out=60, + ) logger.debug("Got response to %s", method) break diff --git a/synapse/notifier.py b/synapse/notifier.py index 78eb28e4b..fbbccb38e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.util.logutils import log_function -from synapse.util.logcontext import PreserveLoggingContext +from synapse.util.logcontext import preserve_context_over_deferred from synapse.types import StreamToken import synapse.metrics @@ -223,11 +223,10 @@ class Notifier(object): def eb(failure): logger.exception("Failed to notify listener", failure) - with PreserveLoggingContext(): - yield defer.DeferredList( + yield defer.DeferredList( [notify(l).addErrback(eb) for l in listeners], consumeErrors=True, - ) + ) @defer.inlineCallbacks @log_function @@ -298,11 +297,10 @@ class Notifier(object): failure.getTracebackObject()) ) - with PreserveLoggingContext(): - yield defer.DeferredList( - [notify(l).addErrback(eb) for l in listeners], - consumeErrors=True, - ) + yield defer.DeferredList( + [notify(l).addErrback(eb) for l in listeners], + consumeErrors=True, + ) @defer.inlineCallbacks def wait_for_events(self, user, rooms, filter, timeout, callback): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ee5587c72..b0020f51d 100644 --- a/synapse/storage/_base.py +++ 
b/synapse/storage/_base.py @@ -18,7 +18,7 @@ from synapse.api.errors import StoreError from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.util.logutils import log_function -from synapse.util.logcontext import PreserveLoggingContext, LoggingContext +from synapse.util.logcontext import preserve_context_over_fn, LoggingContext from synapse.util.lrucache import LruCache import synapse.metrics @@ -419,10 +419,11 @@ class SQLBaseStore(object): self._txn_perf_counters.update(desc, start, end) sql_txn_timer.inc_by(duration, desc) - with PreserveLoggingContext(): - result = yield self._db_pool.runWithConnection( - inner_func, *args, **kwargs - ) + result = yield preserve_context_over_fn( + self._db_pool.runWithConnection, + inner_func, *args, **kwargs + ) + for after_callback, after_args in after_callbacks: after_callback(*after_args) defer.returnValue(result) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 79109d0b1..364b92785 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.logcontext import LoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from twisted.internet import defer, reactor, task @@ -50,8 +50,10 @@ class Clock(object): current_context = LoggingContext.current_context() def wrapped_callback(): - LoggingContext.thread_local.current_context = current_context - callback() + with PreserveLoggingContext(): + LoggingContext.thread_local.current_context = current_context + callback() + return reactor.callLater(delay, wrapped_callback) def cancel_call_later(self, timer): diff --git a/synapse/util/async.py b/synapse/util/async.py index d8febdb90..f78395a43 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -16,15 +16,13 @@ from twisted.internet import defer, reactor -from .logcontext import PreserveLoggingContext +from .logcontext import preserve_context_over_deferred -@defer.inlineCallbacks def sleep(seconds): d = defer.Deferred() reactor.callLater(seconds, d.callback, seconds) - with PreserveLoggingContext(): - yield d + return preserve_context_over_deferred(d) def run_on_reactor(): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index da7872e95..192e3f49f 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from twisted.internet import defer + import threading import logging @@ -129,3 +131,32 @@ class PreserveLoggingContext(object): def __exit__(self, type, value, traceback): """Restores the current logging context""" LoggingContext.thread_local.current_context = self.current_context + + +def preserve_context_over_fn(fn, *args, **kwargs): + with PreserveLoggingContext(): + deferred = fn(*args, **kwargs) + + return preserve_context_over_deferred(deferred) + + +def preserve_context_over_deferred(deferred): + d = defer.Deferred() + + current_context = LoggingContext.current_context() + + def cb(res): + with PreserveLoggingContext(): + LoggingContext.thread_local.current_context = current_context + res = d.callback(res) + return res + + def eb(failure): + with PreserveLoggingContext(): + LoggingContext.thread_local.current_context = current_context + res = d.errback(failure) + return res + + deferred.addCallbacks(cb, eb) + + return d From 4ac194159200e9cfb0020003409592e5324cbd18 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 16:32:56 +0100 Subject: [PATCH 05/22] PEP8 --- synapse/handlers/profile.py | 1 - synapse/notifier.py | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a7de7a80f..ffb449d45 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -17,7 +17,6 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError, CodeMessageException from synapse.api.constants import EventTypes, Membership -from synapse.util.logcontext import PreserveLoggingContext from synapse.types import UserID from ._base import BaseHandler diff --git a/synapse/notifier.py b/synapse/notifier.py index fbbccb38e..7282dfd7f 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -16,7 +16,6 @@ from twisted.internet import defer from synapse.util.logutils import log_function -from synapse.util.logcontext import preserve_context_over_deferred from synapse.types import StreamToken import synapse.metrics @@ -224,8 +223,8 @@ class Notifier(object): logger.exception("Failed to notify listener", failure) yield defer.DeferredList( - [notify(l).addErrback(eb) for l in listeners], - consumeErrors=True, + [notify(l).addErrback(eb) for l in listeners], + consumeErrors=True, ) @defer.inlineCallbacks From da1aa07db5f29436aeaae8d0dcbfe9872b4a7233 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 16:52:49 +0100 Subject: [PATCH 06/22] Add some docs --- synapse/util/logcontext.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 192e3f49f..3dce8d2bf 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -134,13 +134,25 @@ class PreserveLoggingContext(object): def preserve_context_over_fn(fn, *args, **kwargs): - with PreserveLoggingContext(): - deferred = fn(*args, **kwargs) + """Takes a function and invokes it with the given arguments, but removes + and restores the current logging context while doing so. - return preserve_context_over_deferred(deferred) + If the result is a deferred, call preserve_context_over_deferred before + returning it. 
+ """ + with PreserveLoggingContext(): + res = fn(*args, **kwargs) + + if isinstance(res, defer.Deferred): + return preserve_context_over_deferred(res) + else: + return res def preserve_context_over_deferred(deferred): + """Given a deferred wrap it such that any callbacks added later to it will + be invoked with the current context. + """ d = defer.Deferred() current_context = LoggingContext.current_context() From 2236ef6c92b7964665f5c43b941754d70aa506d8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 19:53:34 +0100 Subject: [PATCH 07/22] Fix up leak. Add warnings. --- synapse/handlers/_base.py | 11 +++++---- synapse/handlers/federation.py | 29 +++++++++++++---------- synapse/handlers/presence.py | 10 ++++---- synapse/handlers/typing.py | 4 +++- synapse/http/server.py | 6 +++-- synapse/util/__init__.py | 3 ++- synapse/util/distributor.py | 43 ++++++++++++++++------------------ synapse/util/logcontext.py | 11 ++++++++- 8 files changed, 69 insertions(+), 48 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 4b3f4eada..ddc5c21e7 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -20,6 +20,8 @@ from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.api.constants import Membership, EventTypes from synapse.types import UserID +from synapse.util.logcontext import PreserveLoggingContext + import logging @@ -137,10 +139,11 @@ class BaseHandler(object): "Failed to get destination from event %s", s.event_id ) - # Don't block waiting on waking up all the listeners. - notify_d = self.notifier.on_new_room_event( - event, extra_users=extra_users - ) + with PreserveLoggingContext(): + # Don't block waiting on waking up all the listeners. + notify_d = self.notifier.on_new_room_event( + event, extra_users=extra_users + ) def log_failure(f): logger.warn( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 85e275722..77c315c47 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -21,6 +21,7 @@ from synapse.api.errors import ( AuthError, FederationError, StoreError, ) from synapse.api.constants import EventTypes, Membership, RejectedReason +from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function from synapse.util.async import run_on_reactor from synapse.util.frozenutils import unfreeze @@ -197,9 +198,10 @@ class FederationHandler(BaseHandler): target_user = UserID.from_string(target_user_id) extra_users.append(target_user) - d = self.notifier.on_new_room_event( - event, extra_users=extra_users - ) + with PreserveLoggingContext(): + d = self.notifier.on_new_room_event( + event, extra_users=extra_users + ) def log_failure(f): logger.warn( @@ -431,9 +433,10 @@ class FederationHandler(BaseHandler): auth_events=auth_events, ) - d = self.notifier.on_new_room_event( - new_event, extra_users=[joinee] - ) + with PreserveLoggingContext(): + d = self.notifier.on_new_room_event( + new_event, extra_users=[joinee] + ) def log_failure(f): logger.warn( @@ -512,9 +515,10 @@ class FederationHandler(BaseHandler): target_user = UserID.from_string(target_user_id) extra_users.append(target_user) - d = self.notifier.on_new_room_event( - event, extra_users=extra_users - ) + with PreserveLoggingContext(): + d = self.notifier.on_new_room_event( + event, extra_users=extra_users + ) def log_failure(f): logger.warn( @@ -594,9 +598,10 @@ class FederationHandler(BaseHandler): ) target_user = UserID.from_string(event.state_key) - d 
= self.notifier.on_new_room_event( - event, extra_users=[target_user], - ) + with PreserveLoggingContext(): + d = self.notifier.on_new_room_event( + event, extra_users=[target_user], + ) def log_failure(f): logger.warn( diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6ae39a1d3..1edab0549 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError from synapse.api.constants import PresenceState +from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function from synapse.types import UserID import synapse.metrics @@ -808,10 +809,11 @@ class PresenceHandler(BaseHandler): def push_update_to_clients(self, observed_user, users_to_push=[], room_ids=[], statuscache=None): - self.notifier.on_new_user_event( - users_to_push, - room_ids, - ) + with PreserveLoggingContext(): + self.notifier.on_new_user_event( + users_to_push, + room_ids, + ) class PresenceEventSource(object): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index c0b2bd7db..64fe51aa3 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -18,6 +18,7 @@ from twisted.internet import defer from ._base import BaseHandler from synapse.api.errors import SynapseError, AuthError +from synapse.util.logcontext import PreserveLoggingContext from synapse.types import UserID import logging @@ -216,7 +217,8 @@ class TypingNotificationHandler(BaseHandler): self._latest_room_serial += 1 self._room_serials[room_id] = self._latest_room_serial - self.notifier.on_new_user_event(rooms=[room_id]) + with PreserveLoggingContext(): + self.notifier.on_new_user_event(rooms=[room_id]) class TypingNotificationEventSource(object): diff --git a/synapse/http/server.py b/synapse/http/server.py index 93ecbd758..73efbff4f 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -17,7 +17,7 @@ from synapse.api.errors import ( cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError ) -from synapse.util.logcontext import LoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext import synapse.metrics from syutil.jsonutil import ( @@ -85,7 +85,9 @@ def request_handler(request_handler): "Received request: %s %s", request.method, request.path ) - yield request_handler(self, request) + d = request_handler(self, request) + with PreserveLoggingContext(): + yield d code = request.code except CodeMessageException as e: code = e.code diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 364b92785..fd3eb1f57 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -54,7 +54,8 @@ class Clock(object): LoggingContext.thread_local.current_context = current_context callback() - return reactor.callLater(delay, wrapped_callback) + with PreserveLoggingContext(): + return reactor.callLater(delay, wrapped_callback) def cancel_call_later(self, timer): timer.cancel() diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 9d9c35039..5b150cb0e 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util.logcontext import PreserveLoggingContext
-
 from twisted.internet import defer
 
 import logging
@@ -93,7 +91,6 @@ class Signal(object):
         Each observer callable may return a Deferred."""
         self.observers.append(observer)
 
-    @defer.inlineCallbacks
     def fire(self, *args, **kwargs):
         """Invokes every callable in the observer list, passing in the args and
         kwargs. Exceptions thrown by observers are logged but ignored. It is
@@ -101,24 +98,24 @@ class Signal(object):
 
         Returns a Deferred that will complete when all the observers have
         completed."""
-        with PreserveLoggingContext():
-            deferreds = []
-            for observer in self.observers:
-                d = defer.maybeDeferred(observer, *args, **kwargs)
 
-                def eb(failure):
-                    logger.warning(
-                        "%s signal observer %s failed: %r",
-                        self.name, observer, failure,
-                        exc_info=(
-                            failure.type,
-                            failure.value,
-                            failure.getTracebackObject()))
-                    if not self.suppress_failures:
-                        failure.raiseException()
-                deferreds.append(d.addErrback(eb))
-            results = []
-            for deferred in deferreds:
-                result = yield deferred
-                results.append(result)
-        defer.returnValue(results)
+        def eb(failure):
+            logger.warning(
+                "%s signal observer %s failed: %r",
+                self.name, observer, failure,
+                exc_info=(
+                    failure.type,
+                    failure.value,
+                    failure.getTracebackObject()))
+            if not self.suppress_failures:
+                failure.raiseException()
+
+        deferreds = [
+            defer.maybeDeferred(observer, *args, **kwargs)
+            for observer in self.observers
+        ]
+
+        d = defer.gatherResults(deferreds, consumeErrors=True)
+        d.addErrback(eb)
+
+        return d
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 3dce8d2bf..a92d518b4 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -132,6 +132,13 @@ class PreserveLoggingContext(object):
         """Restores the current logging context"""
         LoggingContext.thread_local.current_context = self.current_context
 
+        if self.current_context is not LoggingContext.sentinel:
+            if self.current_context.parent_context is None:
+                logger.warn(
+                    "Restoring dead context: %s",
+                    self.current_context,
+                )
+
 
 def preserve_context_over_fn(fn, *args, **kwargs):
     """Takes a function and invokes it with the given arguments, but removes
@@ -169,6 +176,8 @@ def preserve_context_over_deferred(deferred):
             res = d.errback(failure)
         return res
 
-    deferred.addCallbacks(cb, eb)
+    if deferred.called:
+        return deferred
 
+    deferred.addCallbacks(cb, eb)
     return d
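
The preceding patches (04 through 07) all revolve around one idea: the "current logging context" lives in thread-local storage, and any code that hands a deferred across a context boundary must capture the context now and restore it when the deferred's callbacks eventually run. A stripped-down sketch of that mechanism, using a plain threading.local in place of LoggingContext (assumes Twisted is installed; all names here are illustrative, not Synapse's):

    import threading

    from twisted.internet import defer

    _local = threading.local()

    def current_context():
        return getattr(_local, "context", None)

    def preserve_context(d):
        # Capture whatever context is current *now*, so it can be restored
        # when the wrapped deferred eventually fires.
        context = current_context()
        new_d = defer.Deferred()

        def cb(res):
            _local.context = context      # restore the captured context
            new_d.callback(res)
            return res

        def eb(failure):
            _local.context = context
            new_d.errback(failure)

        d.addCallbacks(cb, eb)
        return new_d

    seen = []
    _local.context = "request-1"
    d = defer.Deferred()
    wrapped = preserve_context(d)
    wrapped.addCallback(lambda _: seen.append(current_context()))
    _local.context = "request-2"          # execution moves on to other work
    d.callback(None)                      # the result finally arrives...
    assert seen == ["request-1"]          # ...and runs in the captured context
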
From 17653a5dfeae55b1f721336fc23058e6e67a3f9a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 11 May 2015 18:00:33 +0100
Subject: [PATCH 08/22] Move storage.stream._StreamToken to
 types.RoomStreamToken

---
 synapse/storage/stream.py | 118 +++++++++++---------------------------
 synapse/types.py          |  52 +++++++++++++++++
 2 files changed, 85 insertions(+), 85 deletions(-)

diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 280d4ad60..b03fc67f7 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -37,11 +37,9 @@ from twisted.internet import defer
 
 from ._base import SQLBaseStore
 from synapse.api.constants import EventTypes
-from synapse.api.errors import SynapseError
+from synapse.types import RoomStreamToken
 from synapse.util.logutils import log_function
 
-from collections import namedtuple
-
 import logging
 
@@ -55,76 +53,26 @@
 _STREAM_TOKEN = "stream"
 _TOPOLOGICAL_TOKEN = "topological"
 
 
-class _StreamToken(namedtuple("_StreamToken", "topological stream")):
-    """Tokens are positions between events. The token "s1" comes after event 1.
+def lower_bound(token):
+    if token.topological is None:
+        return "(%d < %s)" % (token.stream, "stream_ordering")
+    else:
+        return "(%d < %s OR (%d = %s AND %d < %s))" % (
+            token.topological, "topological_ordering",
+            token.topological, "topological_ordering",
+            token.stream, "stream_ordering",
+        )
 
-            s0    s1
-            |     |
-        [0] V [1] V [2]
 
-    Tokens can either be a point in the live event stream or a cursor going
-    through historic events.
-
-    When traversing the live event stream events are ordered by when they
-    arrived at the homeserver.
-
-    When traversing historic events the events are ordered by their depth in
-    the event graph "topological_ordering" and then by when they arrived at the
-    homeserver "stream_ordering".
-
-    Live tokens start with an "s" followed by the "stream_ordering" id of the
-    event it comes after. Historic tokens start with a "t" followed by the
-    "topological_ordering" id of the event it comes after, follewed by "-",
-    followed by the "stream_ordering" id of the event it comes after.
-    """
-    __slots__ = []
-
-    @classmethod
-    def parse(cls, string):
-        try:
-            if string[0] == 's':
-                return cls(topological=None, stream=int(string[1:]))
-            if string[0] == 't':
-                parts = string[1:].split('-', 1)
-                return cls(topological=int(parts[0]), stream=int(parts[1]))
-        except:
-            pass
-        raise SynapseError(400, "Invalid token %r" % (string,))
-
-    @classmethod
-    def parse_stream_token(cls, string):
-        try:
-            if string[0] == 's':
-                return cls(topological=None, stream=int(string[1:]))
-        except:
-            pass
-        raise SynapseError(400, "Invalid token %r" % (string,))
-
-    def __str__(self):
-        if self.topological is not None:
-            return "t%d-%d" % (self.topological, self.stream)
-        else:
-            return "s%d" % (self.stream,)
-
-    def lower_bound(self):
-        if self.topological is None:
-            return "(%d < %s)" % (self.stream, "stream_ordering")
-        else:
-            return "(%d < %s OR (%d = %s AND %d < %s))" % (
-                self.topological, "topological_ordering",
-                self.topological, "topological_ordering",
-                self.stream, "stream_ordering",
-            )
-
-    def upper_bound(self):
-        if self.topological is None:
-            return "(%d >= %s)" % (self.stream, "stream_ordering")
-        else:
-            return "(%d > %s OR (%d = %s AND %d >= %s))" % (
-                self.topological, "topological_ordering",
-                self.topological, "topological_ordering",
-                self.stream, "stream_ordering",
-            )
+def upper_bound(token):
+    if token.topological is None:
+        return "(%d >= %s)" % (token.stream, "stream_ordering")
+    else:
+        return "(%d > %s OR (%d = %s AND %d >= %s))" % (
+            token.topological, "topological_ordering",
+            token.topological, "topological_ordering",
+            token.stream, "stream_ordering",
+        )
 
 
 class StreamStore(SQLBaseStore):
@@ -139,8 +87,8 @@ class StreamStore(SQLBaseStore):
             limit = MAX_STREAM_SIZE
 
         # From and to keys should be integers from ordering.
-        from_id = _StreamToken.parse_stream_token(from_key)
-        to_id = _StreamToken.parse_stream_token(to_key)
+        from_id = RoomStreamToken.parse_stream_token(from_key)
+        to_id = RoomStreamToken.parse_stream_token(to_key)
 
         if from_key == to_key:
             return defer.succeed(([], to_key))
@@ -288,17 +236,17 @@ class StreamStore(SQLBaseStore):
         args = [False, room_id]
         if direction == 'b':
             order = "DESC"
-            bounds = _StreamToken.parse(from_key).upper_bound()
+            bounds = upper_bound(RoomStreamToken.parse(from_key))
             if to_key:
                 bounds = "%s AND %s" % (
-                    bounds, _StreamToken.parse(to_key).lower_bound()
+                    bounds, lower_bound(RoomStreamToken.parse(to_key))
                 )
         else:
             order = "ASC"
-            bounds = _StreamToken.parse(from_key).lower_bound()
+            bounds = lower_bound(RoomStreamToken.parse(from_key))
             if to_key:
                 bounds = "%s AND %s" % (
-                    bounds, _StreamToken.parse(to_key).upper_bound()
+                    bounds, upper_bound(RoomStreamToken.parse(to_key))
                 )
 
         if int(limit) > 0:
@@ -333,7 +281,7 @@ class StreamStore(SQLBaseStore):
                 # when we are going backwards so we subtract one from the
                 # stream part.
                 toke -= 1
-            next_token = str(_StreamToken(topo, toke))
+            next_token = str(RoomStreamToken(topo, toke))
         else:
             # TODO (erikj): We should work out what to do here instead.
             next_token = to_key if to_key else from_key
@@ -354,7 +302,7 @@ class StreamStore(SQLBaseStore):
                             with_feedback=False, from_token=None):
         # TODO (erikj): Handle compressed feedback
 
-        end_token = _StreamToken.parse_stream_token(end_token)
+        end_token = RoomStreamToken.parse_stream_token(end_token)
 
         if from_token is None:
             sql = (
@@ -365,7 +313,7 @@ class StreamStore(SQLBaseStore):
                 " LIMIT ?"
             )
         else:
-            from_token = _StreamToken.parse_stream_token(from_token)
+            from_token = RoomStreamToken.parse_stream_token(from_token)
             sql = (
                 "SELECT stream_ordering, topological_ordering, event_id"
                 " FROM events"
@@ -395,7 +343,7 @@ class StreamStore(SQLBaseStore):
             # stream part.
             topo = rows[0]["topological_ordering"]
             toke = rows[0]["stream_ordering"] - 1
-            start_token = str(_StreamToken(topo, toke))
+            start_token = str(RoomStreamToken(topo, toke))
 
             token = (start_token, str(end_token))
         else:
@@ -439,5 +387,5 @@ class StreamStore(SQLBaseStore):
         stream = row["stream_ordering"]
         topo = event.depth
         internal = event.internal_metadata
-        internal.before = str(_StreamToken(topo, stream - 1))
-        internal.after = str(_StreamToken(topo, stream))
+        internal.before = str(RoomStreamToken(topo, stream - 1))
+        internal.after = str(RoomStreamToken(topo, stream))
diff --git a/synapse/types.py b/synapse/types.py
index f6a1b0bbc..0f16867d7 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -121,4 +121,56 @@ class StreamToken(
     return StreamToken(**d)
 
 
+class RoomStreamToken(namedtuple("_StreamToken", "topological stream")):
+    """Tokens are positions between events. The token "s1" comes after event 1.
+
+            s0    s1
+            |     |
+        [0] V [1] V [2]
+
+    Tokens can either be a point in the live event stream or a cursor going
+    through historic events.
+
+    When traversing the live event stream events are ordered by when they
+    arrived at the homeserver.
+
+    When traversing historic events the events are ordered by their depth in
+    the event graph "topological_ordering" and then by when they arrived at the
+    homeserver "stream_ordering".
+
+    Live tokens start with an "s" followed by the "stream_ordering" id of the
+    event it comes after. Historic tokens start with a "t" followed by the
+    "topological_ordering" id of the event it comes after, followed by "-",
+    followed by the "stream_ordering" id of the event it comes after.
+ """ + __slots__ = [] + + @classmethod + def parse(cls, string): + try: + if string[0] == 's': + return cls(topological=None, stream=int(string[1:])) + if string[0] == 't': + parts = string[1:].split('-', 1) + return cls(topological=int(parts[0]), stream=int(parts[1])) + except: + pass + raise SynapseError(400, "Invalid token %r" % (string,)) + + @classmethod + def parse_stream_token(cls, string): + try: + if string[0] == 's': + return cls(topological=None, stream=int(string[1:])) + except: + pass + raise SynapseError(400, "Invalid token %r" % (string,)) + + def __str__(self): + if self.topological is not None: + return "t%d-%d" % (self.topological, self.stream) + else: + return "s%d" % (self.stream,) + + ClientInfo = namedtuple("ClientInfo", ("device_id", "token_id")) From 84e6b4001f22b0e8c2f806053189fcdb1e85205b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 May 2015 18:01:31 +0100 Subject: [PATCH 09/22] Initial hack at wiring together pagination and backfill --- synapse/handlers/federation.py | 108 +++++++++++++++++++++++++++- synapse/handlers/message.py | 10 ++- synapse/storage/event_federation.py | 28 +++++++- 3 files changed, 141 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 85e275722..4d39cd4b3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -218,10 +218,11 @@ class FederationHandler(BaseHandler): @log_function @defer.inlineCallbacks - def backfill(self, dest, room_id, limit): + def backfill(self, dest, room_id, limit, extremities=[]): """ Trigger a backfill request to `dest` for the given `room_id` """ - extremities = yield self.store.get_oldest_events_in_room(room_id) + if not extremities: + extremities = yield self.store.get_oldest_events_in_room(room_id) pdus = yield self.replication_layer.backfill( dest, @@ -248,6 +249,109 @@ class FederationHandler(BaseHandler): defer.returnValue(events) + @defer.inlineCallbacks + def maybe_backfill(self, room_id, current_depth): + """Checks the database to see if we should backfill before paginating + """ + extremities = yield self.store.get_oldest_events_with_depth_in_room( + room_id + ) + + logger.debug("Got extremeties: %r", extremities) + + if not extremities: + return + + # Check if we reached a point where we should start backfilling. + sorted_extremeties_tuple = sorted( + extremities.items(), + key=lambda e: -int(e[1]) + ) + max_depth = sorted_extremeties_tuple[0][1] + + logger.debug("max_depth: %r", max_depth) + if current_depth > max_depth: + return + + # Now we need to decide which hosts to hit first. + + # First we try hosts that are already in the room, that were around + # at the time. TODO: HEURISTIC ALERT. 
+ + curr_state = yield self.state_handler.get_current_state(room_id) + + def get_domains_from_state(state): + joined_users = [ + (state_key, int(event.depth)) + for (e_type, state_key), event in state.items() + if e_type == EventTypes.Member + and event.membership == Membership.JOIN + ] + + joined_domains = {} + for u, d in joined_users: + try: + dom = UserID.from_string(u).domain + old_d = joined_domains.get(dom) + if old_d: + joined_domains[dom] = min(d, old_d) + else: + joined_domains[dom] = d + except: + pass + + return sorted(joined_domains.items(), key=lambda d: d[1]) + + curr_domains = get_domains_from_state(curr_state) + + logger.debug("curr_domains: %r", curr_domains) + + likely_domains = [ + domain for domain, depth in curr_domains + ] + + @defer.inlineCallbacks + def try_backfill(domains): + # TODO: Should we try multiple of these at a time? + for dom in domains: + events = yield self.backfill( + dom, room_id, + limit=100, + extremities=[e for e in extremities.keys()] + ) + + if events: + defer.returnValue(True) + defer.returnValue(False) + + success = yield try_backfill(likely_domains) + if success: + defer.returnValue(True) + + # Huh, well *those* domains didn't work out. Lets try some domains + # from the time. + + tried_domains = set(likely_domains) + + states = yield defer.gatherResults({ + e: self.state_handler.resolve_state_groups([e])[1] + for e in extremities.keys() + }) + + for e_id, _ in sorted_extremeties_tuple: + likely_domains = get_domains_from_state(states[e_id])[0] + + success = yield try_backfill([ + dom for dom in likely_domains + if dom not in tried_domains + ]) + if success: + defer.returnValue(True) + + tried_domains.update(likely_domains) + + defer.returnValue(False) + @defer.inlineCallbacks def send_invite(self, target_host, event): """ Sends the invite to the remote server for signing. 
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 22e19af17..38e375f86 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -21,7 +21,7 @@ from synapse.streams.config import PaginationConfig from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.util.logcontext import PreserveLoggingContext -from synapse.types import UserID +from synapse.types import UserID, RoomStreamToken from ._base import BaseHandler @@ -92,6 +92,14 @@ class MessageHandler(BaseHandler): yield self.hs.get_event_sources().get_current_token() ) + room_token = RoomStreamToken.parse(pagin_config.from_token.room_key) + if room_token.topological is None: + raise SynapseError(400, "Invalid token") + + yield self.hs.get_handlers().federation_handler.maybe_backfill( + room_id, room_token.topological + ) + user = UserID.from_string(user_id) events, next_key = yield data_source.get_pagination_rows( diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 74b4e2359..2b5424ced 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -79,6 +79,28 @@ class EventFederationStore(SQLBaseStore): room_id, ) + def get_oldest_events_with_depth_in_room(self, room_id): + return self.runInteraction( + "get_oldest_events_with_depth_in_room", + self.get_oldest_events_with_depth_in_room_txn, + room_id, + ) + + def get_oldest_events_with_depth_in_room_txn(self, txn, room_id): + sql = ( + "SELECT b.event_id, MAX(e.depth) FROM events as e" + " INNER JOIN event_edges as g" + " ON g.event_id = e.event_id AND g.room_id = e.room_id" + " INNER JOIN event_backward_extremities as b" + " ON g.prev_event_id = b.event_id AND g.room_id = b.room_id" + " WHERE b.room_id = ? AND g.is_state is ?" 
+ " GROUP BY b.event_id" + ) + + txn.execute(sql, (room_id, False,)) + + return dict(txn.fetchall()) + def _get_oldest_events_in_room_txn(self, txn, room_id): return self._simple_select_onecol_txn( txn, @@ -247,11 +269,13 @@ class EventFederationStore(SQLBaseStore): do_insert = depth < min_depth if min_depth else True if do_insert: - self._simple_insert_txn( + self._simple_upsert_txn( txn, table="room_depth", - values={ + keyvalues={ "room_id": room_id, + }, + values={ "min_depth": depth, }, ) From 4df11b503957a74a948150950da49574c21887bf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 May 2015 10:28:10 +0100 Subject: [PATCH 10/22] Make get_current_token accept a direction parameter, which tells whether the source whether we want a token for going 'forwards' or 'backwards' --- synapse/handlers/message.py | 4 +++- synapse/handlers/room.py | 4 ++-- synapse/storage/stream.py | 20 ++++++++++++++++++-- synapse/streams/events.py | 6 +++--- 4 files changed, 26 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 38e375f86..1809a44a9 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -89,7 +89,9 @@ class MessageHandler(BaseHandler): if not pagin_config.from_token: pagin_config.from_token = ( - yield self.hs.get_event_sources().get_current_token() + yield self.hs.get_event_sources().get_current_token( + direction='b' + ) ) room_token = RoomStreamToken.parse(pagin_config.from_token.room_key) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index cfa2e38ed..29b6d5275 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -577,8 +577,8 @@ class RoomEventSource(object): defer.returnValue((events, end_key)) - def get_current_key(self): - return self.store.get_room_events_max_id() + def get_current_key(self, direction='f'): + return self.store.get_room_events_max_id(direction) @defer.inlineCallbacks def get_pagination_rows(self, user, config, key): diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index b03fc67f7..8045e17fd 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -364,9 +364,25 @@ class StreamStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_room_events_max_id(self): + def get_room_events_max_id(self, direction='f'): token = yield self._stream_id_gen.get_max_token(self) - defer.returnValue("s%d" % (token,)) + if direction != 'b': + defer.returnValue("s%d" % (token,)) + else: + topo = yield self.runInteraction( + "_get_max_topological_txn", self._get_max_topological_txn + ) + defer.returnValue("t%d-%d" % (topo, token)) + + def _get_max_topological_txn(self, txn): + txn.execute( + "SELECT MAX(topological_ordering) FROM events" + " WHERE outlier = ?", + (False,) + ) + + rows = txn.fetchall() + return rows[0][0] if rows else 0 @defer.inlineCallbacks def _get_min_token(self): diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 5c8e54b78..dff7970be 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -31,7 +31,7 @@ class NullSource(object): def get_new_events_for_user(self, user, from_key, limit): return defer.succeed(([], from_key)) - def get_current_key(self): + def get_current_key(self, direction='f'): return defer.succeed(0) def get_pagination_rows(self, user, pagination_config, key): @@ -52,10 +52,10 @@ class EventSources(object): } @defer.inlineCallbacks - def get_current_token(self): + def get_current_token(self, direction='f'): token = StreamToken( room_key=( - yield 
self.sources["room"].get_current_key() + yield self.sources["room"].get_current_key(direction) ), presence_key=( yield self.sources["presence"].get_current_key() From 367382b575a61f780f3e70a62cc01a790dcc9375 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 May 2015 10:35:45 +0100 Subject: [PATCH 11/22] Handle the case where the other side is unreachable when backfilling --- synapse/handlers/federation.py | 56 +++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4d39cd4b3..8b5ac5d6c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -18,7 +18,7 @@ from ._base import BaseHandler from synapse.api.errors import ( - AuthError, FederationError, StoreError, + AuthError, FederationError, StoreError, CodeMessageException, SynapseError, ) from synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.util.logutils import log_function @@ -29,6 +29,8 @@ from synapse.crypto.event_signing import ( ) from synapse.types import UserID +from synapse.util.retryutils import NotRetryingDestination + from twisted.internet import defer import itertools @@ -251,15 +253,15 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def maybe_backfill(self, room_id, current_depth): - """Checks the database to see if we should backfill before paginating + """Checks the database to see if we should backfill before paginating, + and if so do. """ extremities = yield self.store.get_oldest_events_with_depth_in_room( room_id ) - logger.debug("Got extremeties: %r", extremities) - if not extremities: + logger.debug("Not backfilling as no extremeties found.") return # Check if we reached a point where we should start backfilling. @@ -269,14 +271,17 @@ class FederationHandler(BaseHandler): ) max_depth = sorted_extremeties_tuple[0][1] - logger.debug("max_depth: %r", max_depth) if current_depth > max_depth: + logger.debug( + "Not backfilling as we don't need to. %d < %d", + current_depth, max_depth, + ) return # Now we need to decide which hosts to hit first. - # First we try hosts that are already in the room, that were around - # at the time. TODO: HEURISTIC ALERT. + # First we try hosts that are already in the room + # TODO: HEURISTIC ALERT. curr_state = yield self.state_handler.get_current_state(room_id) @@ -304,8 +309,6 @@ class FederationHandler(BaseHandler): curr_domains = get_domains_from_state(curr_state) - logger.debug("curr_domains: %r", curr_domains) - likely_domains = [ domain for domain, depth in curr_domains ] @@ -314,11 +317,36 @@ class FederationHandler(BaseHandler): def try_backfill(domains): # TODO: Should we try multiple of these at a time? 
From 367382b575a61f780f3e70a62cc01a790dcc9375 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 10:35:45 +0100
Subject: [PATCH 11/22] Handle the case where the other side is unreachable
 when backfilling

---
 synapse/handlers/federation.py | 56 +++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 14 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 4d39cd4b3..8b5ac5d6c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -18,7 +18,7 @@
 from ._base import BaseHandler
 
 from synapse.api.errors import (
-    AuthError, FederationError, StoreError,
+    AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
 )
 from synapse.api.constants import EventTypes, Membership, RejectedReason
 from synapse.util.logutils import log_function
@@ -29,6 +29,8 @@ from synapse.crypto.event_signing import (
 )
 from synapse.types import UserID
 
+from synapse.util.retryutils import NotRetryingDestination
+
 from twisted.internet import defer
 
 import itertools
@@ -251,15 +253,15 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def maybe_backfill(self, room_id, current_depth):
-        """Checks the database to see if we should backfill before paginating
+        """Checks the database to see if we should backfill before paginating,
+        and if so, do so.
         """
         extremities = yield self.store.get_oldest_events_with_depth_in_room(
             room_id
         )
 
-        logger.debug("Got extremeties: %r", extremities)
-
         if not extremities:
+            logger.debug("Not backfilling as no extremities found.")
             return
 
         # Check if we reached a point where we should start backfilling.
@@ -269,14 +271,17 @@ class FederationHandler(BaseHandler):
         )
         max_depth = sorted_extremeties_tuple[0][1]
 
-        logger.debug("max_depth: %r", max_depth)
         if current_depth > max_depth:
+            logger.debug(
+                "Not backfilling as we don't need to. %d < %d",
+                current_depth, max_depth,
+            )
             return
 
         # Now we need to decide which hosts to hit first.
 
-        # First we try hosts that are already in the room, that were around
-        # at the time. TODO: HEURISTIC ALERT.
+        # First we try hosts that are already in the room
+        # TODO: HEURISTIC ALERT.
 
         curr_state = yield self.state_handler.get_current_state(room_id)
 
@@ -304,8 +309,6 @@ class FederationHandler(BaseHandler):
 
         curr_domains = get_domains_from_state(curr_state)
 
-        logger.debug("curr_domains: %r", curr_domains)
-
         likely_domains = [
             domain for domain, depth in curr_domains
         ]
 
         @defer.inlineCallbacks
         def try_backfill(domains):
             # TODO: Should we try multiple of these at a time?
             for dom in domains:
-                events = yield self.backfill(
-                    dom, room_id,
-                    limit=100,
-                    extremities=[e for e in extremities.keys()]
-                )
+                try:
+                    events = yield self.backfill(
+                        dom, room_id,
+                        limit=100,
+                        extremities=[e for e in extremities.keys()]
+                    )
+                except SynapseError as e:
+                    logger.info(
+                        "Failed to backfill from %s because %s",
+                        dom, e,
+                    )
+                    continue
+                except CodeMessageException as e:
+                    if 400 <= e.code < 500:
+                        raise
+
+                    logger.info(
+                        "Failed to backfill from %s because %s",
+                        dom, e,
+                    )
+                    continue
+                except NotRetryingDestination as e:
+                    logger.info(e.message)
+                    continue
+                except Exception as e:
+                    logger.info(
+                        "Failed to backfill from %s because %s",
+                        dom, e,
+                    )
+                    continue
 
                 if events:
                     defer.returnValue(True)
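
The strategy in try_backfill() above generalises to a common pattern: walk a list of candidate hosts, treat transient failures as a cue to try the next one, but re-raise client errors (4xx), since retrying a request the remote considers invalid will not help. A self-contained sketch of that pattern (RequestError is a hypothetical stand-in for synapse's CodeMessageException):

    class RequestError(Exception):
        """Hypothetical stand-in for a federation request failure."""
        def __init__(self, code):
            super(RequestError, self).__init__(code)
            self.code = code

    def first_successful(hosts, fetch):
        for host in hosts:
            try:
                result = fetch(host)
            except RequestError as e:
                if 400 <= e.code < 500:
                    raise             # our request is at fault; don't retry
                continue              # remote/transport trouble: next host
            except Exception:
                continue              # anything else: also try the next host
            if result:
                return result
        return None

    responses = {"a.example": RequestError(502), "b.example": ["event"]}

    def fetch(host):
        r = responses[host]
        if isinstance(r, Exception):
            raise r
        return r

    assert first_successful(["a.example", "b.example"], fetch) == ["event"]
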
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index ffb449d45..71ff78ab2 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -18,6 +18,7 @@ from twisted.internet import defer
 from synapse.api.errors import SynapseError, AuthError, CodeMessageException
 from synapse.api.constants import EventTypes, Membership
 from synapse.types import UserID
+from synapse.util import unwrapFirstError
 
 from ._base import BaseHandler
 
@@ -159,7 +160,7 @@ class ProfileHandler(BaseHandler):
                 self.store.get_profile_avatar_url(user.localpart),
             ],
             consumeErrors=True
-        )
+        ).addErrback(unwrapFirstError)
 
         state["displayname"] = displayname
         state["avatar_url"] = avatar_url
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index cfa2e38ed..ea5abba6a 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -21,7 +21,7 @@ from ._base import BaseHandler
 from synapse.types import UserID, RoomAlias, RoomID
 from synapse.api.constants import EventTypes, Membership, JoinRules
 from synapse.api.errors import StoreError, SynapseError
-from synapse.util import stringutils
+from synapse.util import stringutils, unwrapFirstError
 from synapse.util.async import run_on_reactor
 from synapse.events.utils import serialize_event
 
@@ -537,7 +537,7 @@ class RoomListHandler(BaseHandler):
                 for room in chunk
             ],
             consumeErrors=True,
-        )
+        ).addErrback(unwrapFirstError)
 
         for i, room in enumerate(chunk):
             room["num_joined_members"] = len(results[i])
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index fd3eb1f57..c1a16b639 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -23,6 +23,12 @@ import logging
 logger = logging.getLogger(__name__)
 
 
+def unwrapFirstError(failure):
+    # defer.gatherResults and DeferredLists wrap failures.
+    failure.trap(defer.FirstError)
+    return failure.value.subFailure
+
+
 class Clock(object):
     """A small utility that obtains current time-of-day so that time may be
     mocked during unit-tests.
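
The reason every gatherResults call now gets .addErrback(unwrapFirstError): with consumeErrors=True, defer.gatherResults reports the first failure wrapped in a defer.FirstError, so a caller catching a specific exception type would never match it. Unwrapping hands the original failure back. A small self-contained illustration (not Synapse code):

    from twisted.internet import defer

    def unwrapFirstError(failure):
        # gatherResults/DeferredList wrap the underlying failure in
        # FirstError; hand the original failure back instead.
        failure.trap(defer.FirstError)
        return failure.value.subFailure

    @defer.inlineCallbacks
    def demo():
        try:
            yield defer.gatherResults(
                [defer.succeed(1), defer.fail(ValueError("boom"))],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        except ValueError as e:
            # Without unwrapFirstError this clause would never fire;
            # we would see defer.FirstError instead.
            print("caught the original error: %s" % (e,))

    demo()
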
From 8022b27fc26bd2127019f5179c8956ea475dd284 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 13:14:48 +0100
Subject: [PATCH 13/22] Make distributor.fire work as it did

---
 synapse/util/distributor.py | 44 +++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 5b150cb0e..064c4a7a1 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -15,6 +15,12 @@
 
 from twisted.internet import defer
 
+from synapse.util.logcontext import (
+    PreserveLoggingContext, preserve_context_over_deferred,
+)
+
+from synapse.util import unwrapFirstError
+
 import logging
 
 
@@ -99,23 +105,27 @@ class Signal(object):
         Returns a Deferred that will complete when all the observers
         have completed."""
 
-        def eb(failure):
-            logger.warning(
-                "%s signal observer %s failed: %r",
-                self.name, observer, failure,
-                exc_info=(
-                    failure.type,
-                    failure.value,
-                    failure.getTracebackObject()))
-            if not self.suppress_failures:
-                failure.raiseException()
+        def do(observer):
+            def eb(failure):
+                logger.warning(
+                    "%s signal observer %s failed: %r",
+                    self.name, observer, failure,
+                    exc_info=(
+                        failure.type,
+                        failure.value,
+                        failure.getTracebackObject()))
+                if not self.suppress_failures:
+                    return failure
+            return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)
 
-        deferreds = [
-            defer.maybeDeferred(observer, *args, **kwargs)
-            for observer in self.observers
-        ]
+        with PreserveLoggingContext():
+            deferreds = [
+                do(observer)
+                for observer in self.observers
+            ]
 
-        d = defer.gatherResults(deferreds, consumeErrors=True)
-        d.addErrback(eb)
+            d = defer.gatherResults(deferreds, consumeErrors=True)
 
-        return d
+            d.addErrback(unwrapFirstError)
+
+        return preserve_context_over_deferred(d)

From 6e5ac4a28fe79162e62b68cc62aa4e37badcc8b4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 13:58:14 +0100
Subject: [PATCH 14/22] Err, gatherResults doesn't take a dict...

---
 synapse/handlers/federation.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8b5ac5d6c..31c09365e 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -361,10 +361,13 @@ class FederationHandler(BaseHandler):
 
         tried_domains = set(likely_domains)
 
-        states = yield defer.gatherResults({
-            e: self.state_handler.resolve_state_groups([e])[1]
-            for e in extremities.keys()
-        })
+        event_ids = list(extremities.keys())
+
+        states = yield defer.gatherResults([
+            self.state_handler.resolve_state_groups([e])[1]
+            for e in event_ids
+        ])
+        states = dict(zip(event_ids, states))
 
         for e_id, _ in sorted_extremeties_tuple:
             likely_domains = get_domains_from_state(states[e_id])[0]
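
The workaround above leans on the fact that gatherResults preserves the order of its input list, so the dict can be rebuilt by zipping the keys against the results. The shape of the pattern, as a sketch (resolve() is a hypothetical function returning a Deferred per key):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def gather_to_dict(keys, resolve):
        # Results come back in input order, so zip() reconstructs the
        # mapping that gatherResults cannot take directly.
        results = yield defer.gatherResults(
            [resolve(k) for k in keys], consumeErrors=True,
        )
        defer.returnValue(dict(zip(keys, results)))
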
%d < %d", - current_depth, max_depth, + max_depth, current_depth, ) return @@ -364,10 +364,10 @@ class FederationHandler(BaseHandler): event_ids = list(extremities.keys()) states = yield defer.gatherResults([ - self.state_handler.resolve_state_groups([e])[1] + self.state_handler.resolve_state_groups([e]) for e in event_ids ]) - states = dict(zip(event_ids, states)) + states = dict(zip(event_ids, [s[1] for s in states])) for e_id, _ in sorted_extremeties_tuple: likely_domains = get_domains_from_state(states[e_id])[0] From 0d31ad5101546380308e7735d4543102b7e60bca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 May 2015 14:02:01 +0100 Subject: [PATCH 16/22] Typos everywhere --- synapse/handlers/federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6f97127ae..7b7b998f0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -370,7 +370,7 @@ class FederationHandler(BaseHandler): states = dict(zip(event_ids, [s[1] for s in states])) for e_id, _ in sorted_extremeties_tuple: - likely_domains = get_domains_from_state(states[e_id])[0] + likely_domains = get_domains_from_state(states[e_id]) success = yield try_backfill([ dom for dom in likely_domains From 07a12231569189be1699f50d71b38414ba822bdc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 May 2015 14:09:54 +0100 Subject: [PATCH 17/22] s/backfil/backfill/ --- synapse/handlers/federation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7b7b998f0..109311258 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -325,7 +325,7 @@ class FederationHandler(BaseHandler): ) except SynapseError: logger.info( - "Failed to backfil from %s because %s", + "Failed to backfill from %s because %s", dom, e, ) continue @@ -334,7 +334,7 @@ class FederationHandler(BaseHandler): raise logger.info( - "Failed to backfil from %s because %s", + "Failed to backfill from %s because %s", dom, e, ) continue @@ -342,8 +342,8 @@ class FederationHandler(BaseHandler): logger.info(e.message) continue except Exception as e: - logger.info( - "Failed to backfil from %s because %s", + logger.warn( + "Failed to backfill from %s because %s", dom, e, ) continue From 74850d7f75f64e537c3db36103107aece0fdf47f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 May 2015 14:14:58 +0100 Subject: [PATCH 18/22] Do state groups persistence /after/ checking if we have already persisted the event --- synapse/storage/events.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 38395c66a..a66e84b34 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -135,19 +135,17 @@ class EventsStore(SQLBaseStore): outlier = event.internal_metadata.is_outlier() if not outlier: - self._store_state_groups_txn(txn, event, context) - self._update_min_depth_for_room_txn( txn, event.room_id, event.depth ) - have_persisted = self._simple_select_one_onecol_txn( + have_persisted = self._simple_select_one_txn( txn, - table="event_json", + table="events", keyvalues={"event_id": event.event_id}, - retcol="event_id", + retcols=["event_id", "outlier"], allow_none=True, ) @@ -162,7 +160,9 @@ class EventsStore(SQLBaseStore): # if we are persisting an event that we had persisted as an outlier, # but is no longer one. 
From c1779a79bc6da69621d0034e582008e95db02dad Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 14:41:50 +0100
Subject: [PATCH 19/22] Fix up _handle_prev_events to not try to insert
 duplicate rows

---
 synapse/storage/event_federation.py | 36 +++++++++++------------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 2b5424ced..6773e4468 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -330,31 +330,21 @@ class EventFederationStore(SQLBaseStore):
 
         txn.execute(query, (event_id, room_id))
 
-        # Insert all the prev_events as a backwards thing, they'll get
-        # deleted in a second if they're incorrect anyway.
-        self._simple_insert_many_txn(
-            txn,
-            table="event_backward_extremities",
-            values=[
-                {
-                    "event_id": e_id,
-                    "room_id": room_id,
-                }
-                for e_id, _ in prev_events
-            ],
+        query = (
+            "INSERT INTO event_backward_extremities (event_id, room_id)"
+            " SELECT ?, ? WHERE NOT EXISTS ("
+            " SELECT 1 FROM event_backward_extremities"
+            " WHERE event_id = ? AND room_id = ?"
+            " )"
+            " AND NOT EXISTS ("
+            " SELECT 1 FROM events WHERE event_id = ? AND room_id = ?"
+            " )"
         )
 
-        # Also delete from the backwards extremities table all ones that
-        # reference events that we have already seen
-        query = (
-            "DELETE FROM event_backward_extremities WHERE EXISTS ("
-            "SELECT 1 FROM events "
-            "WHERE "
-            "event_backward_extremities.event_id = events.event_id "
-            "AND not events.outlier "
-            ")"
-        )
-        txn.execute(query)
+        txn.executemany(query, [
+            (e_id, room_id, e_id, room_id, e_id, room_id,)
+            for e_id, _ in prev_events
+        ])
 
         txn.call_after(
             self.get_latest_event_ids_in_room.invalidate, room_id
From e4eddf9b367bdd0384f9b834cb8ba75db4804ae1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 14:47:23 +0100
Subject: [PATCH 20/22] We do actually want to delete rows out of
 event_backward_extremities

---
 synapse/storage/event_federation.py | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 6773e4468..f807236eb 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -336,16 +336,23 @@ class EventFederationStore(SQLBaseStore):
             " SELECT 1 FROM event_backward_extremities"
             " WHERE event_id = ? AND room_id = ?"
             " )"
-            " AND NOT EXISTS ("
-            " SELECT 1 FROM events WHERE event_id = ? AND room_id = ?"
-            " )"
         )
 
         txn.executemany(query, [
-            (e_id, room_id, e_id, room_id, e_id, room_id,)
+            (e_id, room_id, e_id, room_id, )
            for e_id, _ in prev_events
         ])
+
+        # Also delete from the backwards extremities table all ones that
+        # reference events that we have already seen
+        query = (
+            "DELETE FROM event_backward_extremities"
+            " WHERE event_id = ? AND room_id = ?"
+        )
+        txn.executemany(query, [(e_id, room_id) for e_id, _ in prev_events])
+
+
         txn.call_after(
             self.get_latest_event_ids_in_room.invalidate, room_id
         )

From 30c72d377ef4047f93a6210e25a92dc5272ea0e9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 14:47:40 +0100
Subject: [PATCH 21/22] Newlines

---
 synapse/storage/event_federation.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index f807236eb..7ea0ee232 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -343,7 +343,6 @@ class EventFederationStore(SQLBaseStore):
             for e_id, _ in prev_events
         ])
 
-
         # Also delete from the backwards extremities table all ones that
         # reference events that we have already seen
         query = (
@@ -352,7 +351,6 @@ class EventFederationStore(SQLBaseStore):
         )
         txn.executemany(query, [(e_id, room_id) for e_id, _ in prev_events])
 
-
         txn.call_after(
             self.get_latest_event_ids_in_room.invalidate, room_id
         )

From 8b28209c6050656b998f7eb7fab8ac55ae9b019b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 12 May 2015 15:02:53 +0100
Subject: [PATCH 22/22] Err, delete the right stuff

---
 synapse/storage/event_federation.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 7ea0ee232..a1982dfbb 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -336,20 +336,21 @@ class EventFederationStore(SQLBaseStore):
             " SELECT 1 FROM event_backward_extremities"
             " WHERE event_id = ? AND room_id = ?"
             " )"
+            " AND NOT EXISTS ("
+            " SELECT 1 FROM events WHERE event_id = ? AND room_id = ?"
+            " )"
         )
 
         txn.executemany(query, [
-            (e_id, room_id, e_id, room_id, )
+            (e_id, room_id, e_id, room_id, e_id, room_id, )
             for e_id, _ in prev_events
         ])
 
-        # Also delete from the backwards extremities table all ones that
-        # reference events that we have already seen
         query = (
             "DELETE FROM event_backward_extremities"
             " WHERE event_id = ? AND room_id = ?"
         )
-        txn.executemany(query, [(e_id, room_id) for e_id, _ in prev_events])
+        txn.execute(query, (event_id, room_id))
 
         txn.call_after(
             self.get_latest_event_ids_in_room.invalidate, room_id
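
Taken together, the last four patches settle on this bookkeeping for backward extremities: each prev_event becomes a backward extremity unless it already is one or is an event we already have, and the event just persisted stops being one, because the gap behind it is now filled. A runnable sqlite3 sketch of the final logic (schema cut down to the two columns that matter):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE events (event_id TEXT, room_id TEXT);
        CREATE TABLE event_backward_extremities (event_id TEXT, room_id TEXT);
    """)

    def handle_prev_events(conn, event_id, room_id, prev_events):
        query = (
            "INSERT INTO event_backward_extremities (event_id, room_id)"
            " SELECT ?, ? WHERE NOT EXISTS ("
            " SELECT 1 FROM event_backward_extremities"
            " WHERE event_id = ? AND room_id = ?"
            " )"
            " AND NOT EXISTS ("
            " SELECT 1 FROM events WHERE event_id = ? AND room_id = ?"
            " )"
        )
        conn.executemany(query, [
            (e_id, room_id, e_id, room_id, e_id, room_id)
            for e_id in prev_events
        ])

        # The event being persisted is no longer an edge of the graph.
        conn.execute(
            "DELETE FROM event_backward_extremities"
            " WHERE event_id = ? AND room_id = ?",
            (event_id, room_id),
        )

    conn.execute("INSERT INTO events VALUES ('$B', '!room')")
    handle_prev_events(conn, "$C", "!room", ["$A", "$B"])
    print(conn.execute(
        "SELECT event_id FROM event_backward_extremities"
    ).fetchall())
    # [('$A',)] -- '$B' was skipped because we already have that event.
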