Mirror of https://git.anonymousland.org/anonymousland/synapse-product.git (synced 2024-12-30 18:06:25 -05:00)
Fix broken cache for getting retry times. This meant we retried remote destinations way more frequently than we should have.
parent 3eb62873f6
commit 7ce264ce5f
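Context for the change below: whether we may talk to a remote destination is decided from its stored (retry_last_ts, retry_interval) pair, which is read through the get_destination_retry_timings cache. If the cached timings are lost or stale, the backoff is effectively ignored and the destination is contacted again straight away. A minimal sketch of that decision, using invented helper names rather than the real Synapse code:

import time


def may_retry(timings, now_ms=None):
    """Return True if the destination may be contacted again.

    `timings` is a dict like {"retry_last_ts": ..., "retry_interval": ...}
    in milliseconds, or None when nothing is cached/stored; the shape
    matches the rows handled in the diff below.  All names here are
    illustrative only.
    """
    if now_ms is None:
        now_ms = int(time.time() * 1000)
    if not timings:
        # No recorded failure: free to send.
        return True
    # Back off until retry_last_ts + retry_interval has passed.
    return now_ms >= timings["retry_last_ts"] + timings["retry_interval"]


# If the cache returns None even though a backoff is persisted, this check
# always passes and the destination is retried far more often than intended,
# which is the bug described in the commit message.
print(may_retry(None))                                              # True
print(may_retry({"retry_last_ts": 10_000, "retry_interval": 60_000},
                now_ms=20_000))                                     # False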
@@ -202,19 +202,6 @@ class TransactionQueue(object):
     @defer.inlineCallbacks
     @log_function
     def _attempt_new_transaction(self, destination):
-        if destination in self.pending_transactions:
-            # XXX: pending_transactions can get stuck on by a never-ending
-            # request at which point pending_pdus_by_dest just keeps growing.
-            # we need application-layer timeouts of some flavour of these
-            # requests
-            logger.debug(
-                "TX [%s] Transaction already in progress",
-                destination
-            )
-            return
-
-        logger.debug("TX [%s] _attempt_new_transaction", destination)
-
         # list of (pending_pdu, deferred, order)
         pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
         pending_edus = self.pending_edus_by_dest.pop(destination, [])
@@ -228,6 +215,23 @@ class TransactionQueue(object):
             logger.debug("TX [%s] Nothing to send", destination)
             return
 
+        if destination in self.pending_transactions:
+            # XXX: pending_transactions can get stuck on by a never-ending
+            # request at which point pending_pdus_by_dest just keeps growing.
+            # we need application-layer timeouts of some flavour of these
+            # requests
+            logger.debug(
+                "TX [%s] Transaction already in progress",
+                destination
+            )
+            return
+
+        # NOTE: Nothing should be between the above check and the insertion below
+        try:
+            self.pending_transactions[destination] = 1
+
+            logger.debug("TX [%s] _attempt_new_transaction", destination)
+
         # Sort based on the order field
         pending_pdus.sort(key=lambda t: t[2])
 
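The two hunks above move the "transaction already in progress" check until after the pending PDU/EDU queues have been drained and the "nothing to send" early return, and mark the destination as in-flight immediately after the check, inside a try block. A rough, synchronous sketch of that guard pattern with placeholder names (the real method is a Twisted inlineCallbacks generator):

class Sender:
    """Toy illustration of the per-destination in-flight guard."""

    def __init__(self):
        self.pending_transactions = {}   # destination -> 1 while a send runs
        self.pending_pdus_by_dest = {}   # destination -> list of queued PDUs

    def attempt_new_transaction(self, destination):
        # Drain the queue first, then decide whether there is anything to do.
        pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
        if not pending_pdus:
            return  # nothing to send

        if destination in self.pending_transactions:
            return  # a transaction is already in flight (mirrors the check above)

        # Nothing should run between the check above and this insertion.
        try:
            self.pending_transactions[destination] = 1
            self._send(destination, pending_pdus)
        finally:
            # Always clear the marker, or the destination wedges forever.
            self.pending_transactions.pop(destination, None)

    def _send(self, destination, pdus):
        print("sending %d PDUs to %s" % (len(pdus), destination))


s = Sender()
s.pending_pdus_by_dest["example.org"] = ["pdu1", "pdu2"]
s.attempt_new_transaction("example.org")   # prints: sending 2 PDUs to example.org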
@@ -239,9 +243,6 @@ class TransactionQueue(object):
                 for x in pending_pdus + pending_edus + pending_failures
             ]
 
-        try:
-            self.pending_transactions[destination] = 1
-
             txn_id = str(self._next_txn_id)
 
             limiter = yield get_retry_limiter(
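The get_retry_limiter call that ends the hunk is where the stored timings feed back into sending: the usual shape is exponential backoff, where each failure pushes retry_last_ts forward and multiplies retry_interval up to some cap, and a success resets both. A generic sketch of that bookkeeping with made-up constants, not the actual get_retry_limiter implementation:

import time

MIN_INTERVAL_MS = 10 * 60 * 1000          # first backoff step (assumed value)
MAX_INTERVAL_MS = 24 * 60 * 60 * 1000     # cap (assumed value)
MULTIPLIER = 5                            # growth factor (assumed value)


def next_timings(old_interval_ms, succeeded, now_ms=None):
    """Compute the (retry_last_ts, retry_interval) pair to persist."""
    if now_ms is None:
        now_ms = int(time.time() * 1000)
    if succeeded:
        # Success clears the backoff entirely.
        return 0, 0
    if old_interval_ms:
        interval = min(old_interval_ms * MULTIPLIER, MAX_INTERVAL_MS)
    else:
        interval = MIN_INTERVAL_MS
    return now_ms, interval


print(next_timings(0, succeeded=False, now_ms=1_000))        # (1000, 600000)
print(next_timings(600_000, succeeded=False, now_ms=2_000))  # (2000, 3000000)
print(next_timings(3_000_000, succeeded=True))               # (0, 0)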
@@ -253,16 +253,6 @@ class TransactionStore(SQLBaseStore):
             retry_interval (int) - how long until next retry in ms
         """
 
-        # As this is the new value, we might as well prefill the cache
-        self.get_destination_retry_timings.prefill(
-            destination,
-            {
-                "destination": destination,
-                "retry_last_ts": retry_last_ts,
-                "retry_interval": retry_interval
-            },
-        )
-
         # XXX: we could chose to not bother persisting this if our cache thinks
         # this is a NOOP
         return self.runInteraction(
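This hunk stops prefilling get_destination_retry_timings with the value we are about to write; the cache entry is instead invalidated via the txn.call_after hook added in the next hunk, so the next reader reloads the row from the database rather than trusting a value written into the cache before the transaction has run. A toy cache showing the difference between the two operations, with invented names:

class ToyCache:
    """Minimal stand-in for a cached getter with prefill/invalidate."""

    def __init__(self, loader):
        self._loader = loader        # called on a cache miss
        self._entries = {}

    def get(self, key):
        if key not in self._entries:
            self._entries[key] = self._loader(key)
        return self._entries[key]

    def prefill(self, key, value):
        # Optimistically store a value we *expect* to be persisted.
        self._entries[key] = value

    def invalidate(self, key):
        # Forget the entry; the next get() reloads from the source of truth.
        self._entries.pop(key, None)


db = {"example.org": {"retry_last_ts": 0, "retry_interval": 0}}
cache = ToyCache(lambda dest: dict(db.get(dest, {})))

db["example.org"] = {"retry_last_ts": 1000, "retry_interval": 60000}
cache.invalidate("example.org")     # safe: the next read reloads the row above
print(cache.get("example.org"))     # {'retry_last_ts': 1000, 'retry_interval': 60000}

cache.prefill("other.org", {"retry_last_ts": 5000, "retry_interval": 60000})
print(cache.get("other.org"))       # served from the cache even though db has no such row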
@@ -275,25 +265,19 @@ class TransactionStore(SQLBaseStore):
 
     def _set_destination_retry_timings(self, txn, destination,
                                        retry_last_ts, retry_interval):
-        query = (
-            "UPDATE destinations"
-            " SET retry_last_ts = ?, retry_interval = ?"
-            " WHERE destination = ?"
-        )
+        txn.call_after(self.get_destination_retry_timings.invalidate, (destination,))
 
-        txn.execute(
-            query,
-            (
-                retry_last_ts, retry_interval, destination,
-            )
-        )
-
-        if txn.rowcount == 0:
-            # destination wasn't already in table. Insert it.
-            self._simple_insert_txn(
+        self._simple_upsert_txn(
             txn,
-            table="destinations",
+            "destinations",
+            keyvalues={
+                "destination": destination,
+            },
             values={
-                "destination": destination,
                 "retry_last_ts": retry_last_ts,
                 "retry_interval": retry_interval,
+            },
+            insertion_values={
+                "destination": destination,
+                "retry_last_ts": retry_last_ts,
+                "retry_interval": retry_interval,