# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import namedtuple
from typing import Iterable, List, Optional, Tuple

import attr
from canonicaljson import encode_canonical_json

from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import db_to_json
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.types import JsonDict
from synapse.util.caches.descriptors import cached
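
# Binary blobs (here, the canonical-JSON response bodies stored below) are
# wrapped in this type before being handed to the database layer.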
db_binary_type = memoryview

logger = logging.getLogger(__name__)


_TransactionRow = namedtuple(
    "_TransactionRow",
    ("id", "transaction_id", "destination", "ts", "response_code", "response_json"),
)

_UpdateTransactionRow = namedtuple(
    "_UpdateTransactionRow", ("response_code", "response_json")
)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class DestinationRetryTimings:
    """The current destination retry timing info for a remote server."""

    # The first time we tried and failed to reach the remote server, in ms.
    failure_ts: int

    # The last time we tried and failed to reach the remote server, in ms.
    retry_last_ts: int

    # How long since the last time we tried to reach the remote server before
    # trying again, in ms.
    retry_interval: int
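
    # These fields persist state for the backoff logic in
    # synapse.util.retryutils; this module only stores and retrieves them.
    # For example (hypothetical values): failure_ts=T, retry_last_ts=T+600000,
    # retry_interval=600000 means the first failure was at T, the last attempt
    # ten minutes later, and no new attempt is due until a further ten minutes
    # (600000 ms) have passed.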


class TransactionWorkerStore(CacheInvalidationWorkerStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.run_background_tasks:
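            # 30 * 60 * 1000 ms = every half hour.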
            self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)

    @wrap_as_background_process("cleanup_transactions")
    async def _cleanup_transactions(self) -> None:
        now = self._clock.time_msec()
        month_ago = now - 30 * 24 * 60 * 60 * 1000

        def _cleanup_transactions_txn(txn):
            txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,))

        await self.db_pool.runInteraction(
            "_cleanup_transactions", _cleanup_transactions_txn
        )

    async def get_received_txn_response(
        self, transaction_id: str, origin: str
    ) -> Optional[Tuple[int, JsonDict]]:
        """For an incoming transaction from a given origin, check if we have
        already responded to it. If so, return the response code and response
        body (as a dict).

        Args:
            transaction_id
            origin

        Returns:
            None if we have not previously responded to this transaction, or a
            2-tuple of (response code, response body).
        """

        return await self.db_pool.runInteraction(
            "get_received_txn_response",
            self._get_received_txn_response,
            transaction_id,
            origin,
        )

    def _get_received_txn_response(self, txn, transaction_id, origin):
        result = self.db_pool.simple_select_one_txn(
            txn,
            table="received_transactions",
            keyvalues={"transaction_id": transaction_id, "origin": origin},
            retcols=(
                "transaction_id",
                "origin",
                "ts",
                "response_code",
                "response_json",
                "has_been_referenced",
            ),
            allow_none=True,
        )

        if result and result["response_code"]:
            return result["response_code"], db_to_json(result["response_json"])
        else:
            return None

    async def set_received_txn_response(
        self, transaction_id: str, origin: str, code: int, response_dict: JsonDict
    ) -> None:
        """Persist the response we returned for an incoming transaction, which
        we should replay for subsequent transactions with the same
        transaction_id and origin.

        Args:
            transaction_id: The incoming transaction ID.
            origin: The origin server.
            code: The response code.
            response_dict: The response, to be encoded into JSON.
        """

        await self.db_pool.simple_upsert(
            table="received_transactions",
            keyvalues={
                "transaction_id": transaction_id,
                "origin": origin,
            },
            values={},
            insertion_values={
                "response_code": code,
                "response_json": db_binary_type(encode_canonical_json(response_dict)),
                "ts": self._clock.time_msec(),
            },
            desc="set_received_txn_response",
        )
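
    # Together, get_received_txn_response and set_received_txn_response allow
    # incoming transactions to be deduplicated: a transaction we have already
    # handled can have its stored response replayed rather than being
    # reprocessed.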

    @cached(max_entries=10000)
    async def get_destination_retry_timings(
        self,
        destination: str,
    ) -> Optional[DestinationRetryTimings]:
        """Gets the current retry timings (if any) for a given destination.

        Args:
            destination

        Returns:
            None if not retrying, otherwise the current retry timings for the
            destination.
        """

        result = await self.db_pool.runInteraction(
            "get_destination_retry_timings",
            self._get_destination_retry_timings,
            destination,
        )

        return result
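
    # Note: because get_destination_retry_timings is @cached, the setters below
    # must call _invalidate_cache_and_stream so that all workers see fresh
    # timings.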

    def _get_destination_retry_timings(
        self, txn, destination: str
    ) -> Optional[DestinationRetryTimings]:
        result = self.db_pool.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )

        # check we have a row and retry_last_ts is not null or zero
        # (retry_last_ts can't be negative)
        if result and result["retry_last_ts"]:
            return DestinationRetryTimings(**result)
        else:
            return None

    async def set_destination_retry_timings(
        self,
        destination: str,
        failure_ts: Optional[int],
        retry_last_ts: int,
        retry_interval: int,
    ) -> None:
        """Sets the current retry timings for a given destination.

        Both timings should be zero if retrying is no longer occurring.

        Args:
            destination
            failure_ts: when the server started failing (ms since epoch)
            retry_last_ts: time of last retry attempt in unix epoch ms
            retry_interval: how long until next retry in ms
        """
        if self.database_engine.can_native_upsert:
            return await self.db_pool.runInteraction(
                "set_destination_retry_timings",
                self._set_destination_retry_timings_native,
                destination,
                failure_ts,
                retry_last_ts,
                retry_interval,
                db_autocommit=True,  # Safe as it's a single upsert
            )
        else:
            return await self.db_pool.runInteraction(
                "set_destination_retry_timings",
                self._set_destination_retry_timings_emulated,
                destination,
                failure_ts,
                retry_last_ts,
                retry_interval,
            )

    def _set_destination_retry_timings_native(
        self, txn, destination, failure_ts, retry_last_ts, retry_interval
    ):
        assert self.database_engine.can_native_upsert

        # Upsert retry time interval if retry_interval is zero (i.e. we're
        # resetting it) or greater than the existing retry interval.
        #
        # WARNING: This is executed in autocommit, so we shouldn't add any more
        # SQL calls in here (without being very careful).
        sql = """
            INSERT INTO destinations (
                destination, failure_ts, retry_last_ts, retry_interval
            )
            VALUES (?, ?, ?, ?)
            ON CONFLICT (destination) DO UPDATE SET
                failure_ts = EXCLUDED.failure_ts,
                retry_last_ts = EXCLUDED.retry_last_ts,
                retry_interval = EXCLUDED.retry_interval
            WHERE
                EXCLUDED.retry_interval = 0
                OR destinations.retry_interval IS NULL
                OR destinations.retry_interval < EXCLUDED.retry_interval
        """

        txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))

        self._invalidate_cache_and_stream(
            txn, self.get_destination_retry_timings, (destination,)
        )

    def _set_destination_retry_timings_emulated(
        self, txn, destination, failure_ts, retry_last_ts, retry_interval
    ):
        self.database_engine.lock_table(txn, "destinations")

        # We need to be careful here as the data may have changed from under us
        # due to a worker setting the timings.

        prev_row = self.db_pool.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )
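
        # Mirror the native path: insert if the row is missing; otherwise only
        # overwrite if we are resetting the interval (zero) or backing off
        # further.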
        if not prev_row:
            self.db_pool.simple_insert_txn(
                txn,
                table="destinations",
                values={
                    "destination": destination,
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )
        elif (
            retry_interval == 0
            or prev_row["retry_interval"] is None
            or prev_row["retry_interval"] < retry_interval
        ):
            self.db_pool.simple_update_one_txn(
                txn,
                "destinations",
                keyvalues={"destination": destination},
                updatevalues={
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )

        self._invalidate_cache_and_stream(
            txn, self.get_destination_retry_timings, (destination,)
        )

    async def store_destination_rooms_entries(
        self,
        destinations: Iterable[str],
        room_id: str,
        stream_ordering: int,
    ) -> None:
        """
        Updates or creates `destination_rooms` entries in batch for a single event.

        Args:
            destinations: list of destinations
            room_id: the room_id of the event
            stream_ordering: the stream_ordering of the event
        """
        await self.db_pool.simple_upsert_many(
            table="destinations",
            key_names=("destination",),
            key_values=[(d,) for d in destinations],
            value_names=[],
            value_values=[],
            desc="store_destination_rooms_entries_dests",
        )

        rows = [(destination, room_id) for destination in destinations]
        await self.db_pool.simple_upsert_many(
            table="destination_rooms",
            key_names=("destination", "room_id"),
            key_values=rows,
            value_names=["stream_ordering"],
            value_values=[(stream_ordering,)] * len(rows),
            desc="store_destination_rooms_entries_rooms",
        )

    async def get_destination_last_successful_stream_ordering(
        self, destination: str
    ) -> Optional[int]:
        """
        Gets the stream ordering of the PDU most-recently successfully sent
        to the specified destination, or None if this information has not been
        tracked yet.

        Args:
            destination: the destination to query
        """
        return await self.db_pool.simple_select_one_onecol(
            "destinations",
            {"destination": destination},
            "last_successful_stream_ordering",
            allow_none=True,
            desc="get_last_successful_stream_ordering",
        )

    async def set_destination_last_successful_stream_ordering(
        self, destination: str, last_successful_stream_ordering: int
    ) -> None:
        """
        Marks that we have successfully sent the PDUs up to and including the
        one specified.

        Args:
            destination: the destination we have successfully sent to
            last_successful_stream_ordering: the stream_ordering of the most
                recent successfully-sent PDU
        """
        return await self.db_pool.simple_upsert(
            "destinations",
            keyvalues={"destination": destination},
            values={"last_successful_stream_ordering": last_successful_stream_ordering},
            desc="set_last_successful_stream_ordering",
        )

    async def get_catch_up_room_event_ids(
        self,
        destination: str,
        last_successful_stream_ordering: int,
    ) -> List[str]:
        """
        Returns at most 50 event IDs, oldest first, for events that have not
        yet been sent to the destination.

        Args:
            destination: the destination in question
            last_successful_stream_ordering: the stream_ordering of the
                most-recently successfully-transmitted event to the destination

        Returns:
            list of event_ids
        """
        return await self.db_pool.runInteraction(
            "get_catch_up_room_event_ids",
            self._get_catch_up_room_event_ids_txn,
            destination,
            last_successful_stream_ordering,
        )

    @staticmethod
    def _get_catch_up_room_event_ids_txn(
        txn: LoggingTransaction,
        destination: str,
        last_successful_stream_ordering: int,
    ) -> List[str]:
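        # destination_rooms stores only stream orderings; join to `events` to
        # recover the corresponding event IDs.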
        q = """
            SELECT event_id FROM destination_rooms
            JOIN events USING (stream_ordering)
            WHERE destination = ?
              AND stream_ordering > ?
            ORDER BY stream_ordering
            LIMIT 50
        """
        txn.execute(
            q,
            (destination, last_successful_stream_ordering),
        )
        event_ids = [row[0] for row in txn]
        return event_ids

    async def get_catch_up_outstanding_destinations(
        self, after_destination: Optional[str]
    ) -> List[str]:
        """
        Gets at most 25 destinations which have outstanding PDUs to be caught
        up, and are not being backed off from.

        Args:
            after_destination:
                If provided, all destinations must be lexicographically greater
                than this one.

        Returns:
            list of up to 25 destinations with outstanding catch-up.
                These are the lexicographically first destinations which are
                lexicographically greater than after_destination (if provided).
        """
        time = self.hs.get_clock().time_msec()

        return await self.db_pool.runInteraction(
            "get_catch_up_outstanding_destinations",
            self._get_catch_up_outstanding_destinations_txn,
            time,
            after_destination,
        )

    @staticmethod
    def _get_catch_up_outstanding_destinations_txn(
        txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str]
    ) -> List[str]:
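        # A destination is due for retry once retry_last_ts + retry_interval
        # has passed; a NULL retry_last_ts means it has never failed.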
        q = """
            SELECT DISTINCT destination FROM destinations
            INNER JOIN destination_rooms USING (destination)
            WHERE
                stream_ordering > last_successful_stream_ordering
                AND destination > ?
                AND (
                    retry_last_ts IS NULL OR
                    retry_last_ts + retry_interval < ?
                )
            ORDER BY destination
            LIMIT 25
        """
        txn.execute(
            q,
            (
                # everything is lexicographically greater than "" so this gives
                # us the first batch of up to 25.
                after_destination or "",
                now_time_ms,
            ),
        )

        destinations = [row[0] for row in txn]
        return destinations