			Revert "Sort internal changes in changelog" Revert "Update CHANGES.md" Revert "1.49.0rc1" Revert "Revert "Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common` (#11505) (#11527)" Revert "Refactors in `_generate_sync_entry_for_rooms` (#11515)" Revert "Correctly register shutdown handler for presence workers (#11518)" Revert "Fix `ModuleApi.looping_background_call` for non-async functions (#11524)" Revert "Fix 'delete room' admin api to work on incomplete rooms (#11523)" Revert "Correctly ignore invites from ignored users (#11511)" Revert "Fix the test breakage introduced by #11435 as a result of concurrent PRs (#11522)" Revert "Stabilise support for MSC2918 refresh tokens as they have now been merged into the Matrix specification. (#11435)" Revert "Save the OIDC session ID (sid) with the device on login (#11482)" Revert "Add admin API to get some information about federation status (#11407)" Revert "Include bundled aggregations in /sync and related fixes (#11478)" Revert "Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common` (#11505)" Revert "Update backward extremity docs to make it clear that it does not indicate whether we have fetched an events' `prev_events` (#11469)" Revert "Support configuring the lifetime of non-refreshable access tokens separately to refreshable access tokens. (#11445)" Revert "Add type hints to `synapse/tests/rest/admin` (#11501)" Revert "Revert accidental commits to develop." Revert "Newsfile" Revert "Give `tests.server.setup_test_homeserver` (nominally!) the same behaviour" Revert "Move `tests.utils.setup_test_homeserver` to `tests.server`" Revert "Convert one of the `setup_test_homeserver`s to `make_test_homeserver_synchronous`" Revert "Disambiguate queries on `state_key` (#11497)" Revert "Comments on the /sync tentacles (#11494)" Revert "Clean up tests.storage.test_appservice (#11492)" Revert "Clean up `tests.storage.test_main` to remove use of legacy code. (#11493)" Revert "Clean up `tests.test_visibility` to remove legacy code. (#11495)" Revert "Minor cleanup on recently ported doc pages (#11466)" Revert "Add most of the missing type hints to `synapse.federation`. (#11483)" Revert "Avoid waiting for zombie processes in `synctl stop` (#11490)" Revert "Fix media repository failing when media store path contains symlinks (#11446)" Revert "Add type annotations to `tests.storage.test_appservice`. (#11488)" Revert "`scripts-dev/sign_json`: support for signing events (#11486)" Revert "Add MSC3030 experimental client and federation API endpoints to get the closest event to a given timestamp (#9445)" Revert "Port wiki pages to documentation website (#11402)" Revert "Add a license header and comment. (#11479)" Revert "Clean-up get_version_string (#11468)" Revert "Link background update controller docs to summary (#11475)" Revert "Additional type hints for config module. (#11465)" Revert "Register the login redirect endpoint for v3. (#11451)" Revert "Update openid.md" Revert "Remove mention of OIDC certification from Dex (#11470)" Revert "Add a note about huge pages to our Postgres doc (#11467)" Revert "Don't start Synapse master process if `worker_app` is set (#11416)" Revert "Expose worker & homeserver as entrypoints in `setup.py` (#11449)" Revert "Bundle relations of relations into the `/relations` result. 
(#11284)" Revert "Fix `LruCache` corruption bug with a `size_callback` that can return 0 (#11454)" Revert "Eliminate a few `Any`s in `LruCache` type hints (#11453)" Revert "Remove unnecessary `json.dumps` from `tests.rest.admin` (#11461)" Revert "Merge branch 'master' into develop" This reverts commit26b5d2320f. This reverts commitbce4220f38. This reverts commit966b5d0fa0. This reverts commit088d748f2c. This reverts commit14d593f72d. This reverts commit2a3ec6facf. This reverts commiteccc49d755. This reverts commitb1ecd19c5d. This reverts commit9c55dedc8c. This reverts commit2d42e586a8. This reverts commit2f053f3f82. This reverts commita15a893df8. This reverts commit8b4b153c9e. This reverts commit494ebd7347. This reverts commita77c369897. This reverts commit4eb77965cd. This reverts commit637df95de6. This reverts commite5f426cd54. This reverts commit8cd68b8102. This reverts commit6cae125e20. This reverts commit7be88fbf48. This reverts commitb3fd99b74a. This reverts commitf7ec6e7d9e. This reverts commit5640992d17. This reverts commitd26808dd85. This reverts commitf91624a595. This reverts commit16d39a5490. This reverts commit8a4c296987. This reverts commit49e1356ee3. This reverts commitd2279f471b. This reverts commitb50e39df57. This reverts commit858d80bf0f. This reverts commit435f044807. This reverts commitf61462e1be. This reverts commita6f1a3abec. This reverts commit84dc50e160. This reverts commited635d3285. This reverts commit7b62791e00. This reverts commit153194c771. This reverts commitf44d729d4c. This reverts commita265fbd397. This reverts commitb9fef1a7cd. This reverts commitb0eb64ff7b. This reverts commitf1795463bf. This reverts commit70cbb1a5e3. This reverts commit42bf020463. This reverts commit379f2650cf. This reverts commit7ff22d6da4. This reverts commit5a0b652d36. This reverts commit432a174bc1. This reverts commitb14f8a1baf, reversing changes made toe713855dca.
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import namedtuple
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

import attr
from canonicaljson import encode_canonical_json

from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import db_to_json
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.types import JsonDict
from synapse.util.caches.descriptors import cached

if TYPE_CHECKING:
    from synapse.server import HomeServer

db_binary_type = memoryview

logger = logging.getLogger(__name__)

_TransactionRow = namedtuple(
    "_TransactionRow",
    ("id", "transaction_id", "destination", "ts", "response_code", "response_json"),
)

_UpdateTransactionRow = namedtuple(
    "_UpdateTransactionRow", ("response_code", "response_json")
)

@attr.s(slots=True, frozen=True, auto_attribs=True)
class DestinationRetryTimings:
    """The current destination retry timing info for a remote server."""

    # The first time we tried and failed to reach the remote server, in ms.
    failure_ts: int

    # The last time we tried and failed to reach the remote server, in ms.
    retry_last_ts: int

    # How long to wait after the last attempt before trying to reach the
    # remote server again, in ms.
    retry_interval: int

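# A hypothetical worked example of how these timings evolve under the
# exponential-backoff scheme in synapse.util.retryutils (the values below are
# illustrative, not the real constants):
#
#   first failed attempt at t=1_000_000 ms:
#       failure_ts=1_000_000, retry_last_ts=1_000_000, retry_interval=60_000
#   still failing on a later attempt:
#       failure_ts stays at 1_000_000, retry_last_ts advances, and
#       retry_interval is multiplied by a backoff factor (up to a cap)
#   once a request succeeds:
#       the timings are cleared, and get_destination_retry_timings returns None
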
class TransactionWorkerStore(CacheInvalidationWorkerStore):
    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.run_background_tasks:
            self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)

    @wrap_as_background_process("cleanup_transactions")
    async def _cleanup_transactions(self) -> None:
        now = self._clock.time_msec()
        month_ago = now - 30 * 24 * 60 * 60 * 1000  # 30 days, in ms

        def _cleanup_transactions_txn(txn):
            txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,))

        await self.db_pool.runInteraction(
            "_cleanup_transactions", _cleanup_transactions_txn
        )

    async def get_received_txn_response(
        self, transaction_id: str, origin: str
    ) -> Optional[Tuple[int, JsonDict]]:
        """For an incoming transaction from a given origin, check if we have
        already responded to it. If so, return the response code and response
        body (as a dict).

        Args:
            transaction_id
            origin

        Returns:
            None if we have not previously responded to this transaction;
            otherwise a 2-tuple of (response code, response body).
        """

        return await self.db_pool.runInteraction(
            "get_received_txn_response",
            self._get_received_txn_response,
            transaction_id,
            origin,
        )

    def _get_received_txn_response(self, txn, transaction_id, origin):
        result = self.db_pool.simple_select_one_txn(
            txn,
            table="received_transactions",
            keyvalues={"transaction_id": transaction_id, "origin": origin},
            retcols=(
                "transaction_id",
                "origin",
                "ts",
                "response_code",
                "response_json",
                "has_been_referenced",
            ),
            allow_none=True,
        )

        if result and result["response_code"]:
            return result["response_code"], db_to_json(result["response_json"])
        else:
            return None

    async def set_received_txn_response(
        self, transaction_id: str, origin: str, code: int, response_dict: JsonDict
    ) -> None:
        """Persist the response we returned for an incoming transaction, so
        that we can return the same response for any retransmission with the
        same transaction_id and origin.

        Args:
            transaction_id: The incoming transaction ID.
            origin: The origin server.
            code: The response code.
            response_dict: The response, to be encoded into JSON.
        """

        await self.db_pool.simple_upsert(
            table="received_transactions",
            keyvalues={
                "transaction_id": transaction_id,
                "origin": origin,
            },
            values={},
            insertion_values={
                "response_code": code,
                "response_json": db_binary_type(encode_canonical_json(response_dict)),
                "ts": self._clock.time_msec(),
            },
            desc="set_received_txn_response",
        )

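    # A sketch of the dedup flow these two methods support (hypothetical caller
    # code; the real call sites are in the federation request handling):
    #
    #   cached = await store.get_received_txn_response(txn_id, origin)
    #   if cached is not None:
    #       code, body = cached  # replay the response we sent last time
    #   else:
    #       code, body = await handle_transaction(...)  # hypothetical handler
    #       await store.set_received_txn_response(txn_id, origin, code, body)
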
    @cached(max_entries=10000)
    async def get_destination_retry_timings(
        self,
        destination: str,
    ) -> Optional[DestinationRetryTimings]:
        """Gets the current retry timings (if any) for a given destination.

        Args:
            destination

        Returns:
            None if not retrying, otherwise the DestinationRetryTimings for
            the destination.
        """

        result = await self.db_pool.runInteraction(
            "get_destination_retry_timings",
            self._get_destination_retry_timings,
            destination,
        )

        return result

    def _get_destination_retry_timings(
        self, txn, destination: str
    ) -> Optional[DestinationRetryTimings]:
        result = self.db_pool.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )

        # check we have a row and retry_last_ts is not null or zero
        # (retry_last_ts can't be negative)
        if result and result["retry_last_ts"]:
            return DestinationRetryTimings(**result)
        else:
            return None

    async def set_destination_retry_timings(
        self,
        destination: str,
        failure_ts: Optional[int],
        retry_last_ts: int,
        retry_interval: int,
    ) -> None:
        """Sets the current retry timings for a given destination.
        Both timings should be zero if retrying is no longer occurring.

        Args:
            destination
            failure_ts: when the server started failing (ms since epoch)
            retry_last_ts: time of last retry attempt in unix epoch ms
            retry_interval: how long until next retry in ms
        """

        if self.database_engine.can_native_upsert:
            return await self.db_pool.runInteraction(
                "set_destination_retry_timings",
                self._set_destination_retry_timings_native,
                destination,
                failure_ts,
                retry_last_ts,
                retry_interval,
                db_autocommit=True,  # Safe as it's a single upsert
            )
        else:
            return await self.db_pool.runInteraction(
                "set_destination_retry_timings",
                self._set_destination_retry_timings_emulated,
                destination,
                failure_ts,
                retry_last_ts,
                retry_interval,
            )

    def _set_destination_retry_timings_native(
        self, txn, destination, failure_ts, retry_last_ts, retry_interval
    ):
        assert self.database_engine.can_native_upsert

        # Upsert retry time interval if retry_interval is zero (i.e. we're
        # resetting it) or greater than the existing retry interval.
        #
        # WARNING: This is executed in autocommit, so we shouldn't add any more
        # SQL calls in here (without being very careful).
        sql = """
            INSERT INTO destinations (
                destination, failure_ts, retry_last_ts, retry_interval
            )
                VALUES (?, ?, ?, ?)
            ON CONFLICT (destination) DO UPDATE SET
                    failure_ts = EXCLUDED.failure_ts,
                    retry_last_ts = EXCLUDED.retry_last_ts,
                    retry_interval = EXCLUDED.retry_interval
                WHERE
                    EXCLUDED.retry_interval = 0
                    OR destinations.retry_interval IS NULL
                    OR destinations.retry_interval < EXCLUDED.retry_interval
        """

        txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))

        self._invalidate_cache_and_stream(
            txn, self.get_destination_retry_timings, (destination,)
        )

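    # A worked example of the conditional upsert above (values hypothetical):
    #
    #   existing row: retry_interval=60000
    #     new retry_interval=30000  -> WHERE clause false, row left alone
    #                                  (don't shorten a backoff that a racing
    #                                  worker has already extended)
    #     new retry_interval=120000 -> row updated (the longer backoff wins)
    #     new retry_interval=0      -> row updated (explicit reset on success)
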
    def _set_destination_retry_timings_emulated(
        self, txn, destination, failure_ts, retry_last_ts, retry_interval
    ):
        self.database_engine.lock_table(txn, "destinations")

        # We need to be careful here as the data may have changed from under us
        # due to a worker setting the timings.

        prev_row = self.db_pool.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )

        if not prev_row:
            self.db_pool.simple_insert_txn(
                txn,
                table="destinations",
                values={
                    "destination": destination,
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )
        elif (
            retry_interval == 0
            or prev_row["retry_interval"] is None
            or prev_row["retry_interval"] < retry_interval
        ):
            self.db_pool.simple_update_one_txn(
                txn,
                "destinations",
                keyvalues={"destination": destination},
                updatevalues={
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )

        self._invalidate_cache_and_stream(
            txn, self.get_destination_retry_timings, (destination,)
        )

    async def store_destination_rooms_entries(
        self,
        destinations: Iterable[str],
        room_id: str,
        stream_ordering: int,
    ) -> None:
        """
        Updates or creates `destination_rooms` entries in batch for a single event.

        Args:
            destinations: list of destinations
            room_id: the room_id of the event
            stream_ordering: the stream_ordering of the event
        """

        # Ensure a `destinations` row exists for each destination first, as
        # `destination_rooms` rows reference them.
        await self.db_pool.simple_upsert_many(
            table="destinations",
            key_names=("destination",),
            key_values=[(d,) for d in destinations],
            value_names=[],
            value_values=[],
            desc="store_destination_rooms_entries_dests",
        )

        rows = [(destination, room_id) for destination in destinations]
        await self.db_pool.simple_upsert_many(
            table="destination_rooms",
            key_names=("destination", "room_id"),
            key_values=rows,
            value_names=["stream_ordering"],
            value_values=[(stream_ordering,)] * len(rows),
            desc="store_destination_rooms_entries_rooms",
        )

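    # A sketch of a call into this method (hypothetical values; in practice it
    # is driven from the federation sender when an event is to be sent):
    #
    #   await store.store_destination_rooms_entries(
    #       ["remote1.example.com", "remote2.example.com"],
    #       "!room:example.com",
    #       stream_ordering=12345,
    #   )
    #
    # Afterwards each destination's `destination_rooms` row for the room points
    # at this stream_ordering, which the catch-up queries below compare against
    # `last_successful_stream_ordering`.
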
    async def get_destination_last_successful_stream_ordering(
        self, destination: str
    ) -> Optional[int]:
        """
        Gets the stream ordering of the PDU most-recently successfully sent
        to the specified destination, or None if this information has not been
        tracked yet.

        Args:
            destination: the destination to query
        """
        return await self.db_pool.simple_select_one_onecol(
            "destinations",
            {"destination": destination},
            "last_successful_stream_ordering",
            allow_none=True,
            desc="get_last_successful_stream_ordering",
        )

    async def set_destination_last_successful_stream_ordering(
        self, destination: str, last_successful_stream_ordering: int
    ) -> None:
        """
        Marks that we have successfully sent the PDUs up to and including the
        one specified.

        Args:
            destination: the destination we have successfully sent to
            last_successful_stream_ordering: the stream_ordering of the most
                recent successfully-sent PDU
        """
        await self.db_pool.simple_upsert(
            "destinations",
            keyvalues={"destination": destination},
            values={"last_successful_stream_ordering": last_successful_stream_ordering},
            desc="set_last_successful_stream_ordering",
        )

    async def get_catch_up_room_event_ids(
        self,
        destination: str,
        last_successful_stream_ordering: int,
    ) -> List[str]:
        """
        Returns at most 50 event IDs, in stream_ordering order, for the oldest
        events that have not yet been sent to the destination.

        Args:
            destination: the destination in question
            last_successful_stream_ordering: the stream_ordering of the
                most-recently successfully-transmitted event to the destination

        Returns:
            list of event_ids
        """
        return await self.db_pool.runInteraction(
            "get_catch_up_room_event_ids",
            self._get_catch_up_room_event_ids_txn,
            destination,
            last_successful_stream_ordering,
        )

    @staticmethod
    def _get_catch_up_room_event_ids_txn(
        txn: LoggingTransaction,
        destination: str,
        last_successful_stream_ordering: int,
    ) -> List[str]:
        q = """
                SELECT event_id FROM destination_rooms
                 JOIN events USING (stream_ordering)
                WHERE destination = ?
                  AND stream_ordering > ?
                ORDER BY stream_ordering
                LIMIT 50
            """
        txn.execute(
            q,
            (destination, last_successful_stream_ordering),
        )
        event_ids = [row[0] for row in txn]
        return event_ids

    async def get_catch_up_outstanding_destinations(
        self, after_destination: Optional[str]
    ) -> List[str]:
        """
        Gets at most 25 destinations which have outstanding PDUs to be caught
        up on, and which are not currently being backed off from.

        Args:
            after_destination:
                If provided, all destinations must be lexicographically greater
                than this one.

        Returns:
            list of up to 25 destinations with outstanding catch-up.
                These are the lexicographically first destinations which are
                lexicographically greater than after_destination (if provided).
        """
        time = self.hs.get_clock().time_msec()

        return await self.db_pool.runInteraction(
            "get_catch_up_outstanding_destinations",
            self._get_catch_up_outstanding_destinations_txn,
            time,
            after_destination,
        )

    @staticmethod
    def _get_catch_up_outstanding_destinations_txn(
        txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str]
    ) -> List[str]:
        q = """
            SELECT DISTINCT destination FROM destinations
            INNER JOIN destination_rooms USING (destination)
                WHERE
                    stream_ordering > last_successful_stream_ordering
                    AND destination > ?
                    AND (
                        retry_last_ts IS NULL OR
                        retry_last_ts + retry_interval < ?
                    )
                    ORDER BY destination
                    LIMIT 25
        """
        txn.execute(
            q,
            (
                # everything is lexicographically greater than "" so this gives
                # us the first batch of up to 25.
                after_destination or "",
                now_time_ms,
            ),
        )

        destinations = [row[0] for row in txn]
        return destinations
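
# A sketch of how the catch-up methods above fit together, as a hypothetical
# driver loop (the real logic lives in the federation sender):
#
#   after = None
#   while True:
#       dests = await store.get_catch_up_outstanding_destinations(after)
#       if not dests:
#           break
#       for dest in dests:
#           last = await store.get_destination_last_successful_stream_ordering(dest)
#           event_ids = await store.get_catch_up_room_event_ids(dest, last or 0)
#           # ... attempt to send those events, and on success:
#           # await store.set_destination_last_successful_stream_ordering(dest, new_last)
#       after = dests[-1]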