Avoid executing no-op queries. (#16583)

If simple_{insert,upsert,update}_many_txn is called without any data
to modify, then return instead of executing the query.

This matches the behavior of simple_{select,delete}_many_txn.
This commit is contained in:
Patrick Cloke 2023-11-07 14:00:25 -05:00 committed by GitHub
parent ec9ff389f4
commit 9738b1c497
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 39 additions and 39 deletions

1
changelog.d/16583.misc Normal file
View File

@ -0,0 +1 @@
Avoid executing no-op queries.

View File

@ -1117,7 +1117,7 @@ class DatabasePool:
txn: LoggingTransaction, txn: LoggingTransaction,
table: str, table: str,
keys: Collection[str], keys: Collection[str],
values: Iterable[Iterable[Any]], values: Collection[Iterable[Any]],
) -> None: ) -> None:
"""Executes an INSERT query on the named table. """Executes an INSERT query on the named table.
@ -1130,6 +1130,9 @@ class DatabasePool:
keys: list of column names keys: list of column names
values: for each row, a list of values in the same order as `keys` values: for each row, a list of values in the same order as `keys`
""" """
# If there's nothing to insert, then skip executing the query.
if not values:
return
if isinstance(txn.database_engine, PostgresEngine): if isinstance(txn.database_engine, PostgresEngine):
# We use `execute_values` as it can be a lot faster than `execute_batch`, # We use `execute_values` as it can be a lot faster than `execute_batch`,
@ -1455,7 +1458,7 @@ class DatabasePool:
key_names: Collection[str], key_names: Collection[str],
key_values: Collection[Iterable[Any]], key_values: Collection[Iterable[Any]],
value_names: Collection[str], value_names: Collection[str],
value_values: Iterable[Iterable[Any]], value_values: Collection[Iterable[Any]],
) -> None: ) -> None:
""" """
Upsert, many times. Upsert, many times.
@ -1468,6 +1471,19 @@ class DatabasePool:
value_values: A list of each row's value column values. value_values: A list of each row's value column values.
Ignored if value_names is empty. Ignored if value_names is empty.
""" """
# If there's nothing to upsert, then skip executing the query.
if not key_values:
return
# No value columns, therefore make a blank list so that the following
# zip() works correctly.
if not value_names:
value_values = [() for x in range(len(key_values))]
elif len(value_values) != len(key_values):
raise ValueError(
f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number."
)
if table not in self._unsafe_to_upsert_tables: if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_many_txn_native_upsert( return self.simple_upsert_many_txn_native_upsert(
txn, table, key_names, key_values, value_names, value_values txn, table, key_names, key_values, value_names, value_values
@ -1502,10 +1518,6 @@ class DatabasePool:
value_values: A list of each row's value column values. value_values: A list of each row's value column values.
Ignored if value_names is empty. Ignored if value_names is empty.
""" """
# No value columns, therefore make a blank list so that the following
# zip() works correctly.
if not value_names:
value_values = [() for x in range(len(key_values))]
# Lock the table just once, to prevent it being done once per row. # Lock the table just once, to prevent it being done once per row.
# Note that, according to Postgres' documentation, once obtained, # Note that, according to Postgres' documentation, once obtained,
@ -1543,10 +1555,7 @@ class DatabasePool:
allnames.extend(value_names) allnames.extend(value_names)
if not value_names: if not value_names:
# No value columns, therefore make a blank list so that the
# following zip() works correctly.
latter = "NOTHING" latter = "NOTHING"
value_values = [() for x in range(len(key_values))]
else: else:
latter = "UPDATE SET " + ", ".join( latter = "UPDATE SET " + ", ".join(
k + "=EXCLUDED." + k for k in value_names k + "=EXCLUDED." + k for k in value_names
@ -1910,6 +1919,7 @@ class DatabasePool:
Returns: Returns:
The results as a list of tuples. The results as a list of tuples.
""" """
# If there's nothing to select, then skip executing the query.
if not iterable: if not iterable:
return [] return []
@ -2044,6 +2054,9 @@ class DatabasePool:
raise ValueError( raise ValueError(
f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number." f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number."
) )
# If there is nothing to update, then skip executing the query.
if not key_values:
return
# List of tuples of (value values, then key values) # List of tuples of (value values, then key values)
# (This matches the order needed for the query) # (This matches the order needed for the query)
@ -2278,6 +2291,7 @@ class DatabasePool:
Returns: Returns:
Number rows deleted Number rows deleted
""" """
# If there's nothing to delete, then skip executing the query.
if not values: if not values:
return 0 return 0

View File

@ -703,7 +703,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
key_names=("destination", "user_id"), key_names=("destination", "user_id"),
key_values=[(destination, user_id) for user_id, _ in rows], key_values=[(destination, user_id) for user_id, _ in rows],
value_names=("stream_id",), value_names=("stream_id",),
value_values=((stream_id,) for _, stream_id in rows), value_values=[(stream_id,) for _, stream_id in rows],
) )
# Delete all sent outbound pokes # Delete all sent outbound pokes

View File

@ -1476,7 +1476,7 @@ class PersistEventsStore:
txn, txn,
table="event_json", table="event_json",
keys=("event_id", "room_id", "internal_metadata", "json", "format_version"), keys=("event_id", "room_id", "internal_metadata", "json", "format_version"),
values=( values=[
( (
event.event_id, event.event_id,
event.room_id, event.room_id,
@ -1485,7 +1485,7 @@ class PersistEventsStore:
event.format_version, event.format_version,
) )
for event, _ in events_and_contexts for event, _ in events_and_contexts
), ],
) )
self.db_pool.simple_insert_many_txn( self.db_pool.simple_insert_many_txn(
@ -1508,7 +1508,7 @@ class PersistEventsStore:
"state_key", "state_key",
"rejection_reason", "rejection_reason",
), ),
values=( values=[
( (
self._instance_name, self._instance_name,
event.internal_metadata.stream_ordering, event.internal_metadata.stream_ordering,
@ -1527,7 +1527,7 @@ class PersistEventsStore:
context.rejected, context.rejected,
) )
for event, context in events_and_contexts for event, context in events_and_contexts
), ],
) )
# If we're persisting an unredacted event we go and ensure # If we're persisting an unredacted event we go and ensure
@ -1550,11 +1550,11 @@ class PersistEventsStore:
txn, txn,
table="state_events", table="state_events",
keys=("event_id", "room_id", "type", "state_key"), keys=("event_id", "room_id", "type", "state_key"),
values=( values=[
(event.event_id, event.room_id, event.type, event.state_key) (event.event_id, event.room_id, event.type, event.state_key)
for event, _ in events_and_contexts for event, _ in events_and_contexts
if event.is_state() if event.is_state()
), ],
) )
def _store_rejected_events_txn( def _store_rejected_events_txn(

View File

@ -2268,7 +2268,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
txn, txn,
table="partial_state_rooms_servers", table="partial_state_rooms_servers",
keys=("room_id", "server_name"), keys=("room_id", "server_name"),
values=((room_id, s) for s in servers), values=[(room_id, s) for s in servers],
) )
self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
self._invalidate_cache_and_stream( self._invalidate_cache_and_stream(

View File

@ -106,7 +106,7 @@ class SearchWorkerStore(SQLBaseStore):
txn, txn,
table="event_search", table="event_search",
keys=("event_id", "room_id", "key", "value"), keys=("event_id", "room_id", "key", "value"),
values=( values=[
( (
entry.event_id, entry.event_id,
entry.room_id, entry.room_id,
@ -114,7 +114,7 @@ class SearchWorkerStore(SQLBaseStore):
_clean_value_for_search(entry.value), _clean_value_for_search(entry.value),
) )
for entry in entries for entry in entries
), ],
) )
else: else:

View File

@ -189,17 +189,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
) )
if USE_POSTGRES_FOR_TESTS: if USE_POSTGRES_FOR_TESTS:
self.mock_execute_values.assert_called_once_with( self.mock_execute_values.assert_not_called()
self.mock_txn,
"INSERT INTO tablename (col1, col2) VALUES ?",
[],
template=None,
fetch=False,
)
else: else:
self.mock_txn.executemany.assert_called_once_with( self.mock_txn.executemany.assert_not_called()
"INSERT INTO tablename (col1, col2) VALUES(?, ?)", []
)
@defer.inlineCallbacks @defer.inlineCallbacks
def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]: def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]:
@ -393,7 +385,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
) )
@defer.inlineCallbacks @defer.inlineCallbacks
def test_update_many_no_values( def test_update_many_no_iterable(
self, self,
) -> Generator["defer.Deferred[object]", object, None]: ) -> Generator["defer.Deferred[object]", object, None]:
yield defer.ensureDeferred( yield defer.ensureDeferred(
@ -408,16 +400,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
) )
if USE_POSTGRES_FOR_TESTS: if USE_POSTGRES_FOR_TESTS:
self.mock_execute_batch.assert_called_once_with( self.mock_execute_batch.assert_not_called()
self.mock_txn,
"UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?",
[],
)
else: else:
self.mock_txn.executemany.assert_called_once_with( self.mock_txn.executemany.assert_not_called()
"UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?",
[],
)
@defer.inlineCallbacks @defer.inlineCallbacks
def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]: def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]: