Format files with Ruff (#17643)

I thought `ruff check` would also format, but it doesn't.

This change runs `ruff format` in CI and in the dev scripts. The first commit is just a
run of `ruff format .` in the root directory.
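
The two commands are complementary rather than overlapping, which is why CI now needs both steps. A minimal sketch (my illustration, not part of the commit) of driving both from Python, assuming `ruff` is installed and on PATH:

import subprocess

# `ruff check` lints; with --fix it applies autofixes for lint rules,
# but it never reformats code layout.
subprocess.run(["ruff", "check", "--fix", "."], check=False)

# `ruff format` is the black-compatible formatter; it only rewrites layout.
subprocess.run(["ruff", "format", "."], check=False)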
Quentin Gliech 2024-09-02 13:39:04 +02:00 committed by GitHub
parent 709b7363fe
commit 7d52ce7d4b
152 changed files with 526 additions and 492 deletions


@ -29,10 +29,14 @@ jobs:
with:
install-project: "false"
- name: Run ruff
- name: Run ruff check
continue-on-error: true
run: poetry run ruff check --fix .
- name: Run ruff format
continue-on-error: true
run: poetry run ruff format --quiet .
- run: cargo clippy --all-features --fix -- -D warnings
continue-on-error: true


@ -131,9 +131,12 @@ jobs:
with:
install-project: "false"
- name: Check style
- name: Run ruff check
run: poetry run ruff check --output-format=github .
- name: Run ruff format
run: poetry run ruff format --check .
lint-mypy:
runs-on: ubuntu-latest
name: Typechecking

changelog.d/17643.misc (new file)

@ -0,0 +1 @@
Replace `isort` and `black` with `ruff`.


@ -22,6 +22,7 @@
#
"""Starts a synapse client console."""
import argparse
import binascii
import cmd


@ -31,6 +31,7 @@ Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. Se
until then, this script is a best effort to stop us from introducing type coercion bugs
(like the infamous stringy power levels fixed in room version 10).
"""
import argparse
import contextlib
import functools


@ -109,6 +109,9 @@ set -x
# --quiet suppresses the update check.
ruff check --quiet --fix "${files[@]}"
# Reformat Python code.
ruff format --quiet "${files[@]}"
# Catch any common programming mistakes in Rust code.
#
# --bins, --examples, --lib, --tests combined explicitly disable checking


@ -20,8 +20,7 @@
#
#
"""An interactive script for doing a release. See `cli()` below.
"""
"""An interactive script for doing a release. See `cli()` below."""
import glob
import json

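This docstring change repeats across many files below: `ruff format` joins a docstring whose closing quotes sat on their own line back onto a single line, and drops the stray space after the opening quotes. A toy sketch of the rewrite:

def before():
    """ An interactive script for doing a release. See `cli()` below.
    """

def after():
    """An interactive script for doing a release. See `cli()` below."""
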

@ -13,8 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains *incomplete* type hints for txredisapi.
"""
"""Contains *incomplete* type hints for txredisapi."""
from typing import Any, List, Optional, Type, Union
from twisted.internet import protocol


@ -20,8 +20,7 @@
#
#
""" This is an implementation of a Matrix homeserver.
"""
"""This is an implementation of a Matrix homeserver."""
import os
import sys


@ -171,7 +171,7 @@ def elide_http_methods_if_unconflicting(
"""
def paths_to_methods_dict(
methods_and_paths: Iterable[Tuple[str, str]]
methods_and_paths: Iterable[Tuple[str, str]],
) -> Dict[str, Set[str]]:
"""
Given (method, path) pairs, produces a dict from path to set of methods
@ -201,7 +201,7 @@ def elide_http_methods_if_unconflicting(
def simplify_path_regexes(
registrations: Dict[Tuple[str, str], EndpointDescription]
registrations: Dict[Tuple[str, str], EndpointDescription],
) -> Dict[Tuple[str, str], EndpointDescription]:
"""
Simplify all the path regexes for the dict of endpoint descriptions,

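The two hunks above show the single most common rewrite in this diff: when a signature's parameter list spans multiple lines, `ruff format` adds a trailing comma after the last parameter where the previous formatting had none. A self-contained sketch of the pattern (stub type, illustrative only):

from typing import Dict, Tuple

EndpointDescription = str  # stand-in type for this sketch

# Before: the split single-parameter list carried no trailing comma.
def simplify_before(
    registrations: Dict[Tuple[str, str], EndpointDescription]
) -> Dict[Tuple[str, str], EndpointDescription]:
    return registrations

# After `ruff format`: a trailing comma is added to the multi-line list.
def simplify_after(
    registrations: Dict[Tuple[str, str], EndpointDescription],
) -> Dict[Tuple[str, str], EndpointDescription]:
    return registrations
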

@ -40,6 +40,7 @@ from synapse.storage.engines import create_engine
class ReviewConfig(RootConfig):
"A config class that just pulls out the database config"
config_classes = [DatabaseConfig]
@ -160,7 +161,11 @@ def main() -> None:
with make_conn(database_config, engine, "review_recent_signups") as db_conn:
# This generates a type of Cursor, not LoggingTransaction.
user_infos = get_recent_users(db_conn.cursor(), since_ms, exclude_users_with_appservice) # type: ignore[arg-type]
user_infos = get_recent_users(
db_conn.cursor(),
since_ms, # type: ignore[arg-type]
exclude_users_with_appservice,
)
for user_info in user_infos:
if exclude_users_with_email and user_info.emails:


@ -717,9 +717,7 @@ class Porter:
return
# Check if all background updates are done, abort if not.
updates_complete = (
await self.sqlite_store.db_pool.updates.has_completed_background_updates()
)
updates_complete = await self.sqlite_store.db_pool.updates.has_completed_background_updates()
if not updates_complete:
end_error = (
"Pending background updates exist in the SQLite3 database."
@ -1095,11 +1093,11 @@ class Porter:
return done, remaining + done
async def _setup_state_group_id_seq(self) -> None:
curr_id: Optional[int] = (
await self.sqlite_store.db_pool.simple_select_one_onecol(
curr_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
)
)
if not curr_id:
return
@ -1186,14 +1184,14 @@ class Porter:
)
async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id: Optional[int] = (
await self.sqlite_store.db_pool.simple_select_one_onecol(
curr_chain_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains",
keyvalues={},
retcol="MAX(chain_id)",
allow_none=True,
)
)
def r(txn: LoggingTransaction) -> None:
# Presumably there is at least one row in event_auth_chains.


@ -20,6 +20,7 @@
#
"""Contains the URL paths to prefix various aspects of the server with."""
import hmac
from hashlib import sha256
from urllib.parse import urlencode


@ -54,6 +54,7 @@ UP & quit +---------- YES SUCCESS
This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
import logging
from typing import (
TYPE_CHECKING,


@ -200,16 +200,13 @@ class KeyConfig(Config):
)
form_secret = 'form_secret: "%s"' % random_string_with_symbols(50)
return (
"""\
return """\
%(macaroon_secret_key)s
%(form_secret)s
signing_key_path: "%(base_key_name)s.signing.key"
trusted_key_servers:
- server_name: "matrix.org"
"""
% locals()
)
""" % locals()
def read_signing_keys(self, signing_key_path: str, name: str) -> List[SigningKey]:
"""Read the signing keys in the given path.
@ -249,7 +246,9 @@ class KeyConfig(Config):
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(key_id, key_bytes) # type: ignore[assignment]
verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(
key_id, key_bytes
) # type: ignore[assignment]
verify_key.expired = key_data["expired_ts"]
keys[key_id] = verify_key
else:

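The same reshuffle hits every config-stub method: where black wrapped a `"""...""" % locals()` expression in parentheses, `ruff format` keeps the `%` operand on the line with the closing quotes. A runnable sketch with hypothetical helper names:

def default_config_before(server_name: str) -> str:
    return (
        """\
server_name: "%(server_name)s"
"""
        % locals()
    )

def default_config_after(server_name: str) -> str:
    return """\
server_name: "%(server_name)s"
""" % locals()

assert default_config_before("example.com") == default_config_after("example.com")

The many SQL-string hunks later in the diff (`""" % (clause,)`, `""".format(where_clause)`) are the same rule applied to `%` and `.format()` arguments.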

@ -157,12 +157,9 @@ class LoggingConfig(Config):
self, config_dir_path: str, server_name: str, **kwargs: Any
) -> str:
log_config = os.path.join(config_dir_path, server_name + ".log.config")
return (
"""\
return """\
log_config: "%(log_config)s"
"""
% locals()
)
""" % locals()
def read_arguments(self, args: argparse.Namespace) -> None:
if args.no_redirect_stdio is not None:


@ -828,13 +828,10 @@ class ServerConfig(Config):
).lstrip()
if not unsecure_listeners:
unsecure_http_bindings = (
"""- port: %(unsecure_port)s
unsecure_http_bindings = """- port: %(unsecure_port)s
tls: false
type: http
x_forwarded: true"""
% locals()
)
x_forwarded: true""" % locals()
if not open_private_ports:
unsecure_http_bindings += (
@ -853,16 +850,13 @@ class ServerConfig(Config):
if not secure_listeners:
secure_http_bindings = ""
return (
"""\
return """\
server_name: "%(server_name)s"
pid_file: %(pid_file)s
listeners:
%(secure_http_bindings)s
%(unsecure_http_bindings)s
"""
% locals()
)
""" % locals()
def read_arguments(self, args: argparse.Namespace) -> None:
if args.manhole is not None:


@ -328,10 +328,11 @@ class WorkerConfig(Config):
)
# type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
self.instance_map: Dict[
str, InstanceLocationConfig
] = parse_and_validate_mapping(
instance_map, InstanceLocationConfig # type: ignore[arg-type]
self.instance_map: Dict[str, InstanceLocationConfig] = (
parse_and_validate_mapping(
instance_map,
InstanceLocationConfig, # type: ignore[arg-type]
)
)
# Map from type of streams to source, c.f. WriterLocations.


@ -887,7 +887,8 @@ def _check_power_levels(
raise SynapseError(400, f"{v!r} must be an integer.")
if k in {"events", "notifications", "users"}:
if not isinstance(v, collections.abc.Mapping) or not all(
type(v) is int for v in v.values() # noqa: E721
type(v) is int
for v in v.values() # noqa: E721
):
raise SynapseError(
400,


@ -80,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:
# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(
f: Optional[Callable[P, R]]
f: Optional[Callable[P, R]],
) -> Optional[Callable[P, Awaitable[R]]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.


@ -504,7 +504,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
def _encode_state_group_delta(
state_group_delta: Dict[Tuple[int, int], StateMap[str]]
state_group_delta: Dict[Tuple[int, int], StateMap[str]],
) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]:
if not state_group_delta:
return []
@ -517,7 +517,7 @@ def _encode_state_group_delta(
def _decode_state_group_delta(
input: List[Tuple[int, int, List[Tuple[str, str, str]]]]
input: List[Tuple[int, int, List[Tuple[str, str, str]]]],
) -> Dict[Tuple[int, int], StateMap[str]]:
if not input:
return {}
@ -544,7 +544,7 @@ def _encode_state_dict(
def _decode_state_dict(
input: Optional[List[Tuple[str, str, str]]]
input: Optional[List[Tuple[str, str, str]]],
) -> Optional[StateMap[str]]:
"""Decodes a state dict encoded using `_encode_state_dict` above"""
if input is None:


@ -19,5 +19,4 @@
#
#
""" This package includes all the federation specific logic.
"""
"""This package includes all the federation specific logic."""


@ -859,7 +859,6 @@ class FederationMediaThumbnailServlet(BaseFederationServerServlet):
request: SynapseRequest,
media_id: str,
) -> None:
width = parse_integer(request, "width", required=True)
height = parse_integer(request, "height", required=True)
method = parse_string(request, "method", "scale")


@ -118,11 +118,11 @@ class AccountHandler:
}
if self._use_account_validity_in_account_status:
status["org.matrix.expired"] = (
await self._account_validity_handler.is_user_expired(
status[
"org.matrix.expired"
] = await self._account_validity_handler.is_user_expired(
user_id.to_string()
)
)
return status

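The hunk above shows how the two formatters disagree about long assignments to a subscripted target: black parenthesized the awaited right-hand side, while this version of `ruff format` splits inside the subscript brackets. A runnable sketch with a stub coroutine (hypothetical names):

import asyncio
from typing import Dict

async def is_user_expired(user_id: str) -> bool:
    return False  # stub standing in for the account-validity handler

async def demo() -> None:
    status: Dict[str, bool] = {}

    # Before: the awaited value was wrapped in parentheses.
    status["org.matrix.expired"] = (
        await is_user_expired("@alice:example.com")
    )

    # After `ruff format`: the line break moves into the subscript.
    status[
        "org.matrix.expired"
    ] = await is_user_expired("@alice:example.com")

asyncio.run(demo())

Note that a few later hunks (for example in the sliding-sync servlet) flip the same shapes in the opposite direction, so which side gets split evidently depends on the width and shape of the expression.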

@ -197,15 +197,16 @@ class AdminHandler:
# events that we have and then filtering, this isn't the most
# efficient method perhaps but it does guarantee we get everything.
while True:
events, _ = (
await self._store.paginate_room_events_by_topological_ordering(
(
events,
_,
) = await self._store.paginate_room_events_by_topological_ordering(
room_id=room_id,
from_key=from_key,
to_key=to_key,
limit=100,
direction=Direction.FORWARDS,
)
)
if not events:
break

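Tuple-unpacking assignments from a long awaited call get a mirrored treatment, as above: instead of parenthesizing the call, `ruff format` parenthesizes and explodes the target tuple. A runnable sketch with a stub coroutine:

import asyncio
from typing import List, Tuple

async def paginate(room_id: str) -> Tuple[List[str], int]:
    return ["$event"], 0  # stub standing in for the datastore call

async def demo() -> None:
    # Before: black parenthesized the awaited call.
    events, _ = (
        await paginate("!room:example.com")
    )

    # After `ruff format`: the target tuple is parenthesized and split.
    (
        events,
        _,
    ) = await paginate("!room:example.com")

asyncio.run(demo())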

@ -166,8 +166,7 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]:
if "country" not in identifier or (
# The specification requires a "phone" field, while Synapse used to require a "number"
# field. Accept both for backwards compatibility.
"phone" not in identifier
and "number" not in identifier
"phone" not in identifier and "number" not in identifier
):
raise SynapseError(
400, "Invalid phone-type identifier", errcode=Codes.INVALID_PARAM


@ -265,9 +265,9 @@ class DirectoryHandler:
async def get_association(self, room_alias: RoomAlias) -> JsonDict:
room_id = None
if self.hs.is_mine(room_alias):
result: Optional[RoomAliasMapping] = (
await self.get_association_from_room_alias(room_alias)
)
result: Optional[
RoomAliasMapping
] = await self.get_association_from_room_alias(room_alias)
if result:
room_id = result.room_id
@ -512,13 +512,11 @@ class DirectoryHandler:
raise SynapseError(403, "Not allowed to publish room")
# Check if publishing is blocked by a third party module
allowed_by_third_party_rules = (
await (
allowed_by_third_party_rules = await (
self._third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
)
)
)
if not allowed_by_third_party_rules:
raise SynapseError(403, "Not allowed to publish room")


@ -1001,12 +1001,12 @@ class FederationHandler:
)
if include_auth_user_id:
event_content[EventContentFields.AUTHORISING_USER] = (
await self._event_auth_handler.get_user_which_could_invite(
event_content[
EventContentFields.AUTHORISING_USER
] = await self._event_auth_handler.get_user_which_could_invite(
room_id,
state_ids,
)
)
builder = self.event_builder_factory.for_room_version(
room_version,


@ -21,6 +21,7 @@
#
"""Utilities for interacting with Identity Servers"""
import logging
import urllib.parse
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple


@ -1225,10 +1225,9 @@ class EventCreationHandler:
)
if prev_event_ids is not None:
assert (
len(prev_event_ids) <= 10
), "Attempting to create an event with %i prev_events" % (
len(prev_event_ids),
assert len(prev_event_ids) <= 10, (
"Attempting to create an event with %i prev_events"
% (len(prev_event_ids),)
)
else:
prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)

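Asserts with long messages are restyled as in the hunk above: the condition returns to the `assert` line and the message expression is parenthesized instead. A runnable sketch:

prev_event_ids = ["$a", "$b"]

# Before: the condition was wrapped in parentheses to make room.
assert (
    len(prev_event_ids) <= 10
), "Attempting to create an event with %i prev_events" % (len(prev_event_ids),)

# After `ruff format`: the message, not the condition, is parenthesized.
assert len(prev_event_ids) <= 10, (
    "Attempting to create an event with %i prev_events" % (len(prev_event_ids),)
)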

@ -507,8 +507,10 @@ class PaginationHandler:
# Initially fetch the events from the database. With any luck, we can return
# these without blocking on backfill (handled below).
events, next_key = (
await self.store.paginate_room_events_by_topological_ordering(
(
events,
next_key,
) = await self.store.paginate_room_events_by_topological_ordering(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
@ -516,7 +518,6 @@ class PaginationHandler:
limit=pagin_config.limit,
event_filter=event_filter,
)
)
if pagin_config.direction == Direction.BACKWARDS:
# We use a `Set` because there can be multiple events at a given depth
@ -584,8 +585,10 @@ class PaginationHandler:
# If we did backfill something, refetch the events from the database to
# catch anything new that might have been added since we last fetched.
if did_backfill:
events, next_key = (
await self.store.paginate_room_events_by_topological_ordering(
(
events,
next_key,
) = await self.store.paginate_room_events_by_topological_ordering(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
@ -593,7 +596,6 @@ class PaginationHandler:
limit=pagin_config.limit,
event_filter=event_filter,
)
)
else:
# Otherwise, we can backfill in the background for eventual
# consistency's sake but we don't need to block the client waiting


@ -71,6 +71,7 @@ user state; this device follows the normal timeout logic (see above) and will
automatically be replaced with any information from currently available devices.
"""
import abc
import contextlib
import itertools
@ -493,9 +494,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
# The number of ongoing syncs on this process, by (user ID, device ID).
# Empty if _presence_enabled is false.
self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
{}
)
self._user_device_to_num_current_syncs: Dict[
Tuple[str, Optional[str]], int
] = {}
self.notifier = hs.get_notifier()
self.instance_id = hs.get_instance_id()
@ -818,9 +819,9 @@ class PresenceHandler(BasePresenceHandler):
# Keeps track of the number of *ongoing* syncs on this process. While
# this is non zero a user will never go offline.
self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
{}
)
self._user_device_to_num_current_syncs: Dict[
Tuple[str, Optional[str]], int
] = {}
# Keeps track of the number of *ongoing* syncs on other processes.
#


@ -351,9 +351,9 @@ class ProfileHandler:
server_name = host
if self._is_mine_server_name(server_name):
media_info: Optional[Union[LocalMedia, RemoteMedia]] = (
await self.store.get_local_media(media_id)
)
media_info: Optional[
Union[LocalMedia, RemoteMedia]
] = await self.store.get_local_media(media_id)
else:
media_info = await self.store.get_cached_remote_media(server_name, media_id)


@ -188,14 +188,14 @@ class RelationsHandler:
if include_original_event:
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
return_value["original_event"] = (
await self._event_serializer.serialize_event(
return_value[
"original_event"
] = await self._event_serializer.serialize_event(
event,
now,
bundle_aggregations=None,
config=serialize_options,
)
)
if next_token:
return_value["next_batch"] = await next_token.to_string(self._main_store)


@ -20,6 +20,7 @@
#
"""Contains functions for performing actions on rooms."""
import itertools
import logging
import math
@ -900,13 +901,11 @@ class RoomCreationHandler:
)
# Check whether this visibility value is blocked by a third party module
allowed_by_third_party_rules = (
await (
allowed_by_third_party_rules = await (
self._third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
)
)
)
if not allowed_by_third_party_rules:
raise SynapseError(403, "Room visibility value not allowed.")


@ -1302,12 +1302,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# If this is going to be a local join, additional information must
# be included in the event content in order to efficiently validate
# the event.
content[EventContentFields.AUTHORISING_USER] = (
await self.event_auth_handler.get_user_which_could_invite(
content[
EventContentFields.AUTHORISING_USER
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
state_before_join,
)
)
return False, []
@ -1415,9 +1415,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if requester is not None:
sender = UserID.from_string(event.sender)
assert (
sender == requester.user
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
assert sender == requester.user, (
"Sender (%s) must be same as requester (%s)" % (sender, requester.user)
)
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
else:
requester = types.create_requester(target_user)


@ -423,9 +423,9 @@ class SearchHandler:
}
if search_result.room_groups and "room_id" in group_keys:
rooms_cat_res.setdefault("groups", {})[
"room_id"
] = search_result.room_groups
rooms_cat_res.setdefault("groups", {})["room_id"] = (
search_result.room_groups
)
if sender_group and "sender" in group_keys:
rooms_cat_res.setdefault("groups", {})["sender"] = sender_group


@ -587,9 +587,7 @@ class SlidingSyncHandler:
Membership.LEAVE,
Membership.BAN,
):
to_bound = (
room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
)
to_bound = room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
timeline_from_bound = from_bound
if ignore_timeline_bound:


@ -386,9 +386,9 @@ class SlidingSyncExtensionHandler:
if have_push_rules_changed:
global_account_data_map = dict(global_account_data_map)
# TODO: This should take into account the `from_token` and `to_token`
global_account_data_map[AccountDataTypes.PUSH_RULES] = (
await self.push_rules_handler.push_rules_for_user(sync_config.user)
)
global_account_data_map[
AccountDataTypes.PUSH_RULES
] = await self.push_rules_handler.push_rules_for_user(sync_config.user)
else:
# TODO: This should take into account the `to_token`
all_global_account_data = await self.store.get_global_account_data_for_user(
@ -397,9 +397,9 @@ class SlidingSyncExtensionHandler:
global_account_data_map = dict(all_global_account_data)
# TODO: This should take into account the `to_token`
global_account_data_map[AccountDataTypes.PUSH_RULES] = (
await self.push_rules_handler.push_rules_for_user(sync_config.user)
)
global_account_data_map[
AccountDataTypes.PUSH_RULES
] = await self.push_rules_handler.push_rules_for_user(sync_config.user)
# Fetch room account data
account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {}


@ -293,11 +293,12 @@ class SlidingSyncRoomLists:
is_encrypted=is_encrypted,
)
newly_joined_room_ids, newly_left_room_map = (
await self._get_newly_joined_and_left_rooms(
(
newly_joined_room_ids,
newly_left_room_map,
) = await self._get_newly_joined_and_left_rooms(
user_id, from_token=from_token, to_token=to_token
)
)
dm_room_ids = await self._get_dm_rooms_for_user(user_id)
# Handle state resets in the from -> to token range.
@ -958,11 +959,12 @@ class SlidingSyncRoomLists:
else:
rooms_for_user[room_id] = change_room_for_user
newly_joined_room_ids, newly_left_room_ids = (
await self._get_newly_joined_and_left_rooms(
(
newly_joined_room_ids,
newly_left_room_ids,
) = await self._get_newly_joined_and_left_rooms(
user_id, to_token=to_token, from_token=from_token
)
)
dm_room_ids = await self._get_dm_rooms_for_user(user_id)


@ -183,10 +183,7 @@ class JoinedSyncResult:
to tell if room needs to be part of the sync result.
"""
return bool(
self.timeline
or self.state
or self.ephemeral
or self.account_data
self.timeline or self.state or self.ephemeral or self.account_data
# nb the notification count does not, er, count: if there's nothing
# else in the result, we don't need to send it.
)
@ -575,11 +572,11 @@ class SyncHandler:
if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling
# notifier.wait_for_events.
result: Union[SyncResult, E2eeSyncResult] = (
await self.current_sync_for_user(
result: Union[
SyncResult, E2eeSyncResult
] = await self.current_sync_for_user(
sync_config, sync_version, since_token, full_state=full_state
)
)
else:
# Otherwise, we wait for something to happen and report it to the user.
async def current_sync_callback(
@ -673,11 +670,11 @@ class SyncHandler:
# Go through the `/sync` v2 path
if sync_version == SyncVersion.SYNC_V2:
sync_result: Union[SyncResult, E2eeSyncResult] = (
await self.generate_sync_result(
sync_result: Union[
SyncResult, E2eeSyncResult
] = await self.generate_sync_result(
sync_config, since_token, full_state
)
)
# Go through the MSC3575 Sliding Sync `/sync/e2ee` path
elif sync_version == SyncVersion.E2EE_SYNC:
sync_result = await self.generate_e2ee_sync_result(
@ -1488,14 +1485,17 @@ class SyncHandler:
# timeline here. The caller will then dedupe any redundant
# ones.
state_ids = await self._state_storage_controller.get_state_ids_for_event(
state_ids = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
# we only want members!
state_filter=StateFilter.from_types(
(EventTypes.Member, member) for member in members_to_fetch
(EventTypes.Member, member)
for member in members_to_fetch
),
await_full_state=False,
)
)
return state_ids
if batch:
@ -2166,18 +2166,18 @@ class SyncHandler:
if push_rules_changed:
global_account_data = dict(global_account_data)
global_account_data[AccountDataTypes.PUSH_RULES] = (
await self._push_rules_handler.push_rules_for_user(sync_config.user)
)
global_account_data[
AccountDataTypes.PUSH_RULES
] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
else:
all_global_account_data = await self.store.get_global_account_data_for_user(
user_id
)
global_account_data = dict(all_global_account_data)
global_account_data[AccountDataTypes.PUSH_RULES] = (
await self._push_rules_handler.push_rules_for_user(sync_config.user)
)
global_account_data[
AccountDataTypes.PUSH_RULES
] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
account_data_for_user = (
await sync_config.filter_collection.filter_global_account_data(


@ -183,7 +183,7 @@ class WorkerLocksHandler:
return
def _wake_all_locks(
locks: Collection[Union[WaitingLock, WaitingMultiLock]]
locks: Collection[Union[WaitingLock, WaitingMultiLock]],
) -> None:
for lock in locks:
deferred = lock.deferred


@ -1313,6 +1313,5 @@ def is_unknown_endpoint(
)
) or (
# Older Synapses returned a 400 error.
e.code == 400
and synapse_error.errcode == Codes.UNRECOGNIZED
e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
)


@ -233,7 +233,7 @@ def return_html_error(
def wrap_async_request_handler(
h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]]
h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]],
) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]:
"""Wraps an async request handler so that it calls request.processing.


@ -22,6 +22,7 @@
"""
Log formatters that output terse JSON.
"""
import json
import logging


@ -29,6 +29,7 @@ them.
See doc/log_contexts.rst for details on how this works.
"""
import logging
import threading
import typing
@ -751,7 +752,7 @@ def preserve_fn(
f: Union[
Callable[P, R],
Callable[P, Awaitable[R]],
]
],
) -> Callable[P, "defer.Deferred[R]"]:
"""Function decorator which wraps the function with run_in_background"""


@ -169,6 +169,7 @@ Gotchas
than one caller? Will all of those calling functions have be in a context
with an active span?
"""
import contextlib
import enum
import inspect
@ -414,7 +415,7 @@ def ensure_active_span(
"""
def ensure_active_span_inner_1(
func: Callable[P, R]
func: Callable[P, R],
) -> Callable[P, Union[Optional[T], R]]:
@wraps(func)
def ensure_active_span_inner_2(
@ -700,7 +701,7 @@ def set_operation_name(operation_name: str) -> None:
@only_if_tracing
def force_tracing(
span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel
span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel,
) -> None:
"""Force sampling for the active/given span and its children.
@ -1093,9 +1094,10 @@ def trace_servlet(
# Mypy seems to think that start_context.tag below can be Optional[str], but
# that doesn't appear to be correct and works in practice.
request_tags[
SynapseTags.REQUEST_TAG
] = request.request_metrics.start_context.tag # type: ignore[assignment]
request_tags[SynapseTags.REQUEST_TAG] = (
request.request_metrics.start_context.tag # type: ignore[assignment]
)
# set the tags *after* the servlet completes, in case it decided to
# prioritise the span (tags will get dropped on unprioritised spans)


@ -293,7 +293,7 @@ def wrap_as_background_process(
"""
def wrap_as_background_process_inner(
func: Callable[P, Awaitable[Optional[R]]]
func: Callable[P, Awaitable[Optional[R]]],
) -> Callable[P, "defer.Deferred[Optional[R]]"]:
@wraps(func)
def wrap_as_background_process_inner_2(


@ -304,9 +304,9 @@ class BulkPushRuleEvaluator:
if relation_type == "m.thread" and event.content.get(
"m.relates_to", {}
).get("is_falling_back", False):
related_events["m.in_reply_to"][
"im.vector.is_falling_back"
] = ""
related_events["m.in_reply_to"]["im.vector.is_falling_back"] = (
""
)
return related_events
@ -372,7 +372,8 @@ class BulkPushRuleEvaluator:
gather_results(
(
run_in_background( # type: ignore[call-arg]
self.store.get_number_joined_users_in_room, event.room_id # type: ignore[arg-type]
self.store.get_number_joined_users_in_room,
event.room_id, # type: ignore[arg-type]
),
run_in_background(
self._get_power_levels_and_sender_level,


@ -119,7 +119,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
return payload
async def _handle_request(self, request: Request, content: JsonDict) -> Tuple[int, JsonDict]: # type: ignore[override]
async def _handle_request( # type: ignore[override]
self, request: Request, content: JsonDict
) -> Tuple[int, JsonDict]:
with Measure(self.clock, "repl_fed_send_events_parse"):
room_id = content["room_id"]
backfilled = content["backfilled"]


@ -98,7 +98,9 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint):
self._store = hs.get_datastores().main
@staticmethod
async def _serialize_payload(user_id: str, old_room_id: str, new_room_id: str) -> JsonDict: # type: ignore[override]
async def _serialize_payload( # type: ignore[override]
user_id: str, old_room_id: str, new_room_id: str
) -> JsonDict:
return {}
async def _handle_request( # type: ignore[override]
@ -109,7 +111,6 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint):
old_room_id: str,
new_room_id: str,
) -> Tuple[int, JsonDict]:
await self._store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)


@ -18,8 +18,8 @@
# [This file includes modifications made by New Vector Limited]
#
#
"""A replication client for use by synapse workers.
"""
"""A replication client for use by synapse workers."""
import logging
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple


@ -23,6 +23,7 @@
The VALID_SERVER_COMMANDS and VALID_CLIENT_COMMANDS define which commands are
allowed to be sent by which side.
"""
import abc
import logging
from typing import List, Optional, Tuple, Type, TypeVar


@ -857,7 +857,7 @@ UpdateRow = TypeVar("UpdateRow")
def _batch_updates(
updates: Iterable[Tuple[UpdateToken, UpdateRow]]
updates: Iterable[Tuple[UpdateToken, UpdateRow]],
) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]:
"""Collect stream updates with the same token together


@ -23,6 +23,7 @@ protocols.
An explanation of this protocol is available in docs/tcp_replication.md
"""
import fcntl
import logging
import struct


@ -18,8 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
"""The server side of the replication stream.
"""
"""The server side of the replication stream."""
import logging
import random
@ -307,7 +306,7 @@ class ReplicationStreamer:
def _batch_updates(
updates: List[Tuple[Token, StreamRow]]
updates: List[Tuple[Token, StreamRow]],
) -> List[Tuple[Optional[Token], StreamRow]]:
"""Takes a list of updates of form [(token, row)] and sets the token to
None for all rows where the next row has the same token. This is used to


@ -247,7 +247,7 @@ class _StreamFromIdGen(Stream):
def current_token_without_instance(
current_token: Callable[[], int]
current_token: Callable[[], int],
) -> Callable[[str], int]:
"""Takes a current token callback function for a single writer stream
that doesn't take an instance name parameter and wraps it in a function that


@ -181,8 +181,7 @@ class NewRegistrationTokenRestServlet(RestServlet):
uses_allowed = body.get("uses_allowed", None)
if not (
uses_allowed is None
or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721
uses_allowed is None or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721
):
raise SynapseError(
HTTPStatus.BAD_REQUEST,


@ -19,8 +19,8 @@
#
#
"""This module contains base REST classes for constructing client v1 servlets.
"""
"""This module contains base REST classes for constructing client v1 servlets."""
import logging
import re
from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast


@ -108,9 +108,9 @@ class AccountDataServlet(RestServlet):
# Push rules are stored in a separate table and must be queried separately.
if account_data_type == AccountDataTypes.PUSH_RULES:
account_data: Optional[JsonMapping] = (
await self._push_rules_handler.push_rules_for_user(requester.user)
)
account_data: Optional[
JsonMapping
] = await self._push_rules_handler.push_rules_for_user(requester.user)
else:
account_data = await self.store.get_global_account_data_by_type_for_user(
user_id, account_data_type


@ -48,9 +48,7 @@ class AccountValidityRenewServlet(RestServlet):
self.account_renewed_template = (
hs.config.account_validity.account_validity_account_renewed_template
)
self.account_previously_renewed_template = (
hs.config.account_validity.account_validity_account_previously_renewed_template
)
self.account_previously_renewed_template = hs.config.account_validity.account_validity_account_previously_renewed_template
self.invalid_token_template = (
hs.config.account_validity.account_validity_invalid_token_template
)


@ -20,6 +20,7 @@
#
"""This module contains REST servlets to do with event streaming, /events."""
import logging
from typing import TYPE_CHECKING, Dict, List, Tuple, Union


@ -19,8 +19,8 @@
#
#
""" This module contains REST servlets to do with presence: /presence/<paths>
"""
"""This module contains REST servlets to do with presence: /presence/<paths>"""
import logging
from typing import TYPE_CHECKING, Tuple


@ -640,14 +640,12 @@ class RegisterRestServlet(RestServlet):
if not password_hash:
raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
desired_username = (
await (
desired_username = await (
self.password_auth_provider.get_username_for_registration(
auth_result,
params,
)
)
)
if desired_username is None:
desired_username = params.get("username", None)
@ -696,13 +694,11 @@ class RegisterRestServlet(RestServlet):
session_id
)
display_name = (
await (
display_name = await (
self.password_auth_provider.get_displayname_for_registration(
auth_result, params
)
)
)
registered_user_id = await self.registration_handler.register_user(
localpart=desired_username,


@ -20,6 +20,7 @@
#
"""This module contains REST servlets to do with rooms: /rooms/<paths>"""
import logging
import re
from enum import Enum


@ -1045,9 +1045,9 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["initial"] = room_result.initial
if room_result.unstable_expanded_timeline:
serialized_rooms[room_id][
"unstable_expanded_timeline"
] = room_result.unstable_expanded_timeline
serialized_rooms[room_id]["unstable_expanded_timeline"] = (
room_result.unstable_expanded_timeline
)
# This will be omitted for invite/knock rooms with `stripped_state`
if (
@ -1082,9 +1082,9 @@ class SlidingSyncRestServlet(RestServlet):
# This will be omitted for invite/knock rooms with `stripped_state`
if room_result.prev_batch is not None:
serialized_rooms[room_id]["prev_batch"] = (
await room_result.prev_batch.to_string(self.store)
)
serialized_rooms[room_id][
"prev_batch"
] = await room_result.prev_batch.to_string(self.store)
# This will be omitted for invite/knock rooms with `stripped_state`
if room_result.num_live is not None:


@ -21,6 +21,7 @@
"""This module contains logic for storing HTTP PUT transactions. This is used
to ensure idempotency when performing PUTs using the REST API."""
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple


@ -191,11 +191,11 @@ class RemoteKey(RestServlet):
server_keys: Dict[Tuple[str, str], Optional[FetchKeyResultForRemote]] = {}
for server_name, key_ids in query.items():
if key_ids:
results: Mapping[str, Optional[FetchKeyResultForRemote]] = (
await self.store.get_server_keys_json_for_remote(
results: Mapping[
str, Optional[FetchKeyResultForRemote]
] = await self.store.get_server_keys_json_for_remote(
server_name, key_ids
)
)
else:
results = await self.store.get_all_server_keys_json_for_remote(
server_name


@ -65,9 +65,9 @@ class WellKnownBuilder:
}
account_management_url = await auth.account_management_url()
if account_management_url is not None:
result["org.matrix.msc2965.authentication"][
"account"
] = account_management_url
result["org.matrix.msc2965.authentication"]["account"] = (
account_management_url
)
if self._config.server.extra_well_known_client_content:
for (


@ -119,7 +119,9 @@ class ResourceLimitsServerNotices:
elif not currently_blocked and limit_msg:
# Room is not notifying of a block, when it ought to be.
await self._apply_limit_block_notification(
user_id, limit_msg, limit_type # type: ignore
user_id,
limit_msg,
limit_type, # type: ignore
)
except SynapseError as e:
logger.error("Error sending resource limits server notice: %s", e)


@ -416,7 +416,7 @@ class EventsPersistenceStorageController:
set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
async def enqueue(
item: Tuple[str, List[Tuple[EventBase, EventContext]]]
item: Tuple[str, List[Tuple[EventBase, EventContext]]],
) -> Dict[str, str]:
room_id, evs_ctxs = item
return await self._event_persist_queue.add_to_queue(
@ -792,9 +792,9 @@ class EventsPersistenceStorageController:
)
# Remove any events which are prev_events of any existing events.
existing_prevs: Collection[str] = (
await self.persist_events_store._get_events_which_are_prevs(result)
)
existing_prevs: Collection[
str
] = await self.persist_events_store._get_events_which_are_prevs(result)
result.difference_update(existing_prevs)
# Finally handle the case where the new events have soft-failed prev


@ -238,9 +238,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
INNER JOIN user_ips USING (user_id, access_token, ip)
GROUP BY user_id, access_token, ip
HAVING count(*) > 1
""".format(
clause
),
""".format(clause),
args,
)
res = cast(
@ -373,9 +371,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
LIMIT ?
) c
INNER JOIN user_ips AS u USING (user_id, device_id, last_seen)
""" % {
"where_clause": where_clause
}
""" % {"where_clause": where_clause}
txn.execute(sql, where_args + [batch_size])
rows = cast(List[Tuple[int, str, str, str, str]], txn.fetchall())


@ -1116,7 +1116,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
txn.execute(sql, (start, stop))
destinations = {d for d, in txn}
destinations = {d for (d,) in txn}
to_remove = set()
for d in destinations:
try:

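The `for d, in txn` rewrites that recur through the storage modules, as in the hunk above, are purely cosmetic: `ruff format` parenthesizes one-element tuple targets so the unpacking is explicit. A runnable sketch:

rows = [("hs1.example",), ("hs2.example",)]

# Before: the bare `d,` target is easy to misread as a plain name.
destinations_before = {d for d, in rows}

# After `ruff format`: parentheses mark the single-element unpacking.
destinations_after = {d for (d,) in rows}

assert destinations_before == destinations_after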

@ -670,9 +670,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
result["keys"] = keys
device_display_name = None
if (
self.hs.config.federation.allow_device_name_lookup_over_federation
):
if self.hs.config.federation.allow_device_name_lookup_over_federation:
device_display_name = device.display_name
if device_display_name:
result["device_display_name"] = device_display_name
@ -917,7 +915,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
from_key,
to_key,
)
return {u for u, in rows}
return {u for (u,) in rows}
@cancellable
async def get_users_whose_devices_changed(
@ -968,7 +966,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
txn.database_engine, "user_id", chunk
)
txn.execute(sql % (clause,), [from_key, to_key] + args)
changes.update(user_id for user_id, in txn)
changes.update(user_id for (user_id,) in txn)
return changes
@ -1520,7 +1518,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
args: List[Any],
) -> Set[str]:
txn.execute(sql.format(clause=clause), args)
return {user_id for user_id, in txn}
return {user_id for (user_id,) in txn}
changes = set()
for chunk in batch_iter(changed_room_ids, 1000):
@ -1560,7 +1558,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
txn: LoggingTransaction,
) -> Set[str]:
txn.execute(sql, (from_id, to_id))
return {room_id for room_id, in txn}
return {room_id for (room_id,) in txn}
return await self.db_pool.runInteraction(
"get_all_device_list_changes",


@ -387,9 +387,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore):
is_verified, session_data
FROM e2e_room_keys
WHERE user_id = ? AND version = ? AND (%s)
""" % (
" OR ".join(where_clauses)
)
""" % (" OR ".join(where_clauses))
txn.execute(sql, params)


@ -472,9 +472,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
signature_sql = """
SELECT user_id, key_id, target_device_id, signature
FROM e2e_cross_signing_signatures WHERE %s
""" % (
" OR ".join("(" + q + ")" for q in signature_query_clauses)
)
""" % (" OR ".join("(" + q + ")" for q in signature_query_clauses))
txn.execute(signature_sql, signature_query_params)
return cast(
@ -917,9 +915,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
FROM e2e_cross_signing_keys
WHERE %(clause)s
ORDER BY user_id, keytype, stream_id DESC
""" % {
"clause": clause
}
""" % {"clause": clause}
else:
# SQLite has special handling for bare columns when using
# MIN/MAX with a `GROUP BY` clause where it picks the value from
@ -929,9 +925,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
FROM e2e_cross_signing_keys
WHERE %(clause)s
GROUP BY user_id, keytype
""" % {
"clause": clause
}
""" % {"clause": clause}
txn.execute(sql, params)


@ -326,7 +326,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
"""
rows = txn.execute_values(sql, chains.items())
results.update(r for r, in rows)
results.update(r for (r,) in rows)
else:
# For SQLite we just fall back to doing a noddy for loop.
sql = """
@ -335,7 +335,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
"""
for chain_id, max_no in chains.items():
txn.execute(sql, (chain_id, max_no))
results.update(r for r, in txn)
results.update(r for (r,) in txn)
return results
@ -645,7 +645,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
]
rows = txn.execute_values(sql, args)
result.update(r for r, in rows)
result.update(r for (r,) in rows)
else:
# For SQLite we just fall back to doing a noddy for loop.
sql = """
@ -654,7 +654,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
"""
for chain_id, (min_no, max_no) in chain_to_gap.items():
txn.execute(sql, (chain_id, min_no, max_no))
result.update(r for r, in txn)
result.update(r for (r,) in txn)
return result
@ -1220,13 +1220,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
HAVING count(*) > ?
ORDER BY count(*) DESC
LIMIT ?
""" % (
where_clause,
)
""" % (where_clause,)
query_args = list(itertools.chain(room_id_filter, [min_count, limit]))
txn.execute(sql, query_args)
return [room_id for room_id, in txn]
return [room_id for (room_id,) in txn]
return await self.db_pool.runInteraction(
"get_rooms_with_many_extremities", _get_rooms_with_many_extremities_txn
@ -1358,7 +1356,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> List[str]:
txn.execute(sql, (stream_ordering, room_id))
return [event_id for event_id, in txn]
return [event_id for (event_id,) in txn]
event_ids = await self.db_pool.runInteraction(
"get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn


@ -1860,9 +1860,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
AND epa.notif = 1
ORDER BY epa.stream_ordering DESC
LIMIT ?
""" % (
before_clause,
)
""" % (before_clause,)
txn.execute(sql, args)
return cast(
List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall()


@ -429,9 +429,7 @@ class PersistEventsStore:
if event_type == EventTypes.Member and self.is_mine_id(state_key)
]
membership_snapshot_shared_insert_values: (
SlidingSyncMembershipSnapshotSharedInsertValues
) = {}
membership_snapshot_shared_insert_values: SlidingSyncMembershipSnapshotSharedInsertValues = {}
membership_infos_to_insert_membership_snapshots: List[
SlidingSyncMembershipInfo
] = []
@ -719,7 +717,7 @@ class PersistEventsStore:
keyvalues={},
retcols=("event_id",),
)
already_persisted_events = {event_id for event_id, in rows}
already_persisted_events = {event_id for (event_id,) in rows}
state_events = [
event
for event in state_events
@ -1830,12 +1828,8 @@ class PersistEventsStore:
if sliding_sync_table_changes.to_insert_membership_snapshots:
# Update the `sliding_sync_membership_snapshots` table
#
sliding_sync_snapshot_keys = (
sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys()
)
sliding_sync_snapshot_values = (
sliding_sync_table_changes.membership_snapshot_shared_insert_values.values()
)
sliding_sync_snapshot_keys = sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys()
sliding_sync_snapshot_values = sliding_sync_table_changes.membership_snapshot_shared_insert_values.values()
# We need to insert/update regardless of whether we have
# `sliding_sync_snapshot_keys` because there are other fields in the `ON
# CONFLICT` upsert to run (see inherit case (explained in
@ -3361,7 +3355,7 @@ class PersistEventsStore:
)
potential_backwards_extremities.difference_update(
e for e, in existing_events_outliers
e for (e,) in existing_events_outliers
)
if potential_backwards_extremities:


@ -647,7 +647,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
room_ids = {row[0] for row in rows}
for room_id in room_ids:
txn.call_after(
self.get_latest_event_ids_in_room.invalidate, (room_id,) # type: ignore[attr-defined]
self.get_latest_event_ids_in_room.invalidate, # type: ignore[attr-defined]
(room_id,),
)
self.db_pool.simple_delete_many_txn(
@ -2065,9 +2066,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
)
# Map of values to insert/update in the `sliding_sync_membership_snapshots` table
sliding_sync_membership_snapshots_insert_map: (
SlidingSyncMembershipSnapshotSharedInsertValues
) = {}
sliding_sync_membership_snapshots_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {}
if membership == Membership.JOIN:
# If we're still joined, we can pull from current state.
current_state_ids_map: StateMap[
@ -2149,15 +2148,16 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
# membership (i.e. the room shouldn't disappear if you're using the
# `is_encrypted` filter and you leave).
if membership in (Membership.LEAVE, Membership.BAN) and is_outlier:
invite_or_knock_event_id, invite_or_knock_membership = (
await self.db_pool.runInteraction(
(
invite_or_knock_event_id,
invite_or_knock_membership,
) = await self.db_pool.runInteraction(
"sliding_sync_membership_snapshots_bg_update._find_previous_membership",
_find_previous_membership_txn,
room_id,
user_id,
membership_event_id,
)
)
# Pull from the stripped state on the invite/knock event
invite_or_knock_event = await self.get_event(invite_or_knock_event_id)
@ -2484,9 +2484,7 @@ def _resolve_stale_data_in_sliding_sync_joined_rooms_table(
"progress_json": "{}",
},
)
depends_on = (
_BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
)
depends_on = _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
# Now kick-off the background update to catch-up with what we missed while Synapse
# was downgraded.


@ -1665,7 +1665,7 @@ class EventsWorkerStore(SQLBaseStore):
txn.database_engine, "e.event_id", event_ids
)
txn.execute(sql + clause, args)
found_events = {eid for eid, in txn}
found_events = {eid for (eid,) in txn}
# ... and then we can update the results for each key
return {eid: (eid in found_events) for eid in event_ids}
@ -1864,9 +1864,9 @@ class EventsWorkerStore(SQLBaseStore):
" LIMIT ?"
)
txn.execute(sql, (-last_id, -current_id, instance_name, limit))
new_event_updates: List[Tuple[int, Tuple[str, str, str, str, str, str]]] = (
[]
)
new_event_updates: List[
Tuple[int, Tuple[str, str, str, str, str, str]]
] = []
row: Tuple[int, str, str, str, str, str, str]
# Type safety: iterating over `txn` yields `Tuple`, i.e.
# `Tuple[Any, ...]` of arbitrary length. Mypy detects assigning a


@ -201,7 +201,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
txn.execute_batch(
"INSERT INTO event_backward_extremities (room_id, event_id)"
" VALUES (?, ?)",
[(room_id, event_id) for event_id, in new_backwards_extrems],
[(room_id, event_id) for (event_id,) in new_backwards_extrems],
)
logger.info("[purge] finding state groups referenced by deleted events")
@ -215,7 +215,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"""
)
referenced_state_groups = {sg for sg, in txn}
referenced_state_groups = {sg for (sg,) in txn}
logger.info(
"[purge] found %i referenced state groups", len(referenced_state_groups)
)


@ -762,7 +762,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
txn.execute(sql, args)
return [room_id for room_id, in txn]
return [room_id for (room_id,) in txn]
results: List[str] = []
for batch in batch_iter(room_ids, 1000):
@ -1030,9 +1030,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
SELECT event_id WHERE room_id = ? AND stream_ordering IN (
SELECT max(stream_ordering) WHERE %s
)
""" % (
clause,
)
""" % (clause,)
txn.execute(sql, [room_id] + list(args))
rows = txn.fetchall()


@ -1250,9 +1250,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
SELECT address, session_id, medium, client_secret,
last_send_attempt, validated_at
FROM threepid_validation_session WHERE %s
""" % (
" AND ".join("%s = ?" % k for k in keyvalues.keys()),
)
""" % (" AND ".join("%s = ?" % k for k in keyvalues.keys()),)
if validated is not None:
sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")


@ -1608,9 +1608,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
FROM event_reports AS er
JOIN room_stats_state ON room_stats_state.room_id = er.room_id
{}
""".format(
where_clause
)
""".format(where_clause)
txn.execute(sql, args)
count = cast(Tuple[int], txn.fetchone())[0]


@ -232,9 +232,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
AND m.room_id = c.room_id
AND m.user_id = c.state_key
WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? AND %s
""" % (
clause,
)
""" % (clause,)
txn.execute(sql, (room_id, Membership.JOIN, *ids))
return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn}
@ -531,9 +529,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
WHERE
user_id = ?
AND %s
""" % (
clause,
)
""" % (clause,)
txn.execute(sql, (user_id, *args))
results = [
@ -813,7 +809,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
"""
txn.execute(sql, (user_id, *args))
return {u: True for u, in txn}
return {u: True for (u,) in txn}
to_return = {}
for batch_user_ids in batch_iter(other_user_ids, 1000):
@ -1031,7 +1027,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
AND room_id = ?
"""
txn.execute(sql, (room_id,))
return {d for d, in txn}
return {d for (d,) in txn}
return await self.db_pool.runInteraction(
"get_current_hosts_in_room", get_current_hosts_in_room_txn
@ -1099,7 +1095,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
"""
txn.execute(sql, (room_id,))
# `server_domain` will be `NULL` for malformed MXIDs with no colons.
return tuple(d for d, in txn if d is not None)
return tuple(d for (d,) in txn if d is not None)
return await self.db_pool.runInteraction(
"get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn
@ -1316,9 +1312,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
room_id = ? AND membership = ?
AND NOT (%s)
LIMIT 1
""" % (
clause,
)
""" % (clause,)
def _is_local_host_in_room_ignoring_users_txn(
txn: LoggingTransaction,
@ -1464,10 +1458,12 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
self, progress: JsonDict, batch_size: int
) -> int:
target_min_stream_id = progress.get(
"target_min_stream_id_inclusive", self._min_stream_order_on_start # type: ignore[attr-defined]
"target_min_stream_id_inclusive",
self._min_stream_order_on_start, # type: ignore[attr-defined]
)
max_stream_id = progress.get(
"max_stream_id_exclusive", self._stream_order_on_start + 1 # type: ignore[attr-defined]
"max_stream_id_exclusive",
self._stream_order_on_start + 1, # type: ignore[attr-defined]
)
def add_membership_profile_txn(txn: LoggingTransaction) -> int:


@ -177,9 +177,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
AND (%s)
ORDER BY stream_ordering DESC
LIMIT ?
""" % (
" OR ".join("type = '%s'" % (t,) for t in TYPES),
)
""" % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)
txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))


@ -535,7 +535,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
desc="check_if_events_in_current_state",
)
return frozenset(event_id for event_id, in rows)
return frozenset(event_id for (event_id,) in rows)
# FIXME: how should this be cached?
@cancellable


@ -161,7 +161,7 @@ class StatsStore(StateDeltasStore):
LIMIT ?
"""
txn.execute(sql, (last_user_id, batch_size))
return [r for r, in txn]
return [r for (r,) in txn]
users_to_work_on = await self.db_pool.runInteraction(
"_populate_stats_process_users", _get_next_batch
@ -207,7 +207,7 @@ class StatsStore(StateDeltasStore):
LIMIT ?
"""
txn.execute(sql, (last_room_id, batch_size))
return [r for r, in txn]
return [r for (r,) in txn]
rooms_to_work_on = await self.db_pool.runInteraction(
"populate_stats_rooms_get_batch", _get_next_batch
@ -751,9 +751,7 @@ class StatsStore(StateDeltasStore):
LEFT JOIN profiles AS p ON lmr.user_id = p.full_user_id
{}
GROUP BY lmr.user_id, displayname
""".format(
where_clause
)
""".format(where_clause)
# SQLite does not support SELECT COUNT(*) OVER()
sql = """


@ -1122,9 +1122,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
AND e.stream_ordering > ? AND e.stream_ordering <= ?
%s
ORDER BY e.stream_ordering ASC
""" % (
ignore_room_clause,
)
""" % (ignore_room_clause,)
txn.execute(sql, args)


@ -224,9 +224,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
SELECT room_id, events FROM %s
ORDER BY events DESC
LIMIT 250
""" % (
TEMP_TABLE + "_rooms",
)
""" % (TEMP_TABLE + "_rooms",)
txn.execute(sql)
rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall())


@ -767,7 +767,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
remaining_state_groups = {
state_group
for state_group, in rows
for (state_group,) in rows
if state_group not in state_groups_to_delete
}


@ -607,7 +607,7 @@ def _apply_module_schema_files(
"SELECT file FROM applied_module_schemas WHERE module_name = ?",
(modname,),
)
applied_deltas = {d for d, in cur}
applied_deltas = {d for (d,) in cur}
for name, stream in names_and_streams:
if name in applied_deltas:
continue
@ -710,7 +710,7 @@ def _get_or_create_schema_state(
"SELECT file FROM applied_schema_deltas WHERE version >= ?",
(current_version,),
)
applied_deltas = tuple(d for d, in txn)
applied_deltas = tuple(d for (d,) in txn)
return _SchemaState(
current_version=current_version,


@ -41,8 +41,6 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
(user_id, filter_id);
DROP TABLE user_filters;
ALTER TABLE user_filters_migration RENAME TO user_filters;
""" % (
select_clause,
)
""" % (select_clause,)
execute_statements_from_stream(cur, StringIO(sql))


@ -23,6 +23,7 @@
This migration handles the process of changing the type of `room_depth.min_depth` to
a BIGINT.
"""
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine


@ -25,6 +25,7 @@ This migration adds triggers to the partial_state_events tables to enforce uniqu
Triggers cannot be expressed in .sql files, so we have to use a separate file.
"""
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine


@ -26,6 +26,7 @@ for its completion can be removed.
Note the background job must still remain defined in the database class.
"""
from synapse.config.homeserver import HomeServerConfig
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine


@ -24,6 +24,7 @@
This migration adds triggers to the room membership tables to enforce consistency.
Triggers cannot be expressed in .sql files, so we have to use a separate file.
"""
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine


@ -23,6 +23,7 @@
"""
This migration adds foreign key constraint to `event_forward_extremities` table.
"""
from synapse.storage.background_updates import (
ForeignKeyConstraint,
run_validate_constraint_and_delete_rows_schema_delta,


@ -1308,7 +1308,7 @@ class DeviceListUpdates:
def get_verify_key_from_cross_signing_key(
key_info: Mapping[str, Any]
key_info: Mapping[str, Any],
) -> Tuple[str, VerifyKey]:
"""Get the key ID and signedjson verify key from a cross-signing key dict

Some files were not shown because too many files have changed in this diff.