Clean up newline quote marks around the codebase (#6362)
parent 9cc168e42e
commit 3916e1b97a

26 changed files with 43 additions and 46 deletions
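The cleanup relies on a Python detail worth spelling out: adjacent string literals are concatenated at compile time, so a SQL statement split across two quoted pieces on one line (usually a leftover from earlier line-wrapping) is already a single string at runtime. A minimal demonstration of why the hunks below are behavior-preserving:

```python
# Adjacent string literals are merged by the compiler, so both spellings
# produce the identical string; the diff rewrites the first into the second.
split = "SELECT device_id FROM devices" " WHERE user_id = ?"
joined = "SELECT device_id FROM devices WHERE user_id = ?"
assert split == joined
```

The one place the bytes do change is the e2e_cross_signing_signatures hunk, where the split literals had left doubled spaces in the SQL (see the note at that hunk).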
@@ -851,7 +851,7 @@ class SQLBaseStore(object):
             allvalues.update(values)
             latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
 
-        sql = ("INSERT INTO %s (%s) VALUES (%s) " "ON CONFLICT (%s) DO %s") % (
+        sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
             table,
             ", ".join(k for k in allvalues),
             ", ".join("?" for _ in allvalues),
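For context, the template assembled here is a native upsert (INSERT ... ON CONFLICT ... DO UPDATE). A sketch of how the cleaned-up line renders, using hypothetical table and column values rather than Synapse's actual arguments:

```python
# Hypothetical inputs for illustration only; the real method derives these
# from its keyvalues/values/insertion_values arguments.
table = "user_directory"
allvalues = {"user_id": "@alice:example.com", "display_name": "Alice"}
values = {"display_name": "Alice"}
key_columns = "user_id"  # assumed unique-key column for this sketch

latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
    table,
    ", ".join(k for k in allvalues),
    ", ".join("?" for _ in allvalues),
    key_columns,
    latter,
)
print(sql)
# INSERT INTO user_directory (user_id, display_name) VALUES (?, ?)
#   ON CONFLICT (user_id) DO UPDATE SET display_name=EXCLUDED.display_name
# (shown wrapped here; the actual string is one line)
```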
@@ -380,7 +380,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
                 devices = list(messages_by_device.keys())
                 if len(devices) == 1 and devices[0] == "*":
                     # Handle wildcard device_ids.
-                    sql = "SELECT device_id FROM devices" " WHERE user_id = ?"
+                    sql = "SELECT device_id FROM devices WHERE user_id = ?"
                     txn.execute(sql, (user_id,))
                     message_json = json.dumps(messages_by_device["*"])
                     for row in txn:
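The surrounding code treats a device_id of "*" as "every device this user owns" and expands it with the query above. A self-contained sqlite3 sketch of that expansion (the real devices table has more columns):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE devices (user_id TEXT, device_id TEXT)")
conn.executemany(
    "INSERT INTO devices VALUES (?, ?)",
    [("@alice:example.com", "PHONE"), ("@alice:example.com", "LAPTOP")],
)

messages_by_device = {"*": {"type": "m.ping"}}  # wildcard delivery
devices = list(messages_by_device.keys())
if len(devices) == 1 and devices[0] == "*":
    txn = conn.execute(
        "SELECT device_id FROM devices WHERE user_id = ?",
        ("@alice:example.com",),
    )
    devices = [row[0] for row in txn]
print(devices)  # ['PHONE', 'LAPTOP']
```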
@@ -138,9 +138,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 result.setdefault(user_id, {})[device_id] = None
 
         # get signatures on the device
-        signature_sql = (
-            "SELECT * " " FROM e2e_cross_signing_signatures " " WHERE %s"
-        ) % (" OR ".join("(" + q + ")" for q in signature_query_clauses))
+        signature_sql = ("SELECT * FROM e2e_cross_signing_signatures WHERE %s") % (
+            " OR ".join("(" + q + ")" for q in signature_query_clauses)
+        )
 
         txn.execute(signature_sql, signature_query_params)
         rows = self.cursor_to_dict(txn)
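This hunk is one of the few where the resulting string actually changes: each split literal carried its own padding space, so the concatenated SQL contained doubled spaces. That is harmless to the SQL parser but noisy in logs, and the rewrite normalizes it:

```python
# Old form: every fragment both ends and begins with a space, so the
# compile-time concatenation yields doubled spaces.
old = "SELECT * " " FROM e2e_cross_signing_signatures " " WHERE %s"
new = "SELECT * FROM e2e_cross_signing_signatures WHERE %s"
assert old == "SELECT *  FROM e2e_cross_signing_signatures  WHERE %s"
assert "  " not in new
```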
@@ -713,9 +713,7 @@ class EventsStore(
             metadata_json = encode_json(event.internal_metadata.get_dict())
 
-            sql = (
-                "UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?"
-            )
+            sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?"
             txn.execute(sql, (metadata_json, event.event_id))
 
             # Add an entry to the ex_outlier_stream table to replicate the
@@ -732,7 +730,7 @@ class EventsStore(
                 },
             )
 
-            sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?"
+            sql = "UPDATE events SET outlier = ? WHERE event_id = ?"
             txn.execute(sql, (False, event.event_id))
 
             # Update the event_backward_extremities table now that this
@@ -1479,7 +1477,7 @@ class EventsStore(
 
         # We do joins against events_to_purge for e.g. calculating state
         # groups to purge, etc., so lets make an index.
-        txn.execute("CREATE INDEX events_to_purge_id" " ON events_to_purge(event_id)")
+        txn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)")
 
         txn.execute("SELECT event_id, should_delete FROM events_to_purge")
         event_rows = txn.fetchall()
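For background, events_to_purge is a scratch table populated earlier in the purge transaction, and the index keeps the subsequent joins against it from degenerating into repeated full scans. A simplified sqlite3 sketch of the pattern (schema reduced to the two columns read here):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TEMPORARY TABLE events_to_purge (event_id TEXT, should_delete BOOLEAN)"
)
conn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)")
conn.execute("INSERT INTO events_to_purge VALUES (?, ?)", ("$abc123", True))

event_rows = conn.execute(
    "SELECT event_id, should_delete FROM events_to_purge"
).fetchall()
print(event_rows)  # [('$abc123', 1)]
```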
@@ -55,7 +55,7 @@ class FilteringStore(SQLBaseStore):
             if filter_id_response is not None:
                 return filter_id_response[0]
 
-            sql = "SELECT MAX(filter_id) FROM user_filters " "WHERE user_id = ?"
+            sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?"
             txn.execute(sql, (user_localpart,))
             max_id = txn.fetchone()[0]
             if max_id is None:
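The MAX(filter_id) query feeds a simple allocate-next-ID pattern, visible in the surrounding context lines. A sketch of that logic (the function name and final return are inferred from the diff, not copied from Synapse):

```python
def next_filter_id(txn, user_localpart):
    # MAX() over zero rows yields NULL, which the driver maps to None,
    # so the first filter a user stores gets id 0.
    txn.execute(
        "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?",
        (user_localpart,),
    )
    max_id = txn.fetchone()[0]
    return 0 if max_id is None else max_id + 1
```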
@@ -337,7 +337,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         if len(media_ids) == 0:
             return
 
-        sql = "DELETE FROM local_media_repository_url_cache" " WHERE media_id = ?"
+        sql = "DELETE FROM local_media_repository_url_cache WHERE media_id = ?"
 
         def _delete_url_cache_txn(txn):
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
@@ -365,11 +365,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             return
 
         def _delete_url_cache_media_txn(txn):
-            sql = "DELETE FROM local_media_repository" " WHERE media_id = ?"
+            sql = "DELETE FROM local_media_repository WHERE media_id = ?"
 
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
 
-            sql = "DELETE FROM local_media_repository_thumbnails" " WHERE media_id = ?"
+            sql = "DELETE FROM local_media_repository_thumbnails WHERE media_id = ?"
 
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
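Both deletes go through executemany, which runs the statement once per parameter tuple; the list comprehension wraps each bare media_id into the 1-tuple the driver expects. A runnable sqlite3 sketch of the same pattern (table reduced to one column):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE local_media_repository (media_id TEXT)")
conn.executemany(
    "INSERT INTO local_media_repository VALUES (?)", [("a",), ("b",), ("c",)]
)

media_ids = ["a", "c"]
sql = "DELETE FROM local_media_repository WHERE media_id = ?"
conn.executemany(sql, [(media_id,) for media_id in media_ids])
print(conn.execute("SELECT media_id FROM local_media_repository").fetchall())
# [('b',)]
```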
@@ -377,9 +377,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         """
 
         def f(txn):
-            sql = (
-                "SELECT name, password_hash FROM users" " WHERE lower(name) = lower(?)"
-            )
+            sql = "SELECT name, password_hash FROM users WHERE lower(name) = lower(?)"
             txn.execute(sql, (user_id,))
             return dict(txn)
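Lowering both sides makes the lookup case-insensitive, and it is why f returns dict(txn) rather than a single row: several stored names can collide once case is folded. A small sqlite3 demo (schema simplified):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT, password_hash TEXT)")
conn.execute("INSERT INTO users VALUES (?, ?)", ("@alice:example.com", "hash1"))
conn.execute("INSERT INTO users VALUES (?, ?)", ("@Alice:example.com", "hash2"))

sql = "SELECT name, password_hash FROM users WHERE lower(name) = lower(?)"
txn = conn.execute(sql, ("@ALICE:example.com",))
# dict(txn) mirrors the `return dict(txn)` above: each row is a
# (name, password_hash) pair, so the cursor converts straight to a mapping.
print(dict(txn))
# {'@alice:example.com': 'hash1', '@Alice:example.com': 'hash2'}
```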
@@ -616,7 +616,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     def _get_max_topological_txn(self, txn, room_id):
         txn.execute(
-            "SELECT MAX(topological_ordering) FROM events" " WHERE room_id = ?",
+            "SELECT MAX(topological_ordering) FROM events WHERE room_id = ?",
            (room_id,),
        )
 
@@ -83,9 +83,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         )
 
         def get_tag_content(txn, tag_ids):
-            sql = (
-                "SELECT tag, content" " FROM room_tags" " WHERE user_id=? AND room_id=?"
-            )
+            sql = "SELECT tag, content FROM room_tags WHERE user_id=? AND room_id=?"
             results = []
             for stream_id, user_id, room_id in tag_ids:
                 txn.execute(sql, (user_id, room_id))
@@ -447,7 +447,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams)
     # Mark as done.
     cur.execute(
         database_engine.convert_param_style(
-            "INSERT INTO applied_module_schemas (module_name, file)" " VALUES (?,?)"
+            "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)"
         ),
         (modname, name),
     )
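convert_param_style exists because the two drivers Synapse targets disagree on placeholders: sqlite3 accepts qmark (?) natively, while psycopg2 expects %s. A sketch of the Postgres-side conversion; this is an assumption inferred from the call site, not code taken from this commit:

```python
def convert_param_style(sql: str) -> str:
    # Assumed Postgres behaviour: swap qmark placeholders for %s.
    # An SQLite engine would return the statement unchanged.
    return sql.replace("?", "%s")

print(convert_param_style(
    "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)"
))
# INSERT INTO applied_module_schemas (module_name, file) VALUES (%s,%s)
```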