mirror of
https://git.anonymousland.org/anonymousland/synapse-product.git
synced 2025-09-27 20:21:09 -04:00
Make all process_replication_rows
methods async (#13304)
More prep work for asynchronous caching; this also makes all process_replication_rows methods consistent (the presence handler already is). Signed off by Nick @ Beeper (@Fizzadar)
This commit is contained in:
parent
96cf81e312
commit
5d4028f217
14 changed files with 40 additions and 25 deletions
|
@ -414,7 +414,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
|
|||
)
|
||||
)
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self,
|
||||
stream_name: str,
|
||||
instance_name: str,
|
||||
|
@ -437,7 +437,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
|
|||
)
|
||||
self._account_data_stream_cache.entity_has_changed(row.user_id, token)
|
||||
|
||||
super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
await super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
|
||||
async def add_account_data_to_room(
|
||||
self, user_id: str, room_id: str, account_data_type: str, content: JsonDict
|
||||
|
|
|
@ -119,7 +119,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
|
|||
"get_all_updated_caches", get_all_updated_caches_txn
|
||||
)
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
|
||||
) -> None:
|
||||
if stream_name == EventsStream.NAME:
|
||||
|
@ -154,7 +154,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
|
|||
else:
|
||||
self._attempt_to_invalidate_cache(row.cache_func, row.keys)
|
||||
|
||||
super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
await super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
|
||||
def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None:
|
||||
data = row.data
|
||||
|
|
|
@ -128,7 +128,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
|
|||
prefilled_cache=device_outbox_prefill,
|
||||
)
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self,
|
||||
stream_name: str,
|
||||
instance_name: str,
|
||||
|
@ -148,7 +148,9 @@ class DeviceInboxWorkerStore(SQLBaseStore):
|
|||
self._device_federation_outbox_stream_cache.entity_has_changed(
|
||||
row.entity, token
|
||||
)
|
||||
return super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
return await super().process_replication_rows(
|
||||
stream_name, instance_name, token, rows
|
||||
)
|
||||
|
||||
def get_to_device_stream_token(self) -> int:
|
||||
return self._device_inbox_id_gen.get_current_token()
|
||||
|
|
|
@ -280,7 +280,7 @@ class EventsWorkerStore(SQLBaseStore):
|
|||
id_column="chain_id",
|
||||
)
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self,
|
||||
stream_name: str,
|
||||
instance_name: str,
|
||||
|
@ -292,7 +292,7 @@ class EventsWorkerStore(SQLBaseStore):
|
|||
elif stream_name == BackfillStream.NAME:
|
||||
self._backfill_id_gen.advance(instance_name, -token)
|
||||
|
||||
super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
await super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
|
||||
async def have_censored_event(self, event_id: str) -> bool:
|
||||
"""Check if an event has been censored, i.e. if the content of the event has been erased
|
||||
|
|
|
@ -431,7 +431,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
|
|||
self._presence_on_startup = []
|
||||
return active_on_startup
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self,
|
||||
stream_name: str,
|
||||
instance_name: str,
|
||||
|
@ -443,4 +443,6 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
|
|||
for row in rows:
|
||||
self.presence_stream_cache.entity_has_changed(row.user_id, token)
|
||||
self._get_presence_for_user.invalidate((row.user_id,))
|
||||
return super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
return await super().process_replication_rows(
|
||||
stream_name, instance_name, token, rows
|
||||
)
|
||||
|
|
|
@ -589,7 +589,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
|
|||
"get_unread_event_push_actions_by_room_for_user", (room_id,)
|
||||
)
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self,
|
||||
stream_name: str,
|
||||
instance_name: str,
|
||||
|
@ -604,7 +604,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
|
|||
)
|
||||
self._receipts_stream_cache.entity_has_changed(row.room_id, token)
|
||||
|
||||
return super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
return await super().process_replication_rows(
|
||||
stream_name, instance_name, token, rows
|
||||
)
|
||||
|
||||
def _insert_linearized_receipt_txn(
|
||||
self,
|
||||
|
|
|
@ -292,7 +292,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
|
|||
# than the id that the client has.
|
||||
pass
|
||||
|
||||
def process_replication_rows(
|
||||
async def process_replication_rows(
|
||||
self,
|
||||
stream_name: str,
|
||||
instance_name: str,
|
||||
|
@ -305,7 +305,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
|
|||
self.get_tags_for_user.invalidate((row.user_id,))
|
||||
self._account_data_stream_cache.entity_has_changed(row.user_id, token)
|
||||
|
||||
super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
await super().process_replication_rows(stream_name, instance_name, token, rows)
|
||||
|
||||
|
||||
class TagsStore(TagsWorkerStore):
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue