Fix some typos.

Patrick Cloke 2021-02-12 11:01:48 -05:00
parent 2c9b4a5f16
commit 7950aa8a27
23 changed files with 34 additions and 34 deletions

@@ -450,7 +450,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
},
)
-# Add the messages to the approriate local device inboxes so that
+# Add the messages to the appropriate local device inboxes so that
# they'll be sent to the devices when they next sync.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device

@@ -371,7 +371,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
# and state sets {A} and {B} then walking the auth chains of A and B
# would immediately show that C is reachable by both. However, if we
# stopped at C then we'd only reach E via the auth chain of B and so E
-# would errornously get included in the returned difference.
+# would erroneously get included in the returned difference.
#
# The other thing that we do is limit the number of auth chains we walk
# at once, due to practical limits (i.e. we can only query the database
@@ -497,7 +497,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
a_ids = new_aids
-# Mark that the auth event is reachable by the approriate sets.
+# Mark that the auth event is reachable by the appropriate sets.
sets.intersection_update(event_to_missing_sets[event_id])
search.sort()

@@ -1050,7 +1050,7 @@ class PersistEventsStore:
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
-# and which we have added, then we invlidate the caches for all
+# and which we have added, then we invalidate the caches for all
# those users.
members_changed = {
state_key

@@ -155,7 +155,7 @@ class KeyStore(SQLBaseStore):
(server_name, key_id, from_server) triplet if one already existed.
Args:
server_name: The name of the server.
-key_id: The identifer of the key this JSON is for.
+key_id: The identifier of the key this JSON is for.
from_server: The server this JSON was fetched from.
ts_now_ms: The time now in milliseconds.
ts_valid_until_ms: The time when this json stops being valid.
@@ -182,7 +182,7 @@ class KeyStore(SQLBaseStore):
async def get_server_keys_json(
self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]]
) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]:
"""Retrive the key json for a list of server_keys and key ids.
"""Retrieve the key json for a list of server_keys and key ids.
If no keys are found for a given server, key_id and source then
that server, key_id, and source triplet entry will be an empty list.
The JSON is returned as a byte array so that it can be efficiently

@@ -111,7 +111,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
async def count_daily_sent_e2ee_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
-# hostname then thats your own fault.
+# hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """
@@ -167,7 +167,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
async def count_daily_sent_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
-# hostname then thats your own fault.
+# hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """

@@ -160,7 +160,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
Args:
room_id: List of room_ids.
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.
@@ -189,7 +189,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
Args:
room_ids: The room id.
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.
@@ -312,7 +312,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
to a limit of the latest 100 read receipts.
Args:
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.

@@ -1044,7 +1044,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
async def _background_add_rooms_room_version_column(
self, progress: dict, batch_size: int
):
"""Background update to go and add room version inforamtion to `rooms`
"""Background update to go and add room version information to `rooms`
table from `current_state_events` table.
"""

@@ -64,7 +64,7 @@ class StateDeltasStore(SQLBaseStore):
def get_current_state_deltas_txn(txn):
# First we calculate the max stream id that will give us less than
# N results.
-# We arbitarily limit to 100 stream_id entries to ensure we don't
+# We arbitrarily limit to 100 stream_id entries to ensure we don't
# select toooo many.
sql = """
SELECT stream_id, count(*)
@@ -81,7 +81,7 @@ class StateDeltasStore(SQLBaseStore):
for stream_id, count in txn:
total += count
if total > 100:
-# We arbitarily limit to 100 entries to ensure we don't
+# We arbitrarily limit to 100 entries to ensure we don't
# select toooo many.
logger.debug(
"Clipping current_state_delta_stream rows to stream_id %i",

@@ -198,7 +198,7 @@ class TransactionStore(TransactionWorkerStore):
retry_interval: int,
) -> None:
"""Sets the current retry timings for a given destination.
-Both timings should be zero if retrying is no longer occuring.
+Both timings should be zero if retrying is no longer occurring.
Args:
destination

@@ -27,7 +27,7 @@ MAX_STATE_DELTA_HOPS = 100
class StateGroupBackgroundUpdateStore(SQLBaseStore):
"""Defines functions related to state groups needed to run the state backgroud
"""Defines functions related to state groups needed to run the state background
updates.
"""

@@ -113,7 +113,7 @@ def prepare_database(
# which should be empty.
if config is None:
raise ValueError(
"config==None in prepare_database, but databse is not empty"
"config==None in prepare_database, but database is not empty"
)
# if it's a worker app, refuse to upgrade the database, to avoid multiple

@@ -245,7 +245,7 @@ class MultiWriterIdGenerator:
# and b) noting that if we have seen a run of persisted positions
# without gaps (e.g. 5, 6, 7) then we can skip forward (e.g. to 7).
#
-# Note: There is no guarentee that the IDs generated by the sequence
+# Note: There is no guarantee that the IDs generated by the sequence
# will be gapless; gaps can form when e.g. a transaction was rolled
# back. This means that sometimes we won't be able to skip forward the
# position even though everything has been persisted. However, since
@@ -418,7 +418,7 @@ class MultiWriterIdGenerator:
# bother, as nothing will read it).
#
# We only do this on the success path so that the persisted current
-# position points to a persited row with the correct instance name.
+# position points to a persisted row with the correct instance name.
if self._writers:
txn.call_after(
run_as_background_process,
@@ -509,7 +509,7 @@ class MultiWriterIdGenerator:
}
def advance(self, instance_name: str, new_id: int):
"""Advance the postion of the named writer to the given ID, if greater
"""Advance the position of the named writer to the given ID, if greater
than existing entry.
"""