Fix typos and spelling errors. (#8639)
parent c850dd9a8e
commit 34a5696f93

changelog.d/8639.misc (new file, 1 line added)
@@ -0,0 +1 @@
+Fix typos and spelling errors in the code.
@@ -1886,7 +1886,7 @@ sso:
 # and issued at ("iat") claims are validated if present.
 #
 # Note that this is a non-standard login type and client support is
-# expected to be non-existant.
+# expected to be non-existent.
 #
 # See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
 #
@@ -2402,7 +2402,7 @@ spam_checker:
 #
 # Options for the rules include:
 #
-# user_id: Matches agaisnt the creator of the alias
+# user_id: Matches against the creator of the alias
 # room_id: Matches against the room ID being published
 # alias: Matches against any current local or canonical aliases
 # associated with the room
@@ -2448,7 +2448,7 @@ opentracing:
 # This is a list of regexes which are matched against the server_name of the
 # homeserver.
 #
-# By defult, it is empty, so no servers are matched.
+# By default, it is empty, so no servers are matched.
 #
 #homeserver_whitelist:
 # - ".*"
@@ -59,7 +59,7 @@ root:
 # then write them to a file.
 #
 # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
-# also need to update the configuation for the `twisted` logger above, in
+# also need to update the configuration for the `twisted` logger above, in
 # this case.)
 #
 handlers: [buffer]
@@ -63,7 +63,7 @@ class JWTConfig(Config):
 # and issued at ("iat") claims are validated if present.
 #
 # Note that this is a non-standard login type and client support is
-# expected to be non-existant.
+# expected to be non-existent.
 #
 # See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
 #
@@ -105,7 +105,7 @@ root:
 # then write them to a file.
 #
 # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
-# also need to update the configuation for the `twisted` logger above, in
+# also need to update the configuration for the `twisted` logger above, in
 # this case.)
 #
 handlers: [buffer]
@@ -143,7 +143,7 @@ class RegistrationConfig(Config):
 RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
 }

-# Pull the creater/inviter from the configuration, this gets used to
+# Pull the creator/inviter from the configuration, this gets used to
 # send invites for invite-only rooms.
 mxid_localpart = config.get("auto_join_mxid_localpart")
 self.auto_join_user_id = None
@@ -99,7 +99,7 @@ class RoomDirectoryConfig(Config):
 #
 # Options for the rules include:
 #
-# user_id: Matches agaisnt the creator of the alias
+# user_id: Matches against the creator of the alias
 # room_id: Matches against the room ID being published
 # alias: Matches against any current local or canonical aliases
 # associated with the room
@@ -67,7 +67,7 @@ class TracerConfig(Config):
 # This is a list of regexes which are matched against the server_name of the
 # homeserver.
 #
-# By defult, it is empty, so no servers are matched.
+# By default, it is empty, so no servers are matched.
 #
 #homeserver_whitelist:
 # - ".*"
@@ -149,7 +149,7 @@ class FederationPolicyForHTTPS:
 return SSLClientConnectionCreator(host, ssl_context, should_verify)

 def creatorForNetloc(self, hostname, port):
-"""Implements the IPolicyForHTTPS interace so that this can be passed
+"""Implements the IPolicyForHTTPS interface so that this can be passed
 directly to agents.
 """
 return self.get_options(hostname)
@@ -59,7 +59,7 @@ class DictProperty:
 #
 # To exclude the KeyError from the traceback, we explicitly
 # 'raise from e1.__context__' (which is better than 'raise from None',
-# becuase that would omit any *earlier* exceptions).
+# because that would omit any *earlier* exceptions).
 #
 raise AttributeError(
 "'%s' has no '%s' property" % (type(instance), self.key)
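The comment above describes a standard Python idiom: chaining from `e.__context__` drops the intermediate KeyError from the traceback while still surfacing any earlier in-flight exception. A minimal sketch (hypothetical class, not Synapse's DictProperty):

    class Config:
        def __init__(self, data):
            self._data = data

        def __getattr__(self, name):
            try:
                return self._data[name]
            except KeyError as e:
                # 'from e.__context__' hides the KeyError itself but, unlike
                # 'from None', keeps any *earlier* exception that was already
                # being handled when the lookup failed.
                raise AttributeError(
                    "'%s' has no '%s' property" % (type(self), name)
                ) from e.__context__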
@@ -180,7 +180,7 @@ def only_fields(dictionary, fields):
 in 'fields'.

 If there are no event fields specified then all fields are included.
-The entries may include '.' charaters to indicate sub-fields.
+The entries may include '.' characters to indicate sub-fields.
 So ['content.body'] will include the 'body' field of the 'content' object.
 A literal '.' character in a field name may be escaped using a '\'.

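A rough sketch of the dotted-field filtering this docstring describes (not the real only_fields implementation; names and the escaping rule are simplified for illustration):

    import re

    def keep_fields(dictionary, fields):
        # Split each field on unescaped '.' characters, then unescape '\.'.
        def split_field(field):
            return [p.replace("\\.", ".") for p in re.split(r"(?<!\\)\.", field)]

        result = {}
        for field in fields:
            src, dst, parts = dictionary, result, split_field(field)
            try:
                for part in parts[:-1]:
                    src = src[part]
                    dst = dst.setdefault(part, {})
                dst[parts[-1]] = src[parts[-1]]
            except (KeyError, TypeError):
                continue  # the path isn't present; skip it
        return result

    # ['content.body'] keeps only the 'body' field of the 'content' object.
    keep_fields({"content": {"body": "hi", "url": "x"}}, ["content.body"])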
@@ -22,7 +22,7 @@ attestations have a validity period so need to be periodically renewed.
 If a user leaves (or gets kicked out of) a group, either side can still use
 their attestation to "prove" their membership, until the attestation expires.
 Therefore attestations shouldn't be relied on to prove membership in important
-cases, but can for less important situtations, e.g. showing a users membership
+cases, but can for less important situations, e.g. showing a users membership
 of groups on their profile, showing flairs, etc.

 An attestation is a signed blob of json that looks like:
@@ -113,7 +113,7 @@ class GroupsServerWorkerHandler:
 entry = await self.room_list_handler.generate_room_entry(
 room_id, len(joined_users), with_alias=False, allow_private=True
 )
-entry = dict(entry) # so we don't change whats cached
+entry = dict(entry) # so we don't change what's cached
 entry.pop("room_id", None)

 room_entry["profile"] = entry
@@ -550,7 +550,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
 group_id, room_id, is_public=is_public
 )
 else:
-raise SynapseError(400, "Uknown config option")
+raise SynapseError(400, "Unknown config option")

 return {}

@@ -88,7 +88,7 @@ class AdminHandler(BaseHandler):

 # We only try and fetch events for rooms the user has been in. If
 # they've been e.g. invited to a room without joining then we handle
-# those seperately.
+# those separately.
 rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id)

 for index, room in enumerate(rooms):
@@ -226,7 +226,7 @@ class ExfiltrationWriter:
 """

 def finished(self):
-"""Called when all data has succesfully been exported and written.
+"""Called when all data has successfully been exported and written.

 This functions return value is passed to the caller of
 `export_user_data`.
@@ -690,7 +690,7 @@ class AuthHandler(BaseHandler):
 Creates a new access token for the user with the given user ID.

 The user is assumed to have been authenticated by some other
-machanism (e.g. CAS), and the user_id converted to the canonical case.
+mechanism (e.g. CAS), and the user_id converted to the canonical case.

 The device will be recorded in the table if it is not there already.

@@ -112,7 +112,7 @@ class FederationHandler(BaseHandler):
 """Handles events that originated from federation.
 Responsible for:
 a) handling received Pdus before handing them on as Events to the rest
-of the homeserver (including auth and state conflict resoultion)
+of the homeserver (including auth and state conflict resolutions)
 b) converting events that were produced by local clients that may need
 to be sent to remote homeservers.
 c) doing the necessary dances to invite remote users and join remote
@@ -477,7 +477,7 @@ class FederationHandler(BaseHandler):
 # ----
 #
 # Update richvdh 2018/09/18: There are a number of problems with timing this
-# request out agressively on the client side:
+# request out aggressively on the client side:
 #
 # - it plays badly with the server-side rate-limiter, which starts tarpitting you
 # if you send too many requests at once, so you end up with the server carefully
@@ -495,13 +495,13 @@ class FederationHandler(BaseHandler):
 # we'll end up back here for the *next* PDU in the list, which exacerbates the
 # problem.
 #
-# - the agressive 10s timeout was introduced to deal with incoming federation
+# - the aggressive 10s timeout was introduced to deal with incoming federation
 # requests taking 8 hours to process. It's not entirely clear why that was going
 # on; certainly there were other issues causing traffic storms which are now
 # resolved, and I think in any case we may be more sensible about our locking
 # now. We're *certainly* more sensible about our logging.
 #
-# All that said: Let's try increasing the timout to 60s and see what happens.
+# All that said: Let's try increasing the timeout to 60s and see what happens.

 try:
 missing_events = await self.federation_client.get_missing_events(
@@ -1120,7 +1120,7 @@ class FederationHandler(BaseHandler):
 logger.info(str(e))
 continue
 except RequestSendFailed as e:
-logger.info("Falied to get backfill from %s because %s", dom, e)
+logger.info("Failed to get backfill from %s because %s", dom, e)
 continue
 except FederationDeniedError as e:
 logger.info(e)
@@ -1545,7 +1545,7 @@ class FederationHandler(BaseHandler):
 #
 # The reasons we have the destination server rather than the origin
 # server send it are slightly mysterious: the origin server should have
-# all the neccessary state once it gets the response to the send_join,
+# all the necessary state once it gets the response to the send_join,
 # so it could send the event itself if it wanted to. It may be that
 # doing it this way reduces failure modes, or avoids certain attacks
 # where a new server selectively tells a subset of the federation that
@@ -1649,7 +1649,7 @@ class FederationHandler(BaseHandler):
 event.internal_metadata.outlier = True
 event.internal_metadata.out_of_band_membership = True

-# Try the host that we succesfully called /make_leave/ on first for
+# Try the host that we successfully called /make_leave/ on first for
 # the /send_leave/ request.
 host_list = list(target_hosts)
 try:
@@ -349,7 +349,7 @@ class GroupsLocalHandler(GroupsLocalWorkerHandler):
 server_name=get_domain_from_id(group_id),
 )

-# TODO: Check that the group is public and we're being added publically
+# TODO: Check that the group is public and we're being added publicly
 is_publicised = content.get("publicise", False)

 token = await self.store.register_user_group_membership(
@@ -394,7 +394,7 @@ class GroupsLocalHandler(GroupsLocalWorkerHandler):
 server_name=get_domain_from_id(group_id),
 )

-# TODO: Check that the group is public and we're being added publically
+# TODO: Check that the group is public and we're being added publicly
 is_publicised = content.get("publicise", False)

 token = await self.store.register_user_group_membership(
@@ -657,7 +657,7 @@ class EventCreationHandler:
 context: The event context.

 Returns:
-The previous verion of the event is returned, if it is found in the
+The previous version of the event is returned, if it is found in the
 event context. Otherwise, None is returned.
 """
 prev_state_ids = await context.get_prev_state_ids()
@@ -217,7 +217,7 @@ class OidcHandler:

 This is based on the requested scopes: if the scopes include
 ``openid``, the provider should give use an ID token containing the
-user informations. If not, we should fetch them using the
+user information. If not, we should fetch them using the
 ``access_token`` with the ``userinfo_endpoint``.
 """

@@ -426,7 +426,7 @@ class OidcHandler:
 return resp

 async def _fetch_userinfo(self, token: Token) -> UserInfo:
-"""Fetch user informations from the ``userinfo_endpoint``.
+"""Fetch user information from the ``userinfo_endpoint``.

 Args:
 token: the token given by the ``token_endpoint``.
@@ -754,7 +754,7 @@ class OidcHandler:
 Defaults to an hour.

 Returns:
-A signed macaroon token with the session informations.
+A signed macaroon token with the session information.
 """
 macaroon = pymacaroons.Macaroon(
 location=self._server_name, identifier="key", key=self._macaroon_secret_key,
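For context, a hedged sketch of minting such a session macaroon with pymacaroons, following the constructor shown above; the caveat names and lifetime are illustrative, not Synapse's exact format:

    import time
    import pymacaroons

    def make_session_token(server_name, secret_key, session_data, lifetime_ms=3600000):
        macaroon = pymacaroons.Macaroon(
            location=server_name, identifier="key", key=secret_key,
        )
        # First-party caveats carry the session information and an expiry time.
        macaroon.add_first_party_caveat("session = %s" % (session_data,))
        macaroon.add_first_party_caveat(
            "time < %d" % (int(time.time() * 1000) + lifetime_ms,)
        )
        return macaroon.serialize()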
@@ -802,7 +802,7 @@ class PresenceHandler(BasePresenceHandler):
 between the requested tokens due to the limit.

 The token returned can be used in a subsequent call to this
-function to get further updatees.
+function to get further updates.

 The updates are a list of 2-tuples of stream ID and the row data
 """
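The "returned token feeds the next call" pattern this docstring describes amounts to a simple drain loop; the names below are made up for illustration and are not Synapse's API:

    async def drain_updates(get_updates, from_token, to_token, limit=100):
        rows = []
        current = from_token
        while True:
            batch, current, limited = await get_updates(current, to_token, limit)
            rows.extend(batch)  # each row is a (stream_id, row_data) 2-tuple
            if not limited:
                # everything up to to_token has now been returned
                return rows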
@@ -977,7 +977,7 @@ def should_notify(old_state, new_state):
 new_state.last_active_ts - old_state.last_active_ts
 > LAST_ACTIVE_GRANULARITY
 ):
-# Only notify about last active bumps if we're not currently acive
+# Only notify about last active bumps if we're not currently active
 if not new_state.currently_active:
 notify_reason_counter.labels("last_active_change_online").inc()
 return True
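Read in isolation, that branch of should_notify boils down to the following sketch (the granularity constant here is illustrative, not necessarily Synapse's value):

    LAST_ACTIVE_GRANULARITY = 60 * 1000  # ms; illustrative value only

    def last_active_bump_should_notify(old_state, new_state):
        # An activity bump only produces a notification when the user is not
        # already marked as currently active.
        if (
            new_state.last_active_ts - old_state.last_active_ts
            > LAST_ACTIVE_GRANULARITY
        ):
            return not new_state.currently_active
        return False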
@@ -102,7 +102,7 @@ class ProfileHandler(BaseHandler):

 async def get_profile_from_cache(self, user_id: str) -> JsonDict:
 """Get the profile information from our local cache. If the user is
-ours then the profile information will always be corect. Otherwise,
+ours then the profile information will always be correct. Otherwise,
 it may be out of date/missing.
 """
 target_user = UserID.from_string(user_id)
@@ -1268,7 +1268,7 @@ class RoomShutdownHandler:
 )

 # We now wait for the create room to come back in via replication so
-# that we can assume that all the joins/invites have propogated before
+# that we can assume that all the joins/invites have propagated before
 # we try and auto join below.
 await self._replication.wait_for_stream_position(
 self.hs.config.worker.events_shard_config.get_instance(new_room_id),
@@ -139,7 +139,7 @@ class SearchHandler(BaseHandler):
 # Filter to apply to results
 filter_dict = room_cat.get("filter", {})

-# What to order results by (impacts whether pagination can be doen)
+# What to order results by (impacts whether pagination can be done)
 order_by = room_cat.get("order_by", "rank")

 # Return the current state of the rooms?
@@ -32,7 +32,7 @@ class StateDeltasHandler:
 Returns:
 None if the field in the events either both match `public_value`
 or if neither do, i.e. there has been no change.
-True if it didnt match `public_value` but now does
+True if it didn't match `public_value` but now does
 False if it did match `public_value` but now doesn't
 """
 prev_event = None
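The tri-state return value described in that docstring can be summarised as follows (a hedged paraphrase, not the actual StateDeltasHandler method):

    from typing import Optional

    def membership_change(prev_value, new_value, public_value) -> Optional[bool]:
        prev_matches = prev_value == public_value
        new_matches = new_value == public_value
        if prev_matches == new_matches:
            return None       # both match or neither does: no change
        return new_matches    # True: now matches; False: no longer matches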
@@ -754,7 +754,7 @@ class SyncHandler:
 """
 # TODO(mjark) Check if the state events were received by the server
 # after the previous sync, since we need to include those state
-# updates even if they occured logically before the previous event.
+# updates even if they occurred logically before the previous event.
 # TODO(mjark) Check for new redactions in the state events.

 with Measure(self.clock, "compute_state_delta"):
@@ -1882,7 +1882,7 @@ class SyncHandler:
 # members (as the client otherwise doesn't have enough info to form
 # the name itself).
 if sync_config.filter_collection.lazy_load_members() and (
-# we recalulate the summary:
+# we recalculate the summary:
 # if there are membership changes in the timeline, or
 # if membership has changed during a gappy sync, or
 # if this is an initial sync.
@@ -371,7 +371,7 @@ class TypingWriterHandler(FollowerTypingHandler):
 between the requested tokens due to the limit.

 The token returned can be used in a subsequent call to this
-function to get further updatees.
+function to get further updates.

 The updates are a list of 2-tuples of stream ID and the row data
 """
@@ -31,7 +31,7 @@ class UserDirectoryHandler(StateDeltasHandler):
 N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY

 The user directory is filled with users who this server can see are joined to a
-world_readable or publically joinable room. We keep a database table up to date
+world_readable or publicly joinable room. We keep a database table up to date
 by streaming changes of the current state and recalculating whether users should
 be in the directory or not when necessary.
 """
@@ -172,7 +172,7 @@ class WellKnownResolver:
 had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)

 # We do this in two steps to differentiate between possibly transient
-# errors (e.g. can't connect to host, 503 response) and more permenant
+# errors (e.g. can't connect to host, 503 response) and more permanent
 # errors (such as getting a 404 response).
 response, body = await self._make_well_known_request(
 server_name, retry=had_valid_well_known
@@ -587,7 +587,7 @@ class MatrixFederationHttpClient:
 """
 Builds the Authorization headers for a federation request
 Args:
-destination (bytes|None): The desination homeserver of the request.
+destination (bytes|None): The destination homeserver of the request.
 May be None if the destination is an identity server, in which case
 destination_is must be non-None.
 method (bytes): The HTTP method of the request
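For reference, the object that gets signed for that Authorization header looks roughly like the following, per the Matrix federation spec; the ed25519 signing step itself and the exact canonical-JSON rules are omitted here:

    import json

    def auth_request_json(method, uri, origin, destination, content=None):
        # This is the JSON blob the origin server signs with its signing key;
        # the resulting signature goes into the X-Matrix Authorization header.
        request = {
            "method": method,
            "uri": uri,
            "origin": origin,
            "destination": destination,
        }
        if content is not None:
            request["content"] = content
        return json.dumps(request, sort_keys=True)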
@@ -640,7 +640,7 @@ class MatrixFederationHttpClient:
 backoff_on_404=False,
 try_trailing_slash_on_400=False,
 ):
-""" Sends the specifed json data using PUT
+""" Sends the specified json data using PUT

 Args:
 destination (str): The remote server to send the HTTP request
@@ -729,7 +729,7 @@ class MatrixFederationHttpClient:
 ignore_backoff=False,
 args={},
 ):
-""" Sends the specifed json data using POST
+""" Sends the specified json data using POST

 Args:
 destination (str): The remote server to send the HTTP request
@@ -109,7 +109,7 @@ in_flight_requests_db_sched_duration = Counter(
 # The set of all in flight requests, set[RequestMetrics]
 _in_flight_requests = set()

-# Protects the _in_flight_requests set from concurrent accesss
+# Protects the _in_flight_requests set from concurrent access
 _in_flight_requests_lock = threading.Lock()

@@ -182,7 +182,7 @@ class HttpServer:
 """ Register a callback that gets fired if we receive a http request
 with the given method for a path that matches the given regex.

-If the regex contains groups these gets passed to the calback via
+If the regex contains groups these gets passed to the callback via
 an unpacked tuple.

 Args:
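As a toy illustration of "regex groups get passed to the callback as an unpacked tuple" (this is not Synapse's HttpServer, just the general pattern):

    import re

    routes = []

    def register_path(method, pattern, callback):
        routes.append((method, re.compile(pattern), callback))

    async def dispatch(method, path):
        for m, pattern, callback in routes:
            match = pattern.match(path)
            if m == method and match:
                # the captured groups are unpacked and handed to the callback
                return await callback(*match.groups())
        raise KeyError("no handler for %s %s" % (method, path))

    async def get_room_state(room_id):
        return {"room_id": room_id}

    register_path("GET", r"^/rooms/(?P<room_id>[^/]+)/state$", get_room_state)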
@@ -241,7 +241,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):

 async def _async_render(self, request: Request):
 """Delegates to `_async_render_<METHOD>` methods, or returns a 400 if
-no appropriate method exists. Can be overriden in sub classes for
+no appropriate method exists. Can be overridden in sub classes for
 different routing.
 """
 # Treat HEAD requests as GET requests.
@@ -386,7 +386,7 @@ class JsonResource(DirectServeJsonResource):
 async def _async_render(self, request):
 callback, servlet_classname, group_dict = self._get_handler_for_request(request)

-# Make sure we have an appopriate name for this handler in prometheus
+# Make sure we have an appropriate name for this handler in prometheus
 # (rather than the default of JsonResource).
 request.request_metrics.name = servlet_classname

@@ -167,7 +167,9 @@ class SynapseRequest(Request):
 yield
 except Exception:
 # this should already have been caught, and sent back to the client as a 500.
-logger.exception("Asynchronous messge handler raised an uncaught exception")
+logger.exception(
+    "Asynchronous message handler raised an uncaught exception"
+)
 finally:
 # the request handler has finished its work and either sent the whole response
 # back, or handed over responsibility to a Producer.
@@ -266,7 +266,7 @@ class BackgroundProcessLoggingContext(LoggingContext):

 super().__exit__(type, value, traceback)

-# The background process has finished. We explictly remove and manually
+# The background process has finished. We explicitly remove and manually
 # update the metrics here so that if nothing is scraping metrics the set
 # doesn't infinitely grow.
 with _bg_metrics_lock:
@@ -393,7 +393,7 @@ class Notifier:
 )

 def on_new_replication_data(self) -> None:
-"""Used to inform replication listeners that something has happend
+"""Used to inform replication listeners that something has happened
 without waking up any of the normal user event streams"""
 self.notify_replication()

@@ -37,7 +37,7 @@ def list_with_base_rules(rawrules, use_new_defaults=False):
 modified_base_rules = {r["rule_id"]: r for r in rawrules if r["priority_class"] < 0}

 # Remove the modified base rules from the list, They'll be added back
-# in the default postions in the list.
+# in the default positions in the list.
 rawrules = [r for r in rawrules if r["priority_class"] >= 0]

 # shove the server default rules for each kind onto the end of each
@@ -390,12 +390,12 @@ class RulesForRoom:
 continue

 # If a user has left a room we remove their push rule. If they
-# joined then we readd it later in _update_rules_with_member_event_ids
+# joined then we re-add it later in _update_rules_with_member_event_ids
 ret_rules_by_user.pop(user_id, None)
 missing_member_event_ids[user_id] = event_id

 if missing_member_event_ids:
-# If we have some memebr events we haven't seen, look them up
+# If we have some member events we haven't seen, look them up
 # and fetch push rules for them if appropriate.
 logger.debug("Found new member events %r", missing_member_event_ids)
 await self._update_rules_with_member_event_ids(
@@ -104,7 +104,7 @@ class ConsentServerNotices:

 def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
-"""Deep-copy a structure, carrying out string substitions on any strings
+"""Deep-copy a structure, carrying out string substitutions on any strings

 Args:
 x (object): structure to be copied
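The helper's behaviour, as described by its docstring, is roughly the following (a sketch, not the exact Synapse code):

    from typing import Any

    def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
        if isinstance(x, str):
            return x % substitutions
        if isinstance(x, dict):
            return {k: copy_with_str_subst(v, substitutions) for k, v in x.items()}
        if isinstance(x, (list, tuple)):
            return [copy_with_str_subst(v, substitutions) for v in x]
        # other types are returned unchanged (no copy needed)
        return x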
@@ -547,7 +547,7 @@ class StateResolutionHandler:
 event_map:
 a dict from event_id to event, for any events that we happen to
 have in flight (eg, those currently being persisted). This will be
-used as a starting point fof finding the state we need; any missing
+used as a starting point for finding the state we need; any missing
 events will be requested via state_res_store.

 If None, all events will be fetched via state_res_store.
@@ -56,7 +56,7 @@ async def resolve_events_with_store(
 event_map:
 a dict from event_id to event, for any events that we happen to
 have in flight (eg, those currently being persisted). This will be
-used as a starting point fof finding the state we need; any missing
+used as a starting point for finding the state we need; any missing
 events will be requested via state_map_factory.

 If None, all events will be fetched via state_map_factory.
@@ -69,7 +69,7 @@ async def resolve_events_with_store(
 event_map:
 a dict from event_id to event, for any events that we happen to
 have in flight (eg, those currently being persisted). This will be
-used as a starting point fof finding the state we need; any missing
+used as a starting point for finding the state we need; any missing
 events will be requested via state_res_store.

 If None, all events will be fetched via state_res_store.
@@ -182,7 +182,7 @@ matrixLogin.passwordLogin = function() {
 };

 /*
-* The onLogin function gets called after a succesful login.
+* The onLogin function gets called after a successful login.
 *
 * It is expected that implementations override this to be notified when the
 * login is complete. The response to the login call is provided as the single