mirror of
https://git.anonymousland.org/anonymousland/synapse.git
synced 2025-12-15 15:28:50 -05:00
Update black, and run auto formatting over the codebase (#9381)
- Update black version to the latest
- Run black auto formatting over the codebase
- Run autoformatting according to [`docs/code_style.md`](80d6dc9783/docs/code_style.md)
- Update `code_style.md` docs around installing black to use the correct version
This commit is contained in:
parent
5636e597c3
commit
0a00b7ff14
271 changed files with 2802 additions and 1713 deletions
|
|
@ -149,8 +149,7 @@ KNOWN_KEYS = {
|
|||
|
||||
|
||||
def intern_string(string):
|
||||
"""Takes a (potentially) unicode string and interns it if it's ascii
|
||||
"""
|
||||
"""Takes a (potentially) unicode string and interns it if it's ascii"""
|
||||
if string is None:
|
||||
return None
|
||||
|
||||
|
|
@ -161,8 +160,7 @@ def intern_string(string):
|
|||
|
||||
|
||||
def intern_dict(dictionary):
|
||||
"""Takes a dictionary and interns well known keys and their values
|
||||
"""
|
||||
"""Takes a dictionary and interns well known keys and their values"""
|
||||
return {
|
||||
KNOWN_KEYS.get(key, key): _intern_known_values(key, value)
|
||||
for key, value in dictionary.items()
|
||||
|
|
|
|||
|
|
@ -122,7 +122,8 @@ class _LruCachedFunction(Generic[F]):
|
|||
|
||||
|
||||
def lru_cache(
|
||||
max_entries: int = 1000, cache_context: bool = False,
|
||||
max_entries: int = 1000,
|
||||
cache_context: bool = False,
|
||||
) -> Callable[[F], _LruCachedFunction[F]]:
|
||||
"""A method decorator that applies a memoizing cache around the function.
|
||||
|
||||
|
|
@ -156,7 +157,9 @@ def lru_cache(
|
|||
|
||||
def func(orig: F) -> _LruCachedFunction[F]:
|
||||
desc = LruCacheDescriptor(
|
||||
orig, max_entries=max_entries, cache_context=cache_context,
|
||||
orig,
|
||||
max_entries=max_entries,
|
||||
cache_context=cache_context,
|
||||
)
|
||||
return cast(_LruCachedFunction[F], desc)
|
||||
|
||||
|
|
@ -170,14 +173,18 @@ class LruCacheDescriptor(_CacheDescriptorBase):
|
|||
sentinel = object()
|
||||
|
||||
def __init__(
|
||||
self, orig, max_entries: int = 1000, cache_context: bool = False,
|
||||
self,
|
||||
orig,
|
||||
max_entries: int = 1000,
|
||||
cache_context: bool = False,
|
||||
):
|
||||
super().__init__(orig, num_args=None, cache_context=cache_context)
|
||||
self.max_entries = max_entries
|
||||
|
||||
def __get__(self, obj, owner):
|
||||
cache = LruCache(
|
||||
cache_name=self.orig.__name__, max_size=self.max_entries,
|
||||
cache_name=self.orig.__name__,
|
||||
max_size=self.max_entries,
|
||||
) # type: LruCache[CacheKey, Any]
|
||||
|
||||
get_cache_key = self.cache_key_builder
|
||||
|
|
@ -212,7 +219,7 @@ class LruCacheDescriptor(_CacheDescriptorBase):
|
|||
|
||||
|
||||
class DeferredCacheDescriptor(_CacheDescriptorBase):
|
||||
""" A method decorator that applies a memoizing cache around the function.
|
||||
"""A method decorator that applies a memoizing cache around the function.
|
||||
|
||||
This caches deferreds, rather than the results themselves. Deferreds that
|
||||
fail are removed from the cache.
|
||||
|
|
|
|||
|
|
@ -84,8 +84,7 @@ class StreamChangeCache:
|
|||
return False
|
||||
|
||||
def has_entity_changed(self, entity: EntityType, stream_pos: int) -> bool:
|
||||
"""Returns True if the entity may have been updated since stream_pos
|
||||
"""
|
||||
"""Returns True if the entity may have been updated since stream_pos"""
|
||||
assert isinstance(stream_pos, int)
|
||||
|
||||
if stream_pos < self._earliest_known_stream_pos:
|
||||
|
|
@ -133,8 +132,7 @@ class StreamChangeCache:
|
|||
return result
|
||||
|
||||
def has_any_entity_changed(self, stream_pos: int) -> bool:
|
||||
"""Returns if any entity has changed
|
||||
"""
|
||||
"""Returns if any entity has changed"""
|
||||
assert type(stream_pos) is int
|
||||
|
||||
if not self._cache:
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue