Stop sub-classing object (#8249)

This commit is contained in:
Patrick Cloke 2020-09-04 06:54:56 -04:00 committed by GitHub
parent 9f8abdcc38
commit c619253db8
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
168 changed files with 293 additions and 292 deletions

View file

@@ -37,7 +37,7 @@ from synapse.storage.state import StateGroupStorage
__all__ = ["DataStores", "DataStore"]
class Storage(object):
class Storage:
"""The high level interfaces for talking to various storage layers.
"""

View file

@@ -24,7 +24,7 @@ from . import engines
logger = logging.getLogger(__name__)
class BackgroundUpdatePerformance(object):
class BackgroundUpdatePerformance:
"""Tracks the how long a background update is taking to update its items"""
def __init__(self, name):
@@ -71,7 +71,7 @@ class BackgroundUpdatePerformance(object):
return float(self.total_item_count) / float(self.total_duration_ms)
class BackgroundUpdater(object):
class BackgroundUpdater:
""" Background updates are updates to the database that run in the
background. Each update processes a batch of data at once. We attempt to
limit the impact of each update by monitoring how long each batch takes to

View file

@@ -248,7 +248,7 @@ class LoggingTransaction:
self.txn.close()
class PerformanceCounters(object):
class PerformanceCounters:
def __init__(self):
self.current_counters = {}
self.previous_counters = {}
@@ -286,7 +286,7 @@ class PerformanceCounters(object):
R = TypeVar("R")
class DatabasePool(object):
class DatabasePool:
"""Wraps a single physical database and connection pool.
A single database may be used by multiple data stores.

View file

@@ -24,7 +24,7 @@ from synapse.storage.prepare_database import prepare_database
logger = logging.getLogger(__name__)
class Databases(object):
class Databases:
"""The various databases.
These are low level interfaces to physical databases.

View file

@@ -999,7 +999,7 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
await self.db_pool.runInteraction("forget_membership", f)
class _JoinedHostsCache(object):
class _JoinedHostsCache:
"""Cache for joined hosts in a room that is optimised to handle updates
via state deltas.
"""

View file

@@ -22,6 +22,6 @@ logger = logging.getLogger(__name__)
@attr.s(slots=True, frozen=True)
class FetchKeyResult(object):
class FetchKeyResult:
verify_key = attr.ib() # VerifyKey: the key itself
valid_until_ts = attr.ib() # int: how long we can use this key for

View file

@@ -69,7 +69,7 @@ stale_forward_extremities_counter = Histogram(
)
class _EventPeristenceQueue(object):
class _EventPeristenceQueue:
"""Queues up events so that they can be persisted in bulk with only one
concurrent transaction per room.
"""
@@ -172,7 +172,7 @@ class _EventPeristenceQueue(object):
pass
class EventsPersistenceStorage(object):
class EventsPersistenceStorage:
"""High level interface for handling persisting newly received events.
Takes care of batching up events by room, and calculating the necessary

View file

@@ -569,7 +569,7 @@ def _get_or_create_schema_state(txn, database_engine):
@attr.s()
class _DirectoryListing(object):
class _DirectoryListing:
"""Helper class to store schema file name and the
absolute path to it.

View file

@@ -20,7 +20,7 @@ from typing import Set
logger = logging.getLogger(__name__)
class PurgeEventsStorage(object):
class PurgeEventsStorage:
"""High level interface for purging rooms and event history.
"""

View file

@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
@attr.s
class PaginationChunk(object):
class PaginationChunk:
"""Returned by relation pagination APIs.
Attributes:
@@ -51,7 +51,7 @@ class PaginationChunk(object):
@attr.s(frozen=True, slots=True)
class RelationPaginationToken(object):
class RelationPaginationToken:
"""Pagination token for relation pagination API.
As the results are in topological order, we can use the
@@ -82,7 +82,7 @@ class RelationPaginationToken(object):
@attr.s(frozen=True, slots=True)
class AggregationPaginationToken(object):
class AggregationPaginationToken:
"""Pagination token for relation aggregation pagination API.
As the results are order by count and then MAX(stream_ordering) of the

View file

@@ -29,7 +29,7 @@ T = TypeVar("T")
@attr.s(slots=True)
class StateFilter(object):
class StateFilter:
"""A filter used when querying for state.
Attributes:
@@ -326,7 +326,7 @@ class StateFilter(object):
return member_filter, non_member_filter
class StateGroupStorage(object):
class StateGroupStorage:
"""High level interface to fetching state for event.
"""

View file

@@ -25,7 +25,7 @@ from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.util.sequence import PostgresSequenceGenerator
class IdGenerator(object):
class IdGenerator:
def __init__(self, db_conn, table, column):
self._lock = threading.Lock()
self._next_id = _load_current_id(db_conn, table, column)
@@ -59,7 +59,7 @@ def _load_current_id(db_conn, table, column, step=1):
return (max if step > 0 else min)(current_id, step)
class StreamIdGenerator(object):
class StreamIdGenerator:
"""Used to generate new stream ids when persisting events while keeping
track of which transactions have been completed.