Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-05-02 14:06:02 -04:00)

Merge branch 'develop' into rav/upsert_for_device_list
This commit is contained in: commit 30a19daa02
74 changed files with 1244 additions and 779 deletions
@@ -273,8 +273,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="delete_account_validity_for_user",
         )

-    @defer.inlineCallbacks
-    def is_server_admin(self, user):
+    async def is_server_admin(self, user):
         """Determines if a user is an admin of this homeserver.

         Args:
@@ -283,7 +282,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns (bool):
             true iff the user is a server admin, false otherwise.
         """
-        res = yield self.db.simple_select_one_onecol(
+        res = await self.db.simple_select_one_onecol(
             table="users",
             keyvalues={"name": user.to_string()},
             retcol="admin",
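With `@defer.inlineCallbacks` and `yield` replaced by `async`/`await`, callers now await `is_server_admin` directly rather than yielding a Deferred. A minimal sketch of a caller, assuming only what the hunk above shows (the wrapper class, its `store` attribute, and the use of `PermissionError` are illustrative, not part of this diff):

class ExampleAdminGate:
    """Illustrative caller of the now-async is_server_admin()."""

    def __init__(self, store):
        self.store = store  # a RegistrationWorkerStore

    async def require_admin(self, user) -> None:
        # is_server_admin() is now a coroutine, so we await it directly
        # instead of yielding it inside an @defer.inlineCallbacks generator.
        if not await self.store.is_server_admin(user):
            raise PermissionError("%s is not a server admin" % (user.to_string(),))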
@@ -35,9 +35,13 @@ DELETE FROM background_updates WHERE update_name IN (
     'populate_stats_cleanup'
 );

+-- this relies on current_state_events.membership having been populated, so add
+-- a dependency on current_state_events_membership.
 INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
-    ('populate_stats_process_rooms', '{}', '');
+    ('populate_stats_process_rooms', '{}', 'current_state_events_membership');

+-- this also relies on current_state_events.membership having been populated, but
+-- we get that as a side-effect of depending on populate_stats_process_rooms.
 INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
     ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');
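The ordering encoded by `depends_on` matters here: `populate_stats_process_rooms` now waits for `current_state_events_membership`, and `populate_stats_process_users` in turn waits for `populate_stats_process_rooms`. To sanity-check the queue after applying this delta, a read-only query against the columns shown above can be used (a sketch, assuming a SQL shell on the Synapse database):

-- List pending background updates together with their declared dependencies.
SELECT update_name, depends_on, progress_json
  FROM background_updates
 ORDER BY update_name;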
@@ -16,6 +16,11 @@
 import contextlib
 import threading
 from collections import deque
+from typing import Dict, Set, Tuple
+
+from typing_extensions import Deque
+
+from synapse.storage.database import Database, LoggingTransaction


 class IdGenerator(object):
@@ -87,7 +92,7 @@ class StreamIdGenerator(object):
         self._current = (max if step > 0 else min)(
             self._current, _load_current_id(db_conn, table, column, step)
         )
-        self._unfinished_ids = deque()
+        self._unfinished_ids = deque()  # type: Deque[int]

     def get_next(self):
         """
@@ -163,7 +168,7 @@ class ChainedIdGenerator(object):
         self.chained_generator = chained_generator
         self._lock = threading.Lock()
         self._current_max = _load_current_id(db_conn, table, column)
-        self._unfinished_ids = deque()
+        self._unfinished_ids = deque()  # type: Deque[Tuple[int, int]]

     def get_next(self):
         """
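The `# type: Deque[...]` comments added in the two hunks above are PEP 484 comment-style annotations: mypy picks them up, while at runtime the objects remain plain deques (useful for annotating assignments without relying on newer variable-annotation syntax). A small standalone illustration of the same pattern, not code from this diff:

from collections import deque
from typing import Tuple

from typing_extensions import Deque

# mypy reads the trailing `# type:` comments; nothing changes at runtime.
pending_ids = deque()  # type: Deque[int]
pending_pairs = deque()  # type: Deque[Tuple[int, int]]

pending_ids.append(1)
pending_pairs.append((1, 2))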
@@ -198,3 +203,163 @@ class ChainedIdGenerator(object):
                 return stream_id - 1, chained_id

             return self._current_max, self.chained_generator.get_current_token()
+
+
+class MultiWriterIdGenerator:
+    """An ID generator that tracks a stream that can have multiple writers.
+
+    Uses a Postgres sequence to coordinate ID assignment, but positions of other
+    writers will only get updated when `advance` is called (by replication).
+
+    Note: Only works with Postgres.
+
+    Args:
+        db_conn
+        db
+        instance_name: The name of this instance.
+        table: Database table associated with stream.
+        instance_column: Column that stores the row's writer's instance name
+        id_column: Column that stores the stream ID.
+        sequence_name: The name of the postgres sequence used to generate new
+            IDs.
+    """
+
+    def __init__(
+        self,
+        db_conn,
+        db: Database,
+        instance_name: str,
+        table: str,
+        instance_column: str,
+        id_column: str,
+        sequence_name: str,
+    ):
+        self._db = db
+        self._instance_name = instance_name
+        self._sequence_name = sequence_name
+
+        # We lock as some functions may be called from DB threads.
+        self._lock = threading.Lock()
+
+        self._current_positions = self._load_current_ids(
+            db_conn, table, instance_column, id_column
+        )
+
+        # Set of local IDs that we're still processing. The current position
+        # should be less than the minimum of this set (if not empty).
+        self._unfinished_ids = set()  # type: Set[int]
+
+    def _load_current_ids(
+        self, db_conn, table: str, instance_column: str, id_column: str
+    ) -> Dict[str, int]:
+        sql = """
+            SELECT %(instance)s, MAX(%(id)s) FROM %(table)s
+            GROUP BY %(instance)s
+        """ % {
+            "instance": instance_column,
+            "id": id_column,
+            "table": table,
+        }
+
+        cur = db_conn.cursor()
+        cur.execute(sql)
+
+        # `cur` is an iterable over returned rows, which are 2-tuples.
+        current_positions = dict(cur)
+
+        cur.close()
+
+        return current_positions
+
+    def _load_next_id_txn(self, txn):
+        txn.execute("SELECT nextval(?)", (self._sequence_name,))
+        (next_id,) = txn.fetchone()
+        return next_id
+
+    async def get_next(self):
+        """
+        Usage:
+            with await stream_id_gen.get_next() as stream_id:
+                # ... persist event ...
+        """
+        next_id = await self._db.runInteraction("_load_next_id", self._load_next_id_txn)
+
+        # Assert the fetched ID is actually greater than what we currently
+        # believe the ID to be. If not, then the sequence and table have got
+        # out of sync somehow.
+        assert self.get_current_token() < next_id
+
+        with self._lock:
+            self._unfinished_ids.add(next_id)
+
+        @contextlib.contextmanager
+        def manager():
+            try:
+                yield next_id
+            finally:
+                self._mark_id_as_finished(next_id)
+
+        return manager()
+
+    def get_next_txn(self, txn: LoggingTransaction):
+        """
+        Usage:
+
+            stream_id = stream_id_gen.get_next(txn)
+            # ... persist event ...
+        """
+
+        next_id = self._load_next_id_txn(txn)
+
+        with self._lock:
+            self._unfinished_ids.add(next_id)
+
+        txn.call_after(self._mark_id_as_finished, next_id)
+        txn.call_on_exception(self._mark_id_as_finished, next_id)
+
+        return next_id
+
+    def _mark_id_as_finished(self, next_id: int):
+        """The ID has finished being processed so we should advance the
+        current position if possible.
+        """
+
+        with self._lock:
+            self._unfinished_ids.discard(next_id)
+
+            # Figure out if it's safe to advance the position by checking there
+            # aren't any lower allocated IDs that are yet to finish.
+            if all(c > next_id for c in self._unfinished_ids):
+                curr = self._current_positions.get(self._instance_name, 0)
+                self._current_positions[self._instance_name] = max(curr, next_id)
+
+    def get_current_token(self, instance_name: str = None) -> int:
+        """Gets the current position of a named writer (defaults to current
+        instance).
+
+        Returns 0 if we don't have a position for the named writer (likely due
+        to it being a new writer).
+        """
+
+        if instance_name is None:
+            instance_name = self._instance_name
+
+        with self._lock:
+            return self._current_positions.get(instance_name, 0)
+
+    def get_positions(self) -> Dict[str, int]:
+        """Get a copy of the current position map.
+        """
+
+        with self._lock:
+            return dict(self._current_positions)
+
+    def advance(self, instance_name: str, new_id: int):
+        """Advance the position of the named writer to the given ID, if greater
+        than existing entry.
+        """
+
+        with self._lock:
+            self._current_positions[instance_name] = max(
+                new_id, self._current_positions.get(instance_name, 0)
+            )
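The new `MultiWriterIdGenerator` allocates IDs from a Postgres sequence, tracks only locally allocated IDs in `_unfinished_ids`, and learns about other writers' positions solely through `advance()` (driven by replication), so `get_current_token()` for a remote instance can lag until that call arrives. A rough usage sketch under stated assumptions: the table, column, sequence, and instance names below are placeholders rather than values from this diff, and `db_conn`/`database` stand in for the usual connection and Synapse Database object; the `get_next()` call follows the usage shown in its docstring.

# Hypothetical wiring; the backing table records which instance wrote each row
# (instance_column) and the allocated ID (id_column), while a Postgres sequence
# such as `CREATE SEQUENCE example_stream_seq;` hands out new IDs.
async def persist_with_new_id(db_conn, database):
    id_gen = MultiWriterIdGenerator(
        db_conn,
        database,
        instance_name="worker1",
        table="example_stream",
        instance_column="instance_name",
        id_column="stream_id",
        sequence_name="example_stream_seq",
    )

    # Allocate an ID; the returned context manager marks it as finished
    # (letting the local position advance) when the block exits.
    with await id_gen.get_next() as stream_id:
        ...  # persist the row using stream_id

    # Positions of other writers only move when replication calls advance().
    id_gen.advance("worker2", 1234)
    return id_gen.get_positions()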