Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2024-10-01 11:49:51 -04:00)
Merge pull request #6454 from matrix-org/erikj/clean_base_Store
Move things out of SQLBaseStore
This commit is contained in:
commit 3eb15c01d9
changelog.d/6454.misc (new file, 1 line)
@@ -0,0 +1 @@
+Move data store specific code out of `SQLBaseStore`.
@@ -18,7 +18,8 @@ from typing import Dict

 import six

-from synapse.storage._base import _CURRENT_STATE_CACHE_NAME, SQLBaseStore
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.data_stores.main.cache import CURRENT_STATE_CACHE_NAME
 from synapse.storage.engines import PostgresEngine

 from ._slaved_id_tracker import SlavedIdTracker
@@ -62,7 +63,7 @@ class BaseSlavedStore(SQLBaseStore):
         if stream_name == "caches":
             self._cache_id_gen.advance(token)
             for row in rows:
-                if row.cache_func == _CURRENT_STATE_CACHE_NAME:
+                if row.cache_func == CURRENT_STATE_CACHE_NAME:
                     room_id = row.keys[0]
                     members_changed = set(row.keys[1:])
                     self._invalidate_state_caches(room_id, members_changed)
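The special current-state entry packs the room ID and the changed user IDs into a single `keys` list, which the loop above unpacks. A minimal sketch of that layout, using a simplified stand-in for the replication row type:

from collections import namedtuple

CURRENT_STATE_CACHE_NAME = "cs_cache_fake"

# Simplified stand-in for a row on the "caches" replication stream.
CacheRow = namedtuple("CacheRow", ("cache_func", "keys"))

def handle_row(row):
    if row.cache_func == CURRENT_STATE_CACHE_NAME:
        # keys[0] is the room ID; the remainder are the changed members.
        return row.keys[0], set(row.keys[1:])

row = CacheRow(CURRENT_STATE_CACHE_NAME, ["!room:hs", "@alice:hs", "@bob:hs"])
print(handle_row(row))  # ('!room:hs', {'@alice:hs', '@bob:hs'})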
@@ -14,11 +14,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import itertools
 import logging
 import random
 import sys
-import threading
 import time
 from typing import Iterable, Tuple

@@ -35,8 +33,6 @@ from synapse.logging.context import LoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import get_domain_from_id
-from synapse.util import batch_iter
-from synapse.util.caches.descriptors import Cache
 from synapse.util.stringutils import exception_to_unicode

 # import a function which will return a monotonic time, in seconds
@@ -79,10 +75,6 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
     "event_search": "event_search_event_id_idx",
 }

-# This is a special cache name we use to batch multiple invalidations of caches
-# based on the current state when notifying workers over replication.
-_CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
-

 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
@@ -237,23 +229,11 @@ class SQLBaseStore(object):
         # to watch it
         self._txn_perf_counters = PerformanceCounters()

-        self._get_event_cache = Cache(
-            "*getEvent*", keylen=3, max_entries=hs.config.event_cache_size
-        )
-
-        self._event_fetch_lock = threading.Condition()
-        self._event_fetch_list = []
-        self._event_fetch_ongoing = 0
-
-        self._pending_ds = []
-
         self.database_engine = hs.database_engine

         # A set of tables that are not safe to use native upserts in.
         self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys())

-        self._account_validity = self.hs.config.account_validity
-
         # We add the user_directory_search table to the blacklist on SQLite
         # because the existing search table does not have an index, making it
         # unsafe to use native upserts.
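The `_unsafe_to_upsert_tables` blacklist kept above exists because native upserts (`INSERT ... ON CONFLICT`) only work when the conflict columns carry a unique index. A rough sketch of the two paths, against an illustrative single-key table rather than Synapse's actual helper or schema:

def upsert_txn(txn, table, key, value, can_native_upsert):
    """Illustrative upsert: txn is a DB-API cursor; table/columns are made up."""
    if can_native_upsert:
        # Safe only when "k" has a UNIQUE index for ON CONFLICT to target.
        txn.execute(
            "INSERT INTO %s (k, v) VALUES (?, ?)"
            " ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v" % (table,),
            (key, value),
        )
    else:
        # Emulated fallback: UPDATE first, INSERT only if nothing matched.
        txn.execute("UPDATE %s SET v = ? WHERE k = ?" % (table,), (value, key))
        if txn.rowcount == 0:
            txn.execute(
                "INSERT INTO %s (k, v) VALUES (?, ?)" % (table,), (key, value)
            )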
@@ -272,14 +252,6 @@ class SQLBaseStore(object):

         self.rand = random.SystemRandom()

-        if self._account_validity.enabled:
-            self._clock.call_later(
-                0.0,
-                run_as_background_process,
-                "account_validity_set_expiration_dates",
-                self._set_expiration_date_when_missing,
-            )
-
     @defer.inlineCallbacks
     def _check_safe_to_upsert(self):
         """
@@ -312,62 +284,6 @@ class SQLBaseStore(object):
             self._check_safe_to_upsert,
         )

-    @defer.inlineCallbacks
-    def _set_expiration_date_when_missing(self):
-        """
-        Retrieves the list of registered users that don't have an expiration date, and
-        adds an expiration date for each of them.
-        """
-
-        def select_users_with_no_expiration_date_txn(txn):
-            """Retrieves the list of registered users with no expiration date from the
-            database, filtering out deactivated users.
-            """
-            sql = (
-                "SELECT users.name FROM users"
-                " LEFT JOIN account_validity ON (users.name = account_validity.user_id)"
-                " WHERE account_validity.user_id is NULL AND users.deactivated = 0;"
-            )
-            txn.execute(sql, [])
-
-            res = self.cursor_to_dict(txn)
-            if res:
-                for user in res:
-                    self.set_expiration_date_for_user_txn(
-                        txn, user["name"], use_delta=True
-                    )
-
-        yield self.runInteraction(
-            "get_users_with_no_expiration_date",
-            select_users_with_no_expiration_date_txn,
-        )
-
-    def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False):
-        """Sets an expiration date to the account with the given user ID.
-
-        Args:
-            user_id (str): User ID to set an expiration date for.
-            use_delta (bool): If set to False, the expiration date for the user will be
-                now + validity period. If set to True, this expiration date will be a
-                random value in the [now + period - d ; now + period] range, d being a
-                delta equal to 10% of the validity period.
-        """
-        now_ms = self._clock.time_msec()
-        expiration_ts = now_ms + self._account_validity.period
-
-        if use_delta:
-            expiration_ts = self.rand.randrange(
-                expiration_ts - self._account_validity.startup_job_max_delta,
-                expiration_ts,
-            )
-
-        self._simple_upsert_txn(
-            txn,
-            "account_validity",
-            keyvalues={"user_id": user_id},
-            values={"expiration_ts_ms": expiration_ts, "email_sent": False},
-        )
-
     def start_profiling(self):
         self._previous_loop_ts = monotonic_time()

@@ -1400,47 +1316,6 @@ class SQLBaseStore(object):

         return cache, min_val

-    def _invalidate_cache_and_stream(self, txn, cache_func, keys):
-        """Invalidates the cache and adds it to the cache stream so slaves
-        will know to invalidate their caches.
-
-        This should only be used to invalidate caches where slaves won't
-        otherwise know from other replication streams that the cache should
-        be invalidated.
-        """
-        txn.call_after(cache_func.invalidate, keys)
-        self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
-
-    def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
-        """Special case invalidation of caches based on current state.
-
-        We special case this so that we can batch the cache invalidations into a
-        single replication poke.
-
-        Args:
-            txn
-            room_id (str): Room where state changed
-            members_changed (iterable[str]): The user_ids of members that have changed
-        """
-        txn.call_after(self._invalidate_state_caches, room_id, members_changed)
-
-        if members_changed:
-            # We need to be careful that the size of the `members_changed` list
-            # isn't so large that it causes problems sending over replication, so we
-            # send them in chunks.
-            # Max line length is 16K, and max user ID length is 255, so 50 should
-            # be safe.
-            for chunk in batch_iter(members_changed, 50):
-                keys = itertools.chain([room_id], chunk)
-                self._send_invalidation_to_replication(
-                    txn, _CURRENT_STATE_CACHE_NAME, keys
-                )
-        else:
-            # if no members changed, we still need to invalidate the other caches.
-            self._send_invalidation_to_replication(
-                txn, _CURRENT_STATE_CACHE_NAME, [room_id]
-            )
-
     def _invalidate_state_caches(self, room_id, members_changed):
         """Invalidates caches that are based on the current state, but does
         not stream invalidations down replication.
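The chunking in the removed block (and in its new home in cache.py below) leans on `synapse.util.batch_iter`. Functionally it behaves like this sketch:

from itertools import islice

def batch_iter(iterable, size):
    """Yield successive tuples of at most `size` items from `iterable`."""
    sourceiter = iter(iterable)
    # Call the lambda until it returns the empty-tuple sentinel.
    return iter(lambda: tuple(islice(sourceiter, size)), ())

members = ["@u%d:hs" % i for i in range(5)]
print(list(batch_iter(members, 2)))
# [('@u0:hs', '@u1:hs'), ('@u2:hs', '@u3:hs'), ('@u4:hs',)]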
@@ -1474,63 +1349,6 @@ class SQLBaseStore(object):
             # which is fine.
             pass

-    def _send_invalidation_to_replication(self, txn, cache_name, keys):
-        """Notifies replication that given cache has been invalidated.
-
-        Note that this does *not* invalidate the cache locally.
-
-        Args:
-            txn
-            cache_name (str)
-            keys (iterable[str])
-        """
-
-        if isinstance(self.database_engine, PostgresEngine):
-            # get_next() returns a context manager which is designed to wrap
-            # the transaction. However, we want to only get an ID when we want
-            # to use it, here, so we need to call __enter__ manually, and have
-            # __exit__ called after the transaction finishes.
-            ctx = self._cache_id_gen.get_next()
-            stream_id = ctx.__enter__()
-            txn.call_on_exception(ctx.__exit__, None, None, None)
-            txn.call_after(ctx.__exit__, None, None, None)
-            txn.call_after(self.hs.get_notifier().on_new_replication_data)
-
-            self._simple_insert_txn(
-                txn,
-                table="cache_invalidation_stream",
-                values={
-                    "stream_id": stream_id,
-                    "cache_func": cache_name,
-                    "keys": list(keys),
-                    "invalidation_ts": self.clock.time_msec(),
-                },
-            )
-
-    def get_all_updated_caches(self, last_id, current_id, limit):
-        if last_id == current_id:
-            return defer.succeed([])
-
-        def get_all_updated_caches_txn(txn):
-            # We purposefully don't bound by the current token, as we want to
-            # send across cache invalidations as quickly as possible. Cache
-            # invalidations are idempotent, so duplicates are fine.
-            sql = (
-                "SELECT stream_id, cache_func, keys, invalidation_ts"
-                " FROM cache_invalidation_stream"
-                " WHERE stream_id > ? ORDER BY stream_id ASC LIMIT ?"
-            )
-            txn.execute(sql, (last_id, limit))
-            return txn.fetchall()
-
-        return self.runInteraction("get_all_updated_caches", get_all_updated_caches_txn)
-
-    def get_cache_stream_token(self):
-        if self._cache_id_gen:
-            return self._cache_id_gen.get_current_token()
-        else:
-            return 0
-
     def _simple_select_list_paginate(
         self,
         table,
@@ -32,6 +32,7 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache

 from .account_data import AccountDataStore
 from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
+from .cache import CacheInvalidationStore
 from .client_ips import ClientIpStore
 from .deviceinbox import DeviceInboxStore
 from .devices import DeviceStore
@@ -110,6 +111,7 @@ class DataStore(
     MonthlyActiveUsersStore,
     StatsStore,
     RelationsStore,
+    CacheInvalidationStore,
 ):
     def __init__(self, db_conn, hs):
         self.hs = hs
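Adding `CacheInvalidationStore` to the base list is all the wiring the moved methods need; Python's method resolution order makes them callable on `DataStore` exactly as before. A toy illustration of the pattern, with class bodies reduced to stubs:

class SQLBaseStore:
    """Stub standing in for the shared base class."""

class CacheInvalidationStore(SQLBaseStore):
    def get_cache_stream_token(self):
        return 0

class DataStore(CacheInvalidationStore):
    """Stub: the real class mixes in dozens of stores."""

store = DataStore()
print(store.get_cache_stream_token())  # 0 -- inherited from the mixin
print([c.__name__ for c in DataStore.__mro__])
# ['DataStore', 'CacheInvalidationStore', 'SQLBaseStore', 'object']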
synapse/storage/data_stores/main/cache.py (new file, 131 lines)
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import itertools
+import logging
+
+from twisted.internet import defer
+
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.engines import PostgresEngine
+from synapse.util import batch_iter
+
+logger = logging.getLogger(__name__)
+
+
+# This is a special cache name we use to batch multiple invalidations of caches
+# based on the current state when notifying workers over replication.
+CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
+
+
+class CacheInvalidationStore(SQLBaseStore):
+    def _invalidate_cache_and_stream(self, txn, cache_func, keys):
+        """Invalidates the cache and adds it to the cache stream so slaves
+        will know to invalidate their caches.
+
+        This should only be used to invalidate caches where slaves won't
+        otherwise know from other replication streams that the cache should
+        be invalidated.
+        """
+        txn.call_after(cache_func.invalidate, keys)
+        self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
+
+    def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
+        """Special case invalidation of caches based on current state.
+
+        We special case this so that we can batch the cache invalidations into a
+        single replication poke.
+
+        Args:
+            txn
+            room_id (str): Room where state changed
+            members_changed (iterable[str]): The user_ids of members that have changed
+        """
+        txn.call_after(self._invalidate_state_caches, room_id, members_changed)
+
+        if members_changed:
+            # We need to be careful that the size of the `members_changed` list
+            # isn't so large that it causes problems sending over replication, so we
+            # send them in chunks.
+            # Max line length is 16K, and max user ID length is 255, so 50 should
+            # be safe.
+            for chunk in batch_iter(members_changed, 50):
+                keys = itertools.chain([room_id], chunk)
+                self._send_invalidation_to_replication(
+                    txn, CURRENT_STATE_CACHE_NAME, keys
+                )
+        else:
+            # if no members changed, we still need to invalidate the other caches.
+            self._send_invalidation_to_replication(
+                txn, CURRENT_STATE_CACHE_NAME, [room_id]
+            )
+
+    def _send_invalidation_to_replication(self, txn, cache_name, keys):
+        """Notifies replication that given cache has been invalidated.
+
+        Note that this does *not* invalidate the cache locally.
+
+        Args:
+            txn
+            cache_name (str)
+            keys (iterable[str])
+        """
+
+        if isinstance(self.database_engine, PostgresEngine):
+            # get_next() returns a context manager which is designed to wrap
+            # the transaction. However, we want to only get an ID when we want
+            # to use it, here, so we need to call __enter__ manually, and have
+            # __exit__ called after the transaction finishes.
+            ctx = self._cache_id_gen.get_next()
+            stream_id = ctx.__enter__()
+            txn.call_on_exception(ctx.__exit__, None, None, None)
+            txn.call_after(ctx.__exit__, None, None, None)
+            txn.call_after(self.hs.get_notifier().on_new_replication_data)
+
+            self._simple_insert_txn(
+                txn,
+                table="cache_invalidation_stream",
+                values={
+                    "stream_id": stream_id,
+                    "cache_func": cache_name,
+                    "keys": list(keys),
+                    "invalidation_ts": self.clock.time_msec(),
+                },
+            )
+
+    def get_all_updated_caches(self, last_id, current_id, limit):
+        if last_id == current_id:
+            return defer.succeed([])
+
+        def get_all_updated_caches_txn(txn):
+            # We purposefully don't bound by the current token, as we want to
+            # send across cache invalidations as quickly as possible. Cache
+            # invalidations are idempotent, so duplicates are fine.
+            sql = (
+                "SELECT stream_id, cache_func, keys, invalidation_ts"
+                " FROM cache_invalidation_stream"
+                " WHERE stream_id > ? ORDER BY stream_id ASC LIMIT ?"
+            )
+            txn.execute(sql, (last_id, limit))
+            return txn.fetchall()
+
+        return self.runInteraction("get_all_updated_caches", get_all_updated_caches_txn)
+
+    def get_cache_stream_token(self):
+        if self._cache_id_gen:
+            return self._cache_id_gen.get_current_token()
+        else:
+            return 0
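The comment in `_send_invalidation_to_replication` describes a deliberate unbundling of a context manager: the stream ID has to be acquired mid-transaction but released only once the transaction settles, so `__enter__` is called by hand and `__exit__` is deferred. A self-contained sketch of the idea; `ToyIdGenerator` is a stand-in, not Synapse's real ID generator:

from contextlib import contextmanager

class ToyIdGenerator:
    """Toy stand-in for the stream ID generator used above."""

    def __init__(self):
        self._current = 0

    @contextmanager
    def get_next(self):
        self._current += 1
        yield self._current
        # On __exit__ the ID would be marked as persisted/visible.

gen = ToyIdGenerator()
ctx = gen.get_next()
stream_id = ctx.__enter__()  # take the ID now, inside the transaction
try:
    print("writing invalidation with stream_id", stream_id)
finally:
    # The real code points both txn.call_on_exception and txn.call_after
    # at ctx.__exit__ so it runs however the transaction ends.
    ctx.__exit__(None, None, None)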
@@ -21,8 +21,8 @@ from twisted.internet import defer

 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage import background_updates
-from synapse.storage._base import Cache
 from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.caches.descriptors import Cache

 logger = logging.getLogger(__name__)

@@ -30,16 +30,16 @@ from synapse.logging.opentracing import (
     whitelisted_homeserver,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage._base import (
-    Cache,
-    SQLBaseStore,
-    db_to_json,
-    make_in_list_sql_clause,
-)
+from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.types import get_verify_key_from_cross_signing_key
 from synapse.util import batch_iter
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
+from synapse.util.caches.descriptors import (
+    Cache,
+    cached,
+    cachedInlineCallbacks,
+    cachedList,
+)

 logger = logging.getLogger(__name__)

@@ -17,6 +17,7 @@ from __future__ import division

 import itertools
 import logging
+import threading
 from collections import namedtuple

 from canonicaljson import json
@@ -34,6 +35,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.types import get_domain_from_id
 from synapse.util import batch_iter
+from synapse.util.caches.descriptors import Cache
 from synapse.util.metrics import Measure

 logger = logging.getLogger(__name__)
@@ -53,6 +55,17 @@ _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))


 class EventsWorkerStore(SQLBaseStore):
+    def __init__(self, db_conn, hs):
+        super(EventsWorkerStore, self).__init__(db_conn, hs)
+
+        self._get_event_cache = Cache(
+            "*getEvent*", keylen=3, max_entries=hs.config.event_cache_size
+        )
+
+        self._event_fetch_lock = threading.Condition()
+        self._event_fetch_list = []
+        self._event_fetch_ongoing = 0
+
     def get_received_ts(self, event_id):
         """Get received_ts (when it was persisted) for the event.

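The three `_event_fetch_*` attributes moved into this `__init__` coordinate batched event loading: callers queue requests on a shared list under a condition variable while a bounded number of fetcher loops drain it. A simplified, speculative sketch of the producer side (the cap of 5 is illustrative, not Synapse's actual constant):

import threading

EVENT_QUEUE_THREADS = 5  # illustrative cap on concurrent fetcher loops

event_fetch_lock = threading.Condition()
event_fetch_list = []    # pending work items
event_fetch_ongoing = 0  # fetcher loops currently running

def enqueue(work_item):
    """Queue a fetch; report whether the caller should start a new fetcher."""
    global event_fetch_ongoing
    with event_fetch_lock:
        event_fetch_list.append(work_item)
        if event_fetch_ongoing < EVENT_QUEUE_THREADS:
            event_fetch_ongoing += 1
            return True  # start another fetcher loop
        event_fetch_lock.notify()  # wake an idle fetcher instead
        return False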
@@ -926,6 +926,14 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):

         self._account_validity = hs.config.account_validity

+        if self._account_validity.enabled:
+            self._clock.call_later(
+                0.0,
+                run_as_background_process,
+                "account_validity_set_expiration_dates",
+                self._set_expiration_date_when_missing,
+            )
+
         # Create a background job for culling expired 3PID validity tokens
         def start_cull():
             # run as a background process to make sure that the database transactions
@@ -1502,3 +1510,59 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         self._invalidate_cache_and_stream(
             txn, self.get_user_deactivated_status, (user_id,)
         )
+
+    @defer.inlineCallbacks
+    def _set_expiration_date_when_missing(self):
+        """
+        Retrieves the list of registered users that don't have an expiration date, and
+        adds an expiration date for each of them.
+        """
+
+        def select_users_with_no_expiration_date_txn(txn):
+            """Retrieves the list of registered users with no expiration date from the
+            database, filtering out deactivated users.
+            """
+            sql = (
+                "SELECT users.name FROM users"
+                " LEFT JOIN account_validity ON (users.name = account_validity.user_id)"
+                " WHERE account_validity.user_id is NULL AND users.deactivated = 0;"
+            )
+            txn.execute(sql, [])
+
+            res = self.cursor_to_dict(txn)
+            if res:
+                for user in res:
+                    self.set_expiration_date_for_user_txn(
+                        txn, user["name"], use_delta=True
+                    )
+
+        yield self.runInteraction(
+            "get_users_with_no_expiration_date",
+            select_users_with_no_expiration_date_txn,
+        )
+
+    def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False):
+        """Sets an expiration date to the account with the given user ID.
+
+        Args:
+            user_id (str): User ID to set an expiration date for.
+            use_delta (bool): If set to False, the expiration date for the user will be
+                now + validity period. If set to True, this expiration date will be a
+                random value in the [now + period - d ; now + period] range, d being a
+                delta equal to 10% of the validity period.
+        """
+        now_ms = self._clock.time_msec()
+        expiration_ts = now_ms + self._account_validity.period
+
+        if use_delta:
+            expiration_ts = self.rand.randrange(
+                expiration_ts - self._account_validity.startup_job_max_delta,
+                expiration_ts,
+            )
+
+        self._simple_upsert_txn(
+            txn,
+            "account_validity",
+            keyvalues={"user_id": user_id},
+            values={"expiration_ts_ms": expiration_ts, "email_sent": False},
+        )
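As a worked example of the `use_delta` branch just added, with an illustrative 30-day validity period and `startup_job_max_delta` at 10% of it:

import random

period = 30 * 24 * 60 * 60 * 1000           # 30 days in ms (illustrative)
startup_job_max_delta = period * 10 // 100  # d = 10% of the period

now_ms = 1575000000000                      # arbitrary "now"
expiration_ts = now_ms + period

rand = random.SystemRandom()
jittered = rand.randrange(expiration_ts - startup_job_max_delta, expiration_ts)
# jittered lies in [now + period - d, now + period), so the startup job
# spreads out renewals instead of expiring every existing account at once.
assert expiration_ts - startup_job_max_delta <= jittered < expiration_ts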