f112cfe5bb
On startup `MultiWriterIdGenerator` fetches the maximum stream ID for each instance from the table and uses that as its initial "current position" for each writer. This is problematic because (a) it involves a scan of either the events table or an index (neither of which is ideal), and (b) if rows are being persisted out of order elsewhere while the process restarts, then the maximum stream ID is not the correct position. This could theoretically lead to race conditions where e.g. events that are persisted out of order are not sent down sync streams.

We fix this by creating a new table that tracks the current position of each writer to the stream, and updating it each time we finish persisting a new entry. This is a relatively small overhead when persisting events. For the cache invalidation stream, however, it is a much bigger relative overhead, so instead we note that for cache invalidation we don't actually care about reliability over restarts (as after a restart there are no caches to invalidate) and simply don't bother reading and writing to the new table in that particular case.
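The core of the change can be sketched roughly as follows. This is a minimal illustration only, not the actual Synapse schema or API: the table and column names (`stream_positions`, `stream_name`, `instance_name`, `stream_id`) and the helper functions are assumptions chosen for clarity, and it uses SQLite only to keep the sketch self-contained.

import sqlite3

# Illustrative schema (names are assumptions): one row per (stream, writer)
# recording the highest stream ID that writer has finished persisting.
SCHEMA = """
CREATE TABLE IF NOT EXISTS stream_positions (
    stream_name   TEXT NOT NULL,
    instance_name TEXT NOT NULL,
    stream_id     INTEGER NOT NULL,
    UNIQUE (stream_name, instance_name)
)
"""


def advance_position(conn, stream_name, instance_name, stream_id):
    # Called each time a writer finishes persisting a row; the stored
    # position only ever moves forwards. Requires SQLite >= 3.24 for the
    # upsert syntax (Postgres would use GREATEST instead of MAX).
    conn.execute(
        """
        INSERT INTO stream_positions (stream_name, instance_name, stream_id)
        VALUES (?, ?, ?)
        ON CONFLICT (stream_name, instance_name)
        DO UPDATE SET stream_id = MAX(stream_id, excluded.stream_id)
        """,
        (stream_name, instance_name, stream_id),
    )


def load_positions(conn, stream_name):
    # On startup, read back each writer's last finished position instead of
    # scanning the events table (or an index) for MAX(stream_id), which also
    # sidesteps the out-of-order persistence problem described above.
    rows = conn.execute(
        "SELECT instance_name, stream_id FROM stream_positions"
        " WHERE stream_name = ?",
        (stream_name,),
    )
    return dict(rows.fetchall())


if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute(SCHEMA)
    advance_position(conn, "events", "worker1", 105)
    advance_position(conn, "events", "worker1", 103)  # stale update is ignored
    print(load_positions(conn, "events"))  # {'worker1': 105}

Updating one small row per persisted entry is cheap relative to persisting an event, which is why (per the description above) the table is maintained for the events stream but deliberately skipped for the high-volume cache invalidation stream.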
46 lines
1.7 KiB
Python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Optional

from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator

logger = logging.getLogger(__name__)


class BaseSlavedStore(CacheInvalidationWorkerStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)
        if isinstance(self.database_engine, PostgresEngine):
            self._cache_id_gen = MultiWriterIdGenerator(
                db_conn,
                database,
                stream_name="caches",
                instance_name=hs.get_instance_name(),
                table="cache_invalidation_stream_by_instance",
                instance_column="instance_name",
                id_column="stream_id",
                sequence_name="cache_invalidation_stream_seq",
                writers=[],
            )  # type: Optional[MultiWriterIdGenerator]
        else:
            self._cache_id_gen = None

        self.hs = hs