Merge branch 'develop' into rav/fix_attribute_mapping

commit 284e1cb027
363 changed files with 10632 additions and 5352 deletions; the hunks below cover three of those files.
synapse/util/caches/ttlcache.py

@@ -55,7 +55,7 @@ class TTLCache(object):
         if e != SENTINEL:
             self._expiry_list.remove(e)

-        entry = _CacheEntry(expiry_time=expiry, key=key, value=value)
+        entry = _CacheEntry(expiry_time=expiry, ttl=ttl, key=key, value=value)
         self._data[key] = entry
         self._expiry_list.add(entry)
@@ -87,7 +87,8 @@ class TTLCache(object):
             key: key to look up

         Returns:
-            Tuple[Any, float]: the value from the cache, and the expiry time
+            Tuple[Any, float, float]: the value from the cache, the expiry time
+            and the TTL

         Raises:
             KeyError if the entry is not found
@@ -99,7 +100,7 @@ class TTLCache(object):
             self._metrics.inc_misses()
             raise
         self._metrics.inc_hits()
-        return e.value, e.expiry_time
+        return e.value, e.expiry_time, e.ttl

     def pop(self, key, default=SENTINEL):
         """Remove a value from the cache
@@ -158,5 +159,6 @@ class _CacheEntry(object):

     # expiry_time is the first attribute, so that entries are sorted by expiry.
     expiry_time = attr.ib()
+    ttl = attr.ib()
     key = attr.ib()
     value = attr.ib()
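The net effect of these four hunks is that each cache entry now remembers the TTL it was created with, and the lookup returns it alongside the value and expiry time. A minimal self-contained sketch of the pattern (the `SimpleTTLCache` name and the use of `time.time()` are illustrative, not Synapse's actual implementation):

```python
import time

import attr


@attr.s(frozen=True, slots=True)
class _Entry:
    # expiry_time first, so entries sort by expiry (as in the hunk above)
    expiry_time = attr.ib()
    ttl = attr.ib()
    key = attr.ib()
    value = attr.ib()


class SimpleTTLCache:
    def __init__(self):
        self._data = {}

    def set(self, key, value, ttl):
        # store the ttl itself, not just the absolute expiry derived from it
        self._data[key] = _Entry(
            expiry_time=ttl + time.time(), ttl=ttl, key=key, value=value
        )

    def get_with_expiry(self, key):
        e = self._data[key]  # raises KeyError on a miss
        if e.expiry_time < time.time():
            del self._data[key]
            raise KeyError(key)
        # three-element tuple, mirroring the docstring change above
        return e.value, e.expiry_time, e.ttl


cache = SimpleTTLCache()
cache.set("remote-server", {"status": "ok"}, ttl=30)
value, expiry, ttl = cache.get_with_expiry("remote-server")
```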
synapse/util/hash.py (new file, 33 lines)

@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+
+import unpaddedbase64
+
+
+def sha256_and_url_safe_base64(input_text):
+    """SHA256 hash an input string, encode the digest as url-safe base64,
+    and return it
+
+    :param input_text: string to hash
+    :type input_text: str
+
+    :returns: a sha256 hashed and url-safe base64 encoded digest
+    :rtype: str
+    """
+    digest = hashlib.sha256(input_text.encode()).digest()
+    return unpaddedbase64.encode_base64(digest, urlsafe=True)
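The new helper is small enough to sanity-check directly: a SHA-256 digest is always 32 bytes, and unpadded base64 encodes 32 bytes to exactly 43 characters. A quick check (assumes the `unpaddedbase64` package is installed; the input string is arbitrary):

```python
import hashlib

import unpaddedbase64

digest = hashlib.sha256("alice@example.com".encode()).digest()
hashed = unpaddedbase64.encode_base64(digest, urlsafe=True)

assert len(digest) == 32  # SHA-256 is always 256 bits
assert len(hashed) == 43  # ceil(32 * 8 / 6) base64 chars, no padding
assert "+" not in hashed and "/" not in hashed  # url-safe alphabet uses - and _
```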
synapse/util/retryutils.py

@@ -22,6 +22,15 @@ from synapse.api.errors import CodeMessageException

 logger = logging.getLogger(__name__)

+# the initial backoff, after the first transaction fails
+MIN_RETRY_INTERVAL = 10 * 60 * 1000
+
+# how much we multiply the backoff by after each subsequent fail
+RETRY_MULTIPLIER = 5
+
+# a cap on the backoff. (Essentially none)
+MAX_RETRY_INTERVAL = 2 ** 63
+

 class NotRetryingDestination(Exception):
     def __init__(self, retry_last_ts, retry_interval, destination):
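With these constants the first failure backs a destination off for 10 minutes, and each further failure multiplies the interval by about 5 (the jitter applied later in this file is ignored here). A sketch of the growth:

```python
MIN_RETRY_INTERVAL = 10 * 60 * 1000  # ms
RETRY_MULTIPLIER = 5
MAX_RETRY_INTERVAL = 2 ** 63  # effectively no cap

interval = 0
for failure in range(1, 6):
    interval = MIN_RETRY_INTERVAL if not interval else interval * RETRY_MULTIPLIER
    interval = min(interval, MAX_RETRY_INTERVAL)
    print("failure %d: next retry in %d min" % (failure, interval // 60000))
# failure 1: next retry in 10 min
# failure 2: next retry in 50 min
# failure 3: next retry in 250 min
# failure 4: next retry in 1250 min  (~21 hours)
# failure 5: next retry in 6250 min  (~4.3 days)
```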
@@ -71,11 +80,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
         # We aren't ready to retry that destination.
         raise
     """
+    failure_ts = None
     retry_last_ts, retry_interval = (0, 0)

     retry_timings = yield store.get_destination_retry_timings(destination)

     if retry_timings:
+        failure_ts = retry_timings["failure_ts"]
         retry_last_ts, retry_interval = (
             retry_timings["retry_last_ts"],
             retry_timings["retry_interval"],
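The hunk reads a `failure_ts` key from the same timings row as the existing fields; assuming the storage layer returns a plain dict, its shape would be something like this (timestamps purely illustrative):

```python
retry_timings = {
    "failure_ts": 1567000000000,     # ms since epoch: when failures began
    "retry_last_ts": 1567003000000,  # ms since epoch: last attempt
    "retry_interval": 3000000,       # ms: current backoff (50 minutes)
}
```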
@@ -99,6 +110,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
         destination,
         clock,
         store,
+        failure_ts,
         retry_interval,
         backoff_on_failure=backoff_on_failure,
         **kwargs
@@ -111,10 +123,8 @@ class RetryDestinationLimiter(object):
         destination,
         clock,
         store,
+        failure_ts,
         retry_interval,
-        min_retry_interval=10 * 60 * 1000,
-        max_retry_interval=24 * 60 * 60 * 1000,
-        multiplier_retry_interval=5,
         backoff_on_404=False,
         backoff_on_failure=True,
     ):
@@ -127,15 +137,11 @@ class RetryDestinationLimiter(object):
             destination (str)
             clock (Clock)
             store (DataStore)
+            failure_ts (int|None): when this destination started failing (in ms since
+                the epoch), or zero if the last request was successful
             retry_interval (int): The next retry interval taken from the
                 database in milliseconds, or zero if the last request was
                 successful.
-            min_retry_interval (int): The minimum retry interval to use after
-                a failed request, in milliseconds.
-            max_retry_interval (int): The maximum retry interval to use after
-                a failed request, in milliseconds.
-            multiplier_retry_interval (int): The multiplier to use to increase
-                the retry interval after a failed request.
             backoff_on_404 (bool): Back off if we get a 404

             backoff_on_failure (bool): set to False if we should not increase the
@@ -145,10 +151,8 @@ class RetryDestinationLimiter(object):
         self.store = store
         self.destination = destination

+        self.failure_ts = failure_ts
         self.retry_interval = retry_interval
-        self.min_retry_interval = min_retry_interval
-        self.max_retry_interval = max_retry_interval
-        self.multiplier_retry_interval = multiplier_retry_interval
         self.backoff_on_404 = backoff_on_404
         self.backoff_on_failure = backoff_on_failure
@@ -189,6 +193,7 @@ class RetryDestinationLimiter(object):
             logger.debug(
                 "Connection to %s was successful; clearing backoff", self.destination
             )
+            self.failure_ts = None
             retry_last_ts = 0
             self.retry_interval = 0
         elif not self.backoff_on_failure:
@@ -196,13 +201,14 @@ class RetryDestinationLimiter(object):
         else:
             # We couldn't connect.
             if self.retry_interval:
-                self.retry_interval *= self.multiplier_retry_interval
-                self.retry_interval *= int(random.uniform(0.8, 1.4))
+                self.retry_interval = int(
+                    self.retry_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4)
+                )

-                if self.retry_interval >= self.max_retry_interval:
-                    self.retry_interval = self.max_retry_interval
+                if self.retry_interval >= MAX_RETRY_INTERVAL:
+                    self.retry_interval = MAX_RETRY_INTERVAL
             else:
-                self.retry_interval = self.min_retry_interval
+                self.retry_interval = MIN_RETRY_INTERVAL

             logger.info(
                 "Connection to %s was unsuccessful (%s(%s)); backoff now %i",
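Note what the rewrite fixes besides switching to module-level constants: the removed line applied `int()` to the jitter factor alone, and `int(random.uniform(0.8, 1.4))` truncates to 0 about one draw in three (and to 1 otherwise), zeroing the backoff. The replacement truncates the whole product, so the fractional jitter survives. A standalone sketch of the corrected computation:

```python
import random

RETRY_MULTIPLIER = 5


def jittered_backoff(interval_ms):
    # multiply first, truncate last: the 0.8-1.4 jitter stays fractional
    return int(interval_ms * RETRY_MULTIPLIER * random.uniform(0.8, 1.4))


# old buggy form for comparison: int(random.uniform(0.8, 1.4)) is 0
# whenever the draw lands in [0.8, 1.0), i.e. one third of the time
print(jittered_backoff(10 * 60 * 1000))  # somewhere in [40, 70] minutes, in ms
```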
@@ -213,11 +219,17 @@ class RetryDestinationLimiter(object):
             )
             retry_last_ts = int(self.clock.time_msec())

+        if self.failure_ts is None:
+            self.failure_ts = retry_last_ts
+
         @defer.inlineCallbacks
         def store_retry_timings():
             try:
                 yield self.store.set_destination_retry_timings(
-                    self.destination, retry_last_ts, self.retry_interval
+                    self.destination,
+                    self.failure_ts,
+                    retry_last_ts,
+                    self.retry_interval,
                 )
             except Exception:
                 logger.exception("Failed to store destination_retry_timings")
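Taken together, the retryutils hunks persist not only the next retry interval but also when the destination first started failing: a success clears `failure_ts`, and only the first failure in a run sets it. A stand-alone sketch of that bookkeeping (the `FailureTracker` class is illustrative; Synapse does this inside the limiter's context-manager exit, with Twisted machinery around the store call):

```python
import time


class FailureTracker:
    """Illustrative stand-in for the limiter's new failure_ts bookkeeping."""

    def __init__(self, failure_ts=None):
        # ms timestamp of the first failure in the current run, or None
        self.failure_ts = failure_ts

    def on_success(self):
        # mirrors @@ -189: a success clears the whole backoff state
        self.failure_ts = None

    def on_failure(self):
        now_ms = int(time.time() * 1000)
        # mirrors @@ -213: only the first failure of a run sets failure_ts
        if self.failure_ts is None:
            self.failure_ts = now_ms

    def failing_for_ms(self):
        """How long the destination has been failing, in ms (0 if healthy)."""
        if self.failure_ts is None:
            return 0
        return int(time.time() * 1000) - self.failure_ts
```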