Mirror of https://git.anonymousland.org/anonymousland/synapse-product.git (synced 2024-12-12 13:54:19 -05:00)
Synapse 1.20.0rc5 (2020-09-18)
==============================

In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3.

Features
--------

- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343))

Bugfixes
--------

- Fix rate limiting of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342))
- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349))

Internal Changes
----------------

- Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. ([\#8285](https://github.com/matrix-org/synapse/issues/8285))

Merge tag 'v1.20.0rc5' into develop
Commit 00db7786de

CHANGES.md (33 changes)
@@ -1,3 +1,36 @@
+Synapse 1.20.0rc5 (2020-09-18)
+==============================
+
+In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3.
+
+Features
+--------
+
+- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343))
+
+
+Bugfixes
+--------
+
+- Fix rate limiting of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342))
+- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349))
+
+
+Internal Changes
+----------------
+
+- Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. ([\#8285](https://github.com/matrix-org/synapse/issues/8285))
+
+
+Synapse 1.19.3 (2020-09-18)
+===========================
+
+Bugfixes
+--------
+
+- Partially mitigate bug where newly joined servers couldn't get past events in a room when there is a malformed event. ([\#8350](https://github.com/matrix-org/synapse/issues/8350))
+
+
 Synapse 1.20.0rc4 (2020-09-16)
 ==============================
 
@@ -1 +0,0 @@
-Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented.

@@ -1 +0,0 @@
-Fix ratelimitng of federation `/send` requests.
debian/changelog (vendored, 6 changes)
@@ -4,6 +4,12 @@ matrix-synapse-py3 (1.20.0ubuntu1) UNRELEASED; urgency=medium
 
  -- Dexter Chua <dec41@srcf.net>  Wed, 26 Aug 2020 12:41:36 +0000
 
+matrix-synapse-py3 (1.19.3) stable; urgency=medium
+
+  * New synapse release 1.19.3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 18 Sep 2020 14:59:30 +0100
+
 matrix-synapse-py3 (1.19.2) stable; urgency=medium
 
   * New synapse release 1.19.2.
@@ -48,7 +48,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.20.0rc4"
+__version__ = "1.20.0rc5"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
@@ -217,11 +217,9 @@ class FederationClient(FederationBase):
             for p in transaction_data["pdus"]
         ]
 
-        # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = await make_deferred_yieldable(
-            defer.gatherResults(
-                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True,
-            ).addErrback(unwrapFirstError)
+        # Check signatures and hash of pdus, removing any from the list that fail checks
+        pdus[:] = await self._check_sigs_and_hash_and_fetch(
+            dest, pdus, outlier=True, room_version=room_version
         )
 
         return pdus
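The new comment describes a drop-rather-than-fail policy: PDUs whose signature or hash checks fail are removed from the list instead of aborting the whole batch, which is what the old `defer.gatherResults(...).addErrback(unwrapFirstError)` chain did. A minimal, self-contained sketch of that pattern (not Synapse's actual implementation; `check_and_filter` and `verify_event` are hypothetical names used only for illustration):

```python
import asyncio
from typing import Awaitable, Callable, List, TypeVar

T = TypeVar("T")


async def check_and_filter(
    items: List[T], check: Callable[[T], Awaitable[bool]]
) -> List[T]:
    """Run `check` on every item concurrently and keep only the ones that pass.

    A failing or erroring check drops that single item instead of failing the
    whole batch, mirroring the "removing any from the list that fail checks"
    behaviour described in the diff above.
    """
    results = await asyncio.gather(*(check(i) for i in items), return_exceptions=True)
    return [item for item, ok in zip(items, results) if ok is True]


# Hypothetical stand-in for signature/hash validation of a federation event.
async def verify_event(event: dict) -> bool:
    return bool(event.get("signatures"))


async def main() -> None:
    events = [
        {"event_id": "$a", "signatures": {"example.org": {}}},
        {"event_id": "$b"},  # no signatures: dropped, not fatal
    ]
    print(await check_and_filter(events, verify_event))


if __name__ == "__main__":
    asyncio.run(main())
```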
@@ -917,15 +917,26 @@ class FederationHandler(BaseHandler):
 
         return events
 
-    async def maybe_backfill(self, room_id, current_depth):
+    async def maybe_backfill(
+        self, room_id: str, current_depth: int, limit: int
+    ) -> bool:
         """Checks the database to see if we should backfill before paginating,
         and if so do.
+
+        Args:
+            room_id
+            current_depth: The depth from which we're paginating from. This is
+                used to decide if we should backfill and what extremities to
+                use.
+            limit: The number of events that the pagination request will
+                return. This is used as part of the heuristic to decide if we
+                should back paginate.
         """
         extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
 
         if not extremities:
             logger.debug("Not backfilling as no extremeties found.")
-            return
+            return False
 
         # We only want to paginate if we can actually see the events we'll get,
         # as otherwise we'll just spend a lot of resources to get redacted
@@ -978,16 +989,54 @@
         sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
         max_depth = sorted_extremeties_tuple[0][1]
 
+        # If we're approaching an extremity we trigger a backfill, otherwise we
+        # no-op.
+        #
+        # We chose twice the limit here as then clients paginating backwards
+        # will send pagination requests that trigger backfill at least twice
+        # using the most recent extremity before it gets removed (see below). We
+        # chose more than one times the limit in case of failure, but choosing a
+        # much larger factor will result in triggering a backfill request much
+        # earlier than necessary.
+        if current_depth - 2 * limit > max_depth:
+            logger.debug(
+                "Not backfilling as we don't need to. %d < %d - 2 * %d",
+                max_depth,
+                current_depth,
+                limit,
+            )
+            return False
+
+        logger.debug(
+            "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
+            room_id,
+            current_depth,
+            max_depth,
+            sorted_extremeties_tuple,
+        )
+
+        # We ignore extremities that have a greater depth than our current depth
+        # as:
+        #    1. we don't really care about getting events that have happened
+        #       before our current position; and
+        #    2. we have likely previously tried and failed to backfill from that
+        #       extremity, so to avoid getting "stuck" requesting the same
+        #       backfill repeatedly we drop those extremities.
+        filtered_sorted_extremeties_tuple = [
+            t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
+        ]
+
+        # However, we need to check that the filtered extremities are non-empty.
+        # If they are empty then either we can a) bail or b) still attempt to
+        # backill. We opt to try backfilling anyway just in case we do get
+        # relevant events.
+        if filtered_sorted_extremeties_tuple:
+            sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
+
         # We don't want to specify too many extremities as it causes the backfill
         # request URI to be too long.
         extremities = dict(sorted_extremeties_tuple[:5])
 
-        if current_depth > max_depth:
-            logger.debug(
-                "Not backfilling as we don't need to. %d < %d", max_depth, current_depth
-            )
-            return
-
         # Now we need to decide which hosts to hit first.
 
         # First we try hosts that are already in the room
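To make the new trigger condition concrete, here is a small sketch with illustrative numbers (the helper name and the values are made up for the example, not taken from the codebase):

```python
def should_backfill(current_depth: int, limit: int, max_depth: int) -> bool:
    """Mirror of the condition above: backfill unless we are still more than
    two pages (2 * limit) of events away from the oldest known extremity."""
    return not (current_depth - 2 * limit > max_depth)


# Illustrative numbers: the client paginates 10 events at a time and the
# oldest known extremity sits at depth 50. Backfill only triggers once the
# client's position comes within 20 (= 2 * 10) events of that extremity.
assert should_backfill(current_depth=100, limit=10, max_depth=50) is False
assert should_backfill(current_depth=69, limit=10, max_depth=50) is True
```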
@@ -358,9 +358,9 @@ class PaginationHandler:
             # if we're going backwards, we might need to backfill. This
             # requires that we have a topo token.
             if room_token.topological:
-                max_topo = room_token.topological
+                curr_topo = room_token.topological
             else:
-                max_topo = await self.store.get_max_topological_token(
+                curr_topo = await self.store.get_current_topological_token(
                     room_id, room_token.stream
                 )
 
@@ -379,13 +379,13 @@
                 leave_token = RoomStreamToken.parse(leave_token_str)
                 assert leave_token.topological is not None
 
-                if leave_token.topological < max_topo:
+                if leave_token.topological < curr_topo:
                     from_token = from_token.copy_and_replace(
                         "room_key", leave_token
                     )
 
             await self.hs.get_handlers().federation_handler.maybe_backfill(
-                room_id, max_topo
+                room_id, curr_topo, limit=source_config.limit,
             )
 
             to_room_key = None
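For context, `source_config.limit` ultimately comes from the client's pagination request, and it is now threaded through to `maybe_backfill` so the backfill heuristic can use it. A hedged sketch of a client call that exercises this path, using the standard client-server `/messages` endpoint (the homeserver URL, access token, room ID and `from` token below are placeholders, not real values):

```python
import requests

# Placeholders: substitute a real homeserver, access token, room ID and token.
HOMESERVER = "https://matrix.example.org"
ACCESS_TOKEN = "MDAx..."
ROOM_ID = "!someroom:example.org"

# Paginate backwards (dir=b) with limit=10; this limit is what the handler
# passes into maybe_backfill() for the 2 * limit heuristic above.
resp = requests.get(
    f"{HOMESERVER}/_matrix/client/r0/rooms/{ROOM_ID}/messages",
    params={"dir": "b", "limit": 10, "from": "t123-456"},
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
resp.raise_for_status()
print(len(resp.json().get("chunk", [])), "events returned")
```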
@@ -19,6 +19,7 @@
 import logging
 import re
 
+from synapse.api.constants import RoomCreationPreset
 from synapse.http.servlet import RestServlet
 
 logger = logging.getLogger(__name__)
@@ -31,6 +32,20 @@ class VersionsRestServlet(RestServlet):
         super().__init__()
         self.config = hs.config
 
+        # Calculate these once since they shouldn't change after start-up.
+        self.e2ee_forced_public = (
+            RoomCreationPreset.PUBLIC_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+        self.e2ee_forced_private = (
+            RoomCreationPreset.PRIVATE_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+        self.e2ee_forced_trusted_private = (
+            RoomCreationPreset.TRUSTED_PRIVATE_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+
     def on_GET(self, request):
         return (
             200,
@@ -62,6 +77,10 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.msc2432": True,
                     # Implements additional endpoints as described in MSC2666
                     "uk.half-shot.msc2666": True,
+                    # Whether new rooms will be set to encrypted or not (based on presets).
+                    "io.element.e2ee_forced.public": self.e2ee_forced_public,
+                    "io.element.e2ee_forced.private": self.e2ee_forced_private,
+                    "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
                 },
             },
         )
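A sketch of how a client could read the new flags from the unauthenticated `GET /_matrix/client/versions` endpoint (the homeserver URL is a placeholder, and the reported values depend on the server's encryption-by-default configuration):

```python
import requests

HOMESERVER = "https://matrix.example.org"  # placeholder

resp = requests.get(f"{HOMESERVER}/_matrix/client/versions")
resp.raise_for_status()
unstable = resp.json().get("unstable_features", {})

# The three flags added in this commit; missing keys indicate an older server.
for flag in (
    "io.element.e2ee_forced.public",
    "io.element.e2ee_forced.private",
    "io.element.e2ee_forced.trusted_private",
):
    print(flag, unstable.get(flag))
```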
@@ -640,23 +640,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta):
         )
         return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"])
 
-    async def get_max_topological_token(self, room_id: str, stream_key: int) -> int:
-        """Get the max topological token in a room before the given stream
+    async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
+        """Gets the topological token in a room after or at the given stream
         ordering.
 
         Args:
             room_id
             stream_key
-
-        Returns:
-            The maximum topological token.
         """
         sql = (
-            "SELECT coalesce(max(topological_ordering), 0) FROM events"
-            " WHERE room_id = ? AND stream_ordering < ?"
+            "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
+            " WHERE room_id = ? AND stream_ordering >= ?"
         )
         row = await self.db_pool.execute(
-            "get_max_topological_token", None, sql, room_id, stream_key
+            "get_current_topological_token", None, sql, room_id, stream_key
         )
         return row[0][0] if row else 0
 
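A small sketch of the semantic change, using a toy in-memory event list instead of the real `events` table: the old query returned the maximum topological ordering strictly before the given stream ordering, while the new one returns the minimum at or after it, so callers get the topological position corresponding to where the stream token currently sits rather than one clamped to just before it.

```python
from typing import List, Tuple

# Illustrative (topological_ordering, stream_ordering) pairs for one room.
EVENTS: List[Tuple[int, int]] = [(1, 10), (2, 11), (3, 12), (4, 13)]


def old_max_topological_token(stream_key: int) -> int:
    """Old behaviour: MAX(topological_ordering) WHERE stream_ordering < stream_key."""
    return max((t for t, s in EVENTS if s < stream_key), default=0)


def new_current_topological_token(stream_key: int) -> int:
    """New behaviour: MIN(topological_ordering) WHERE stream_ordering >= stream_key."""
    return min((t for t, s in EVENTS if s >= stream_key), default=0)


# For a stream position of 12, the old query answered 2 (just before it),
# while the new one answers 3 (at or after it).
assert old_max_topological_token(12) == 2
assert new_current_topological_token(12) == 3
```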