Merge remote-tracking branch 'upstream/release-v1.69'

Tulir Asokan 2022-10-14 18:31:18 +03:00
commit 7390a8c313
6 changed files with 94 additions and 16 deletions

.github/workflows/docker.yml

@@ -55,3 +55,8 @@ jobs:
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
+
+          # arm64 builds OOM without the git fetch setting. c.f.
+          # https://github.com/rust-lang/cargo/issues/10583
+          build-args: |
+            CARGO_NET_GIT_FETCH_WITH_CLI=true

CHANGES.md

@@ -1,4 +1,4 @@
-Synapse 1.69.0rc3 (2022-10-12)
+Synapse 1.69.0rc4 (2022-10-14)
 ==============================
 
 Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
@@ -6,6 +6,21 @@ Server administrators should update their dashboards and alerting rules to avoid
 See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
 
 
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
 Bugfixes
 --------

debian/changelog

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
+
+  * New Synapse release 1.69.0rc4.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Oct 2022 15:04:47 +0100
+
 matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
 
   * New Synapse release 1.69.0rc3.

docker/Dockerfile

@@ -108,6 +108,12 @@ RUN mkdir /rust /cargo
 
 RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
 
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
 # To speed up rebuilds, install all of the dependencies before we copy over
 # the whole synapse project, so that this layer in the Docker cache can be
 # used while you develop on the source
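
The `ARG`/`ENV` pair above is what lets the CI workflow's `build-args` reach cargo: `ARG` declares a build-time parameter (defaulting to `false`, so ordinary builds behave as before), and `ENV` propagates the chosen value into the environment of the later `RUN` steps that compile the Rust extension. Setting `CARGO_NET_GIT_FETCH_WITH_CLI=true` makes cargo shell out to the `git` executable for network fetches instead of using its built-in fetcher, which is the step that exhausts memory on arm64. To reproduce the CI behaviour locally, an invocation along these lines should work (illustrative, and assumes a buildx builder with arm64 support is configured):

    docker buildx build --platform linux/arm64 \
        --build-arg CARGO_NET_GIT_FETCH_WITH_CLI=true \
        -f docker/Dockerfile .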

pyproject.toml

@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.69.0rc3"
+version = "1.69.0rc4"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

synapse/storage/databases/main/event_push_actions.py

@@ -269,11 +269,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
         event_push_actions_done = progress.get("event_push_actions_done", False)
 
         def add_thread_id_txn(
-            txn: LoggingTransaction, table_name: str, start_stream_ordering: int
+            txn: LoggingTransaction, start_stream_ordering: int
         ) -> int:
-            sql = f"""
+            sql = """
             SELECT stream_ordering
-            FROM {table_name}
+            FROM event_push_actions
             WHERE
                 thread_id IS NULL
                 AND stream_ordering > ?
@@ -285,7 +285,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             # No more rows to process.
             rows = txn.fetchall()
             if not rows:
-                progress[f"{table_name}_done"] = True
+                progress["event_push_actions_done"] = True
                 self.db_pool.updates._background_update_progress_txn(
                     txn, "event_push_backfill_thread_id", progress
                 )
@@ -294,16 +294,65 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             # Update the thread ID for any of those rows.
             max_stream_ordering = rows[-1][0]
 
-            sql = f"""
-            UPDATE {table_name}
+            sql = """
+            UPDATE event_push_actions
             SET thread_id = 'main'
-            WHERE stream_ordering <= ? AND thread_id IS NULL
+            WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
             """
-            txn.execute(sql, (max_stream_ordering,))
+            txn.execute(
+                sql,
+                (
+                    start_stream_ordering,
+                    max_stream_ordering,
+                ),
+            )
 
             # Update progress.
             processed_rows = txn.rowcount
-            progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering
+            progress["max_event_push_actions_stream_ordering"] = max_stream_ordering
+            self.db_pool.updates._background_update_progress_txn(
+                txn, "event_push_backfill_thread_id", progress
+            )
+
+            return processed_rows
+
+        def add_thread_id_summary_txn(txn: LoggingTransaction) -> int:
+            min_user_id = progress.get("max_summary_user_id", "")
+            min_room_id = progress.get("max_summary_room_id", "")
+
+            # Slightly overcomplicated query for getting the Nth user ID / room
+            # ID tuple, or the last if there are less than N remaining.
+            sql = """
+                SELECT user_id, room_id FROM (
+                    SELECT user_id, room_id FROM event_push_summary
+                    WHERE (user_id, room_id) > (?, ?)
+                        AND thread_id IS NULL
+                    ORDER BY user_id, room_id
+                    LIMIT ?
+                ) AS e
+                ORDER BY user_id DESC, room_id DESC
+                LIMIT 1
+            """
+
+            txn.execute(sql, (min_user_id, min_room_id, batch_size))
+            row = txn.fetchone()
+            if not row:
+                return 0
+
+            max_user_id, max_room_id = row
+
+            sql = """
+                UPDATE event_push_summary
+                SET thread_id = 'main'
+                WHERE
+                    (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?)
+                    AND thread_id IS NULL
+            """
+            txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id))
+
+            processed_rows = txn.rowcount
+            progress["max_summary_user_id"] = max_user_id
+            progress["max_summary_room_id"] = max_room_id
             self.db_pool.updates._background_update_progress_txn(
                 txn, "event_push_backfill_thread_id", progress
             )
@@ -319,15 +368,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             result = await self.db_pool.runInteraction(
                 "event_push_backfill_thread_id",
                 add_thread_id_txn,
-                "event_push_actions",
                 progress.get("max_event_push_actions_stream_ordering", 0),
             )
         else:
             result = await self.db_pool.runInteraction(
                 "event_push_backfill_thread_id",
-                add_thread_id_txn,
-                "event_push_summary",
-                progress.get("max_event_push_summary_stream_ordering", 0),
+                add_thread_id_summary_txn,
             )
 
         # Only done after the event_push_summary table is done.
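
The Python change above replaces one generic batched updater with two specialised ones, and is the performance fix called out in the changelog (#14172): the old `UPDATE ... WHERE stream_ordering <= ?` had no lower bound, so each batch re-examined all rows below the advancing high-water mark, including ones earlier batches had already handled, and the background update slowed down as it progressed. The rewritten query bounds each batch on both sides. Below is a minimal, self-contained sketch of that pattern against a toy sqlite3 table; the table and column names mirror the diff, but the schema, driver, and batch loop are illustrative rather than Synapse's actual code.

    import sqlite3

    # Toy stand-in for the real event_push_actions table.
    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE event_push_actions (stream_ordering INTEGER PRIMARY KEY, thread_id TEXT)"
    )
    conn.executemany(
        "INSERT INTO event_push_actions VALUES (?, NULL)",
        [(i,) for i in range(1, 10_001)],
    )

    def backfill_batch(conn: sqlite3.Connection, last: int, batch_size: int) -> int:
        """Backfill one batch; return the new high-water mark, or -1 when done."""
        # Find this batch's upper bound, starting strictly after `last` (the
        # equivalent of Synapse's persisted progress value).
        rows = conn.execute(
            """
            SELECT stream_ordering FROM event_push_actions
            WHERE thread_id IS NULL AND stream_ordering > ?
            ORDER BY stream_ordering
            LIMIT ?
            """,
            (last, batch_size),
        ).fetchall()
        if not rows:
            return -1
        upper = rows[-1][0]
        # Both bounds matter: without the `? < stream_ordering` lower bound,
        # every batch would rescan all rows below `upper`, including the ones
        # that are already done.
        conn.execute(
            """
            UPDATE event_push_actions
            SET thread_id = 'main'
            WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
            """,
            (last, upper),
        )
        return upper

    last = 0
    while (last := backfill_batch(conn, last, batch_size=1000)) != -1:
        pass
    assert not conn.execute(
        "SELECT 1 FROM event_push_actions WHERE thread_id IS NULL LIMIT 1"
    ).fetchone()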
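`add_thread_id_summary_txn` applies the same bounded-batch idea to `event_push_summary`, which has no single monotonically increasing column, so it paginates on the composite `(user_id, room_id)` key using SQL row-value comparisons; the empty-string starting bounds sort before any real Matrix IDs, so the first batch begins at the start of the key space. A sketch of that keyset-pagination step, under the same toy-sqlite3 assumptions as above (row-value comparisons need SQLite 3.15 or newer):

    import sqlite3

    # Toy stand-in for event_push_summary: keyed by (user_id, room_id), with
    # no single monotonically increasing column to paginate on.
    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE event_push_summary (user_id TEXT, room_id TEXT, thread_id TEXT)"
    )
    conn.executemany(
        "INSERT INTO event_push_summary VALUES (?, ?, NULL)",
        [(f"@u{u}:example.org", f"!r{r}:example.org") for u in range(20) for r in range(5)],
    )

    min_user_id, min_room_id = "", ""  # "" sorts before any real user/room ID
    batch_size = 17
    while True:
        # The Nth remaining (user_id, room_id) tuple, or the last one if fewer
        # than N remain: the inner query selects the next batch in key order,
        # the outer query keeps only its largest key.
        row = conn.execute(
            """
            SELECT user_id, room_id FROM (
                SELECT user_id, room_id FROM event_push_summary
                WHERE (user_id, room_id) > (?, ?) AND thread_id IS NULL
                ORDER BY user_id, room_id
                LIMIT ?
            ) AS e
            ORDER BY user_id DESC, room_id DESC
            LIMIT 1
            """,
            (min_user_id, min_room_id, batch_size),
        ).fetchone()
        if row is None:
            break
        max_user_id, max_room_id = row
        # Update exactly the half-open key range covered by this batch.
        conn.execute(
            """
            UPDATE event_push_summary
            SET thread_id = 'main'
            WHERE (?, ?) < (user_id, room_id)
              AND (user_id, room_id) <= (?, ?)
              AND thread_id IS NULL
            """,
            (min_user_id, min_room_id, max_user_id, max_room_id),
        )
        min_user_id, min_room_id = max_user_id, max_room_id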