Merge remote-tracking branch 'upstream/release-v1.69'

Tulir Asokan 2022-10-14 18:31:18 +03:00
commit 7390a8c313
6 changed files with 94 additions and 16 deletions

.github/workflows/docker.yml
View File

@@ -27,7 +27,7 @@ jobs:
- name: Inspect builder
run: docker buildx inspect
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
@@ -55,3 +55,8 @@ jobs:
          tags: "${{ steps.set-tag.outputs.tags }}"
          file: "docker/Dockerfile"
          platforms: linux/amd64,linux/arm64
+
+          # arm64 builds OOM without the git fetch setting. c.f.
+          # https://github.com/rust-lang/cargo/issues/10583
+          build-args: |
+            CARGO_NET_GIT_FETCH_WITH_CLI=true

CHANGES.md
View File

@@ -1,4 +1,4 @@
-Synapse 1.69.0rc3 (2022-10-12)
+Synapse 1.69.0rc4 (2022-10-14)
==============================
Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
@@ -6,6 +6,21 @@ Server administrators should update their dashboards and alerting rules to avoid
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
Bugfixes
--------

debian/changelog
View File

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
+
+  * New Synapse release 1.69.0rc4.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Oct 2022 15:04:47 +0100
+
matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium

  * New Synapse release 1.69.0rc3.

docker/Dockerfile
View File

@@ -108,6 +108,12 @@ RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source

pyproject.toml
View File

@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.69.0rc3"
+version = "1.69.0rc4"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

synapse/storage/databases/main/event_push_actions.py
View File

@@ -269,11 +269,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        event_push_actions_done = progress.get("event_push_actions_done", False)

        def add_thread_id_txn(
-            txn: LoggingTransaction, table_name: str, start_stream_ordering: int
+            txn: LoggingTransaction, start_stream_ordering: int
        ) -> int:
-            sql = f"""
+            sql = """
                SELECT stream_ordering
-                FROM {table_name}
+                FROM event_push_actions
                WHERE
                    thread_id IS NULL
                    AND stream_ordering > ?
@@ -285,7 +285,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            # No more rows to process.
            rows = txn.fetchall()
            if not rows:
-                progress[f"{table_name}_done"] = True
+                progress["event_push_actions_done"] = True
                self.db_pool.updates._background_update_progress_txn(
                    txn, "event_push_backfill_thread_id", progress
                )
@@ -294,16 +294,65 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            # Update the thread ID for any of those rows.
            max_stream_ordering = rows[-1][0]

-            sql = f"""
-                UPDATE {table_name}
+            sql = """
+                UPDATE event_push_actions
                SET thread_id = 'main'
-                WHERE stream_ordering <= ? AND thread_id IS NULL
+                WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
            """
-            txn.execute(sql, (max_stream_ordering,))
+            txn.execute(
+                sql,
+                (
+                    start_stream_ordering,
+                    max_stream_ordering,
+                ),
+            )

            # Update progress.
            processed_rows = txn.rowcount
-            progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering
+            progress["max_event_push_actions_stream_ordering"] = max_stream_ordering
            self.db_pool.updates._background_update_progress_txn(
                txn, "event_push_backfill_thread_id", progress
            )

            return processed_rows

+        def add_thread_id_summary_txn(txn: LoggingTransaction) -> int:
+            min_user_id = progress.get("max_summary_user_id", "")
+            min_room_id = progress.get("max_summary_room_id", "")
+
+            # Slightly overcomplicated query for getting the Nth user ID / room
+            # ID tuple, or the last if there are less than N remaining.
+            sql = """
+                SELECT user_id, room_id FROM (
+                    SELECT user_id, room_id FROM event_push_summary
+                    WHERE (user_id, room_id) > (?, ?)
+                        AND thread_id IS NULL
+                    ORDER BY user_id, room_id
+                    LIMIT ?
+                ) AS e
+                ORDER BY user_id DESC, room_id DESC
+                LIMIT 1
+            """
+
+            txn.execute(sql, (min_user_id, min_room_id, batch_size))
+            row = txn.fetchone()
+            if not row:
+                return 0
+
+            max_user_id, max_room_id = row
+
+            sql = """
+                UPDATE event_push_summary
+                SET thread_id = 'main'
+                WHERE
+                    (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?)
+                    AND thread_id IS NULL
+            """
+            txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id))
+
+            processed_rows = txn.rowcount
+            progress["max_summary_user_id"] = max_user_id
+            progress["max_summary_room_id"] = max_room_id
+            self.db_pool.updates._background_update_progress_txn(
+                txn, "event_push_backfill_thread_id", progress
+            )
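
The "slightly overcomplicated" query above is keyset pagination over a composite key: each batch finds the Nth remaining (user_id, room_id) pair past the stored cursor, then updates only the window between the old cursor (exclusive) and the new one (inclusive). A minimal self-contained sketch of the same pattern, using Python's sqlite3 with an invented stand-in schema (only the table and column names are taken from the diff; SQLite needs version 3.15+ for row-value comparisons, which Postgres supports natively):

    # Hypothetical demo of the composite-key batching in add_thread_id_summary_txn.
    import sqlite3
    from typing import Optional, Tuple

    def backfill_summary_batch(
        conn: sqlite3.Connection,
        min_user_id: str,
        min_room_id: str,
        batch_size: int,
    ) -> Optional[Tuple[str, str]]:
        """Mark one batch of rows; returns the new cursor, or None when done."""
        # Nth remaining (user_id, room_id) pair past the cursor, or the
        # last pair if fewer than N remain.
        row = conn.execute(
            """
            SELECT user_id, room_id FROM (
                SELECT user_id, room_id FROM event_push_summary
                WHERE (user_id, room_id) > (?, ?) AND thread_id IS NULL
                ORDER BY user_id, room_id
                LIMIT ?
            ) AS e
            ORDER BY user_id DESC, room_id DESC
            LIMIT 1
            """,
            (min_user_id, min_room_id, batch_size),
        ).fetchone()
        if row is None:
            return None
        max_user_id, max_room_id = row
        # Both bounds matter: the UPDATE touches only this batch's window.
        conn.execute(
            """
            UPDATE event_push_summary
            SET thread_id = 'main'
            WHERE (?, ?) < (user_id, room_id)
                AND (user_id, room_id) <= (?, ?)
                AND thread_id IS NULL
            """,
            (min_user_id, min_room_id, max_user_id, max_room_id),
        )
        return max_user_id, max_room_id

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE event_push_summary (user_id TEXT, room_id TEXT, thread_id TEXT)"
    )
    conn.executemany(
        "INSERT INTO event_push_summary VALUES (?, ?, NULL)",
        [(f"@u{u}:x", f"!r{r}:x") for u in range(5) for r in range(4)],
    )
    cursor: Optional[Tuple[str, str]] = ("", "")
    while cursor is not None:
        cursor = backfill_summary_batch(conn, cursor[0], cursor[1], batch_size=7)

Walking event_push_summary by its natural key rather than by stream ordering lets each batch hit an index directly, which, as the diff suggests, is the point of replacing the shared table-parametrised transaction with a dedicated one.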
@@ -319,15 +368,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            result = await self.db_pool.runInteraction(
                "event_push_backfill_thread_id",
                add_thread_id_txn,
-                "event_push_actions",
                progress.get("max_event_push_actions_stream_ordering", 0),
            )
        else:
            result = await self.db_pool.runInteraction(
                "event_push_backfill_thread_id",
-                add_thread_id_txn,
-                "event_push_summary",
-                progress.get("max_event_push_summary_stream_ordering", 0),
+                add_thread_id_summary_txn,
            )

        # Only done after the event_push_summary table is done.
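
The other half of the fix is in add_thread_id_txn itself: the UPDATE is now bounded below as well as above, so each batch touches only its own window of stream orderings instead of re-scanning every row beneath the high-water mark. A small runnable sketch of that phase-1 loop, again with sqlite3 and an invented table; the driver loop below stands in for Synapse's background-update scheduler, which persists the cursor in the progress dict so the backfill survives restarts:

    # Hypothetical demo of the bounded-range batching in add_thread_id_txn.
    import sqlite3
    from typing import Tuple

    def make_db() -> sqlite3.Connection:
        conn = sqlite3.connect(":memory:")
        conn.execute(
            "CREATE TABLE event_push_actions"
            " (stream_ordering INTEGER PRIMARY KEY, thread_id TEXT)"
        )
        conn.executemany(
            "INSERT INTO event_push_actions VALUES (?, NULL)",
            [(i,) for i in range(1, 101)],
        )
        return conn

    def add_thread_id_batch(
        conn: sqlite3.Connection, start: int, batch_size: int
    ) -> Tuple[int, int]:
        """Mark one batch; returns (rows updated, new cursor)."""
        rows = conn.execute(
            """
            SELECT stream_ordering FROM event_push_actions
            WHERE thread_id IS NULL AND stream_ordering > ?
            ORDER BY stream_ordering
            LIMIT ?
            """,
            (start, batch_size),
        ).fetchall()
        if not rows:
            return 0, start
        max_stream_ordering = rows[-1][0]
        # The fix: bound the UPDATE on both sides, so each batch only
        # visits rows in (start, max_stream_ordering] rather than every
        # row up to the high-water mark.
        cur = conn.execute(
            """
            UPDATE event_push_actions SET thread_id = 'main'
            WHERE ? < stream_ordering AND stream_ordering <= ?
                AND thread_id IS NULL
            """,
            (start, max_stream_ordering),
        )
        return cur.rowcount, max_stream_ordering

    conn = make_db()
    cursor = 0
    while True:
        updated, cursor = add_thread_id_batch(conn, cursor, batch_size=25)
        if updated == 0:
            break
    assert conn.execute(
        "SELECT count(*) FROM event_push_actions WHERE thread_id IS NULL"
    ).fetchone()[0] == 0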