Merge remote-tracking branch 'upstream/release-v1.73'

Tulir Asokan 2022-12-01 14:10:08 +02:00
commit fa8d0208d8
7 changed files with 22 additions and 86 deletions

CHANGES.md
View File

@@ -1,8 +1,17 @@
-Synapse 1.73.0rc1 (2022-11-29)
+Synapse 1.73.0rc2 (2022-12-01)
==============================
Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+Bugfixes
+--------
+- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
+Synapse 1.73.0rc1 (2022-11-29)
+==============================
Features
--------
@@ -23,7 +32,7 @@ Bugfixes
- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
-- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/blob/travis/msc/otk-dl-appservice/proposals/3202-encrypted-appservices.md). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
+- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))

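(Illustrative note, not part of the diff.) The MSC3202 entry above describes sending the same per-device one-time-key counts under two keys. Below is a minimal sketch of what such an appservice transaction body could look like; the payload layout and the values are assumptions for illustration, and only the two `org.matrix.msc3202.*` key names come from the changelog entry.

# Hypothetical appservice transaction body (illustration only): the same
# mapping of user ID -> device ID -> algorithm -> count is sent under both
# the old key name and the name proposed by MSC3202.
transaction_body = {
    "events": [],
    # Key name Synapse was already sending:
    "org.matrix.msc3202.device_one_time_key_counts": {
        "@alice:example.com": {"ADEVICEID": {"signed_curve25519": 50}},
    },
    # Duplicate key matching the MSC3202 proposal:
    "org.matrix.msc3202.device_one_time_keys_count": {
        "@alice:example.com": {"ADEVICEID": {"signed_curve25519": 50}},
    },
}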
debian/changelog vendored
View File

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium
+* New Synapse release 1.73.0rc2.
+-- Synapse Packaging team <packages@matrix.org> Thu, 01 Dec 2022 10:02:19 +0000
matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
* New Synapse release 1.73.0rc1.

pyproject.toml
View File

@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.73.0rc1"
+version = "1.73.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

synapse/handlers/device.py
View File

@@ -421,9 +421,6 @@ class DeviceHandler(DeviceWorkerHandler):
self._check_device_name_length(initial_device_display_name)
-# Prune the user's device list if they already have a lot of devices.
-await self._prune_too_many_devices(user_id)
if device_id is not None:
new_device = await self.store.store_device(
user_id=user_id,
@@ -455,14 +452,6 @@
raise errors.StoreError(500, "Couldn't generate a device ID.")
-async def _prune_too_many_devices(self, user_id: str) -> None:
-"""Delete any excess old devices this user may have."""
-device_ids = await self.store.check_too_many_devices_for_user(user_id)
-if not device_ids:
-return
-await self.delete_devices(user_id, device_ids)
async def _delete_stale_devices(self) -> None:
"""Background task that deletes devices which haven't been accessed for more than
a configured time period.
@@ -492,7 +481,7 @@
device_ids = [d for d in device_ids if d != except_device_id]
await self.delete_devices(user_id, device_ids)
-async def delete_devices(self, user_id: str, device_ids: Collection[str]) -> None:
+async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
"""Delete several devices
Args:

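(Usage note, not part of the diff.) With the annotation reverted to `List[str]`, callers pass a concrete list of device IDs. A minimal hypothetical sketch, assuming a `DeviceHandler` instance named `handler` and made-up IDs:

# Hypothetical call site; `handler`, the user ID and the device IDs are illustrative only.
device_ids = ["OLDDEVICEONE", "OLDDEVICETWO"]  # a plain list, matching List[str]
await handler.delete_devices("@alice:example.com", device_ids)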
synapse/storage/databases/main/devices.py
View File

@@ -1533,71 +1533,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
return rows
-async def check_too_many_devices_for_user(self, user_id: str) -> Collection[str]:
-"""Check if the user has a lot of devices, and if so return the set of
-devices we can prune.
-This does *not* return hidden devices or devices with E2E keys.
-"""
-num_devices = await self.db_pool.simple_select_one_onecol(
-table="devices",
-keyvalues={"user_id": user_id, "hidden": False},
-retcol="COALESCE(COUNT(*), 0)",
-desc="count_devices",
-)
-# We let users have up to ten devices without pruning.
-if num_devices <= 10:
-return ()
-# We prune everything older than N days.
-max_last_seen = self._clock.time_msec() - 14 * 24 * 60 * 60 * 1000
-if num_devices > 50:
-# If the user has more than 50 devices, then we chose a last seen
-# that ensures we keep at most 50 devices.
-sql = """
-SELECT last_seen FROM devices
-LEFT JOIN e2e_device_keys_json USING (user_id, device_id)
-WHERE
-user_id = ?
-AND NOT hidden
-AND last_seen IS NOT NULL
-AND key_json IS NULL
-ORDER BY last_seen DESC
-LIMIT 1
-OFFSET 50
-"""
-rows = await self.db_pool.execute(
-"check_too_many_devices_for_user_last_seen", None, sql, (user_id,)
-)
-if rows:
-max_last_seen = max(rows[0][0], max_last_seen)
-# Now fetch the devices to delete.
-sql = """
-SELECT DISTINCT device_id FROM devices
-LEFT JOIN e2e_device_keys_json USING (user_id, device_id)
-WHERE
-user_id = ?
-AND NOT hidden
-AND last_seen < ?
-AND key_json IS NULL
-"""
-def check_too_many_devices_for_user_txn(
-txn: LoggingTransaction,
-) -> Collection[str]:
-txn.execute(sql, (user_id, max_last_seen))
-return {device_id for device_id, in txn}
-return await self.db_pool.runInteraction(
-"check_too_many_devices_for_user",
-check_too_many_devices_for_user_txn,
-)
class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
# Because we have write access, this will be a StreamIdGenerator
@@ -1656,7 +1591,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
values={},
insertion_values={
"display_name": initial_device_display_name,
-"last_seen": self._clock.time_msec(),
"hidden": False,
},
desc="store_device",
@@ -1702,7 +1636,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
)
raise StoreError(500, "Problem storing device.")
-async def delete_devices(self, user_id: str, device_ids: Collection[str]) -> None:
+async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
"""Deletes several devices.
Args:

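(Context note, not part of the diff.) The method removed above picked a pruning cutoff in two steps: a fixed 14-day threshold, raised to the `last_seen` of the 51st most-recently-seen device (`ORDER BY last_seen DESC LIMIT 1 OFFSET 50`) when a user has more than 50 devices. Below is a standalone sketch of that cutoff logic using plain `sqlite3` rather than Synapse's database API; the schema and data are invented for illustration, and the real query's `LEFT JOIN` against `e2e_device_keys_json` (which skips devices with E2E keys) is omitted.

# Standalone illustration (hypothetical schema and data) of the removed pruning query.
import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE devices (user_id TEXT, device_id TEXT, last_seen INTEGER, hidden INTEGER)"
)
now_ms = int(time.time() * 1000)
# 60 devices, each last seen one hour apart (newest first).
conn.executemany(
    "INSERT INTO devices VALUES ('@alice:example.com', ?, ?, 0)",
    [(f"DEV{i}", now_ms - i * 3_600_000) for i in range(60)],
)

# Default cutoff: prune anything not seen for 14 days.
max_last_seen = now_ms - 14 * 24 * 60 * 60 * 1000

# With more than 50 devices, raise the cutoff to the 51st newest device's
# last_seen, so that at most roughly 50 devices survive.
row = conn.execute(
    "SELECT last_seen FROM devices WHERE user_id = ? AND NOT hidden "
    "ORDER BY last_seen DESC LIMIT 1 OFFSET 50",
    ("@alice:example.com",),
).fetchone()
if row is not None:
    max_last_seen = max(row[0], max_last_seen)

to_prune = [
    device_id
    for (device_id,) in conn.execute(
        "SELECT device_id FROM devices WHERE user_id = ? AND NOT hidden AND last_seen < ?",
        ("@alice:example.com", max_last_seen),
    )
]
print(f"{len(to_prune)} devices would be pruned")  # -> 9 devices would be pruned

The rc2 release removes this whole code path, per the [\#14582] entry in CHANGES.md above.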
tests/handlers/test_device.py
View File

@@ -115,7 +115,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
"device_id": "xyz",
"display_name": "display 0",
"last_seen_ip": None,
-"last_seen_ts": 1000000,
+"last_seen_ts": None,
},
device_map["xyz"],
)

tests/storage/test_client_ips.py
View File

@@ -169,8 +169,6 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
)
)
-last_seen = self.clock.time_msec()
if after_persisting:
# Trigger the storage loop
self.reactor.advance(10)
@@ -191,7 +189,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
"device_id": device_id,
"ip": None,
"user_agent": None,
-"last_seen": last_seen,
+"last_seen": None,
},
],
)