# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
# Copyright 2019,2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple

from synapse.api.errors import Codes, StoreError
from synapse.logging.opentracing import (
    get_active_span_text_map,
    set_tag,
    trace,
    whitelisted_homeserver,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
    LoggingTransaction,
    make_tuple_comparison_clause,
)
from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr

logger = logging.getLogger(__name__)

DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
    "drop_device_list_streams_non_unique_indexes"
)

BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES = "remove_dup_outbound_pokes"


class DeviceWorkerStore(SQLBaseStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        if hs.config.run_background_tasks:
            self._clock.looping_call(
                self._prune_old_outbound_device_pokes, 60 * 60 * 1000
            )

    async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int:
        """Retrieve the number of devices owned by the given users.

        Only counts devices that are not marked as hidden.

        Args:
            user_ids: The IDs of the users who own the devices

        Returns:
            The number of devices owned by these users.
        """

        def count_devices_by_users_txn(txn, user_ids):
            sql = """
                SELECT count(*)
                FROM devices
                WHERE
                    hidden = '0' AND
            """

            clause, args = make_in_list_sql_clause(
                txn.database_engine, "user_id", user_ids
            )

            txn.execute(sql + clause, args)
            return txn.fetchone()[0]

        if not user_ids:
            return 0

        return await self.db_pool.runInteraction(
            "count_devices_by_users", count_devices_by_users_txn, user_ids
        )

    async def get_device(self, user_id: str, device_id: str) -> Dict[str, Any]:
        """Retrieve a device. Only returns devices that are not marked as
        hidden.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to retrieve
        Returns:
            A dict containing the device information
        Raises:
            StoreError: if the device is not found
        """
        return await self.db_pool.simple_select_one(
            table="devices",
            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
            retcols=("user_id", "device_id", "display_name"),
            desc="get_device",
        )

    async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]:
        """Retrieve all of a user's registered devices. Only returns devices
        that are not marked as hidden.

        Args:
            user_id: The ID of the user to query devices for.
        Returns:
            A mapping from device_id to a dict containing "device_id", "user_id"
            and "display_name" for each device.
        """
        devices = await self.db_pool.simple_select_list(
            table="devices",
            keyvalues={"user_id": user_id, "hidden": False},
            retcols=("user_id", "device_id", "display_name"),
            desc="get_devices_by_user",
        )

        return {d["device_id"]: d for d in devices}
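
    # Illustrative sketch (not part of this module): how a caller such as a
    # device handler might use the two read methods above. `store` is assumed
    # to be an instance of this class obtained from the homeserver's datastore.
    #
    #     devices = await store.get_devices_by_user("@alice:example.com")
    #     for device_id, info in devices.items():
    #         print(device_id, info["display_name"])
    #
    #     # A single device; raises StoreError if it does not exist.
    #     device = await store.get_device("@alice:example.com", "DEVICEID")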

    @trace
    async def get_device_updates_by_remote(
        self, destination: str, from_stream_id: int, limit: int
    ) -> Tuple[int, List[Tuple[str, dict]]]:
        """Get a stream of device updates to send to the given remote server.

        Args:
            destination: The host the device updates are intended for
            from_stream_id: The minimum stream_id to filter updates by, exclusive
            limit: Maximum number of device updates to return

        Returns:
            A mapping from the current stream id (ie, the stream id of the last
            update included in the response), and the list of updates, where
            each update is a pair of EDU type and EDU contents.
        """
        now_stream_id = self.get_device_stream_token()

        has_changed = self._device_list_federation_stream_cache.has_entity_changed(
            destination, int(from_stream_id)
        )
        if not has_changed:
            return now_stream_id, []

        updates = await self.db_pool.runInteraction(
            "get_device_updates_by_remote",
            self._get_device_updates_by_remote_txn,
            destination,
            from_stream_id,
            now_stream_id,
            limit,
        )

        # Return an empty list if there are no updates
        if not updates:
            return now_stream_id, []

        # get the cross-signing keys of the users in the list, so that we can
        # determine which of the device changes were cross-signing keys
        users = {r[0] for r in updates}
        master_key_by_user = {}
        self_signing_key_by_user = {}
        for user in users:
            cross_signing_key = await self.get_e2e_cross_signing_key(user, "master")
            if cross_signing_key:
                key_id, verify_key = get_verify_key_from_cross_signing_key(
                    cross_signing_key
                )
                # verify_key is a VerifyKey from signedjson, which uses
                # .version to denote the portion of the key ID after the
                # algorithm and colon, which is the device ID
                master_key_by_user[user] = {
                    "key_info": cross_signing_key,
                    "device_id": verify_key.version,
                }

            cross_signing_key = await self.get_e2e_cross_signing_key(
                user, "self_signing"
            )
            if cross_signing_key:
                key_id, verify_key = get_verify_key_from_cross_signing_key(
                    cross_signing_key
                )
                self_signing_key_by_user[user] = {
                    "key_info": cross_signing_key,
                    "device_id": verify_key.version,
                }

        # Perform the equivalent of a GROUP BY
        #
        # Iterate through the updates list and copy non-duplicate
        # (user_id, device_id) entries into a map, with the value being
        # the max stream_id across each set of duplicate entries
        #
        # maps (user_id, device_id) -> (stream_id, opentracing_context)
        #
        # opentracing_context contains the opentracing metadata for the request
        # that created the poke
        #
        # The most recent request's opentracing_context is used as the
        # context which created the Edu.

        query_map = {}
        cross_signing_keys_by_user = {}
        for user_id, device_id, update_stream_id, update_context in updates:
            if (
                user_id in master_key_by_user
                and device_id == master_key_by_user[user_id]["device_id"]
            ):
                result = cross_signing_keys_by_user.setdefault(user_id, {})
                result["master_key"] = master_key_by_user[user_id]["key_info"]
            elif (
                user_id in self_signing_key_by_user
                and device_id == self_signing_key_by_user[user_id]["device_id"]
            ):
                result = cross_signing_keys_by_user.setdefault(user_id, {})
                result["self_signing_key"] = self_signing_key_by_user[user_id][
                    "key_info"
                ]
            else:
                key = (user_id, device_id)

                previous_update_stream_id, _ = query_map.get(key, (0, None))

                if update_stream_id > previous_update_stream_id:
                    query_map[key] = (update_stream_id, update_context)

        results = await self._get_device_update_edus_by_remote(
            destination, from_stream_id, query_map
        )

        # add the updated cross-signing keys to the results list
        for user_id, result in cross_signing_keys_by_user.items():
            result["user_id"] = user_id
            # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
            results.append(("org.matrix.signing_key_update", result))

        return now_stream_id, results
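
    # Illustrative sketch (an assumption about the caller, not code from the
    # federation sender itself): a per-destination queue is expected to fetch
    # the pending updates, send them, and advance its bookkeeping to the
    # returned stream id. `send_edu` and `last_successful_stream_id` are
    # hypothetical names.
    #
    #     now_stream_id, edus = await store.get_device_updates_by_remote(
    #         destination, last_successful_stream_id, limit=100
    #     )
    #     for edu_type, content in edus:
    #         send_edu(destination, edu_type, content)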

    def _get_device_updates_by_remote_txn(
        self,
        txn: LoggingTransaction,
        destination: str,
        from_stream_id: int,
        now_stream_id: int,
        limit: int,
    ):
        """Return device update information for a given remote destination

        Args:
            txn: The transaction to execute
            destination: The host the device updates are intended for
            from_stream_id: The minimum stream_id to filter updates by, exclusive
            now_stream_id: The maximum stream_id to filter updates by, inclusive
            limit: Maximum number of device updates to return

        Returns:
            List: List of device updates
        """
        # get the list of device updates that need to be sent
        sql = """
            SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes
            WHERE destination = ? AND ? < stream_id AND stream_id <= ?
            ORDER BY stream_id
            LIMIT ?
        """
        txn.execute(sql, (destination, from_stream_id, now_stream_id, limit))

        return list(txn)

    async def _get_device_update_edus_by_remote(
        self,
        destination: str,
        from_stream_id: int,
        query_map: Dict[Tuple[str, str], Tuple[int, Optional[str]]],
    ) -> List[Tuple[str, dict]]:
        """Returns a list of device update EDUs as well as E2EE keys

        Args:
            destination: The host the device updates are intended for
            from_stream_id: The minimum stream_id to filter updates by, exclusive
            query_map: Dictionary mapping (user_id, device_id) to the update
                stream_id and the relevant json-encoded opentracing context

        Returns:
            List of objects representing a device update EDU
        """
        devices = (
            await self.get_e2e_device_keys_and_signatures(
                query_map.keys(),
                include_all_devices=True,
                include_deleted_devices=True,
            )
            if query_map
            else {}
        )

        results = []
        for user_id, user_devices in devices.items():
            # The prev_id for the first row is always the last row before
            # `from_stream_id`
            prev_id = await self._get_last_device_update_for_remote_user(
                destination, user_id, from_stream_id
            )

            # make sure we go through the devices in stream order
            device_ids = sorted(
                user_devices.keys(),
                key=lambda i: query_map[(user_id, i)][0],
            )

            for device_id in device_ids:
                device = user_devices[device_id]
                stream_id, opentracing_context = query_map[(user_id, device_id)]
                result = {
                    "user_id": user_id,
                    "device_id": device_id,
                    "prev_id": [prev_id] if prev_id else [],
                    "stream_id": stream_id,
                    "org.matrix.opentracing_context": opentracing_context,
                }

                prev_id = stream_id

                if device is not None:
                    keys = device.keys
                    if keys:
                        result["keys"] = keys

                    device_display_name = device.display_name
                    if device_display_name:
                        result["device_display_name"] = device_display_name
                else:
                    result["deleted"] = True

                results.append(("m.device_list_update", result))

        return results
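
    # For reference, a single entry produced by the method above is a pair of
    # EDU type and content, roughly of this shape (illustrative values only):
    #
    #     (
    #         "m.device_list_update",
    #         {
    #             "user_id": "@alice:example.com",
    #             "device_id": "DEVICEID",
    #             "prev_id": [11],
    #             "stream_id": 12,
    #             "org.matrix.opentracing_context": "{}",
    #             "keys": {...},                    # present if the device still exists
    #             "device_display_name": "phone",   # ditto
    #             # "deleted": True,                # present instead if it was deleted
    #         },
    #     )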

    async def _get_last_device_update_for_remote_user(
        self, destination: str, user_id: str, from_stream_id: int
    ) -> int:
        def f(txn):
            prev_sent_id_sql = """
                SELECT coalesce(max(stream_id), 0) as stream_id
                FROM device_lists_outbound_last_success
                WHERE destination = ? AND user_id = ? AND stream_id <= ?
            """
            txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
            rows = txn.fetchall()
            return rows[0][0]

        return await self.db_pool.runInteraction(
            "get_last_device_update_for_remote_user", f
        )

    async def mark_as_sent_devices_by_remote(
        self, destination: str, stream_id: int
    ) -> None:
        """Mark that updates have successfully been sent to the destination."""
        await self.db_pool.runInteraction(
            "mark_as_sent_devices_by_remote",
            self._mark_as_sent_devices_by_remote_txn,
            destination,
            stream_id,
        )

    def _mark_as_sent_devices_by_remote_txn(
        self, txn: LoggingTransaction, destination: str, stream_id: int
    ) -> None:
        # We update the device_lists_outbound_last_success with the successfully
        # poked users.
        sql = """
            SELECT user_id, coalesce(max(o.stream_id), 0)
            FROM device_lists_outbound_pokes as o
            WHERE destination = ? AND o.stream_id <= ?
            GROUP BY user_id
        """
        txn.execute(sql, (destination, stream_id))
        rows = txn.fetchall()

        self.db_pool.simple_upsert_many_txn(
            txn=txn,
            table="device_lists_outbound_last_success",
            key_names=("destination", "user_id"),
            key_values=((destination, user_id) for user_id, _ in rows),
            value_names=("stream_id",),
            value_values=((stream_id,) for _, stream_id in rows),
        )

        # Delete all sent outbound pokes
        sql = """
            DELETE FROM device_lists_outbound_pokes
            WHERE destination = ? AND stream_id <= ?
        """
        txn.execute(sql, (destination, stream_id))
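
    # Illustrative sketch of the intended send/acknowledge cycle (an assumption
    # about the caller, not code from this module): once the destination has
    # accepted a transaction containing the updates, the sender marks them as
    # sent so they are not retransmitted and can be cleaned up.
    #
    #     now_stream_id, edus = await store.get_device_updates_by_remote(
    #         destination, from_stream_id, limit=100
    #     )
    #     ...  # send the EDUs to `destination`
    #     await store.mark_as_sent_devices_by_remote(destination, now_stream_id)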

    async def add_user_signature_change_to_streams(
        self, from_user_id: str, user_ids: List[str]
    ) -> int:
        """Persist that a user has made new signatures

        Args:
            from_user_id: the user who made the signatures
            user_ids: the users who were signed

        Returns:
            The new stream ID.
        """

        async with self._device_list_id_gen.get_next() as stream_id:
            await self.db_pool.runInteraction(
                "add_user_sig_change_to_streams",
                self._add_user_signature_change_txn,
                from_user_id,
                user_ids,
                stream_id,
            )
        return stream_id

    def _add_user_signature_change_txn(
        self,
        txn: LoggingTransaction,
        from_user_id: str,
        user_ids: List[str],
        stream_id: int,
    ) -> None:
        txn.call_after(
            self._user_signature_stream_cache.entity_has_changed,
            from_user_id,
            stream_id,
        )
        self.db_pool.simple_insert_txn(
            txn,
            "user_signature_stream",
            values={
                "stream_id": stream_id,
                "from_user_id": from_user_id,
                "user_ids": json_encoder.encode(user_ids),
            },
        )
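
    # Illustrative sketch (an assumption about the calling handler): after
    # storing new cross-signing signatures that a user uploaded over other
    # users' keys, the handler records the change so sync clients are notified.
    #
    #     stream_id = await store.add_user_signature_change_to_streams(
    #         from_user_id="@alice:example.com",
    #         user_ids=["@bob:example.com", "@carol:example.com"],
    #     )
    #     # `stream_id` can then be fed into the notifier / sync machinery.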

    @abc.abstractmethod
    def get_device_stream_token(self) -> int:
        """Get the current stream id from the _device_list_id_gen"""
        ...

    @trace
    async def get_user_devices_from_cache(
        self, query_list: List[Tuple[str, str]]
    ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:
        """Get the devices (and keys if any) for remote users from the cache.

        Args:
            query_list: List of (user_id, device_ids), if device_ids is
                falsey then return all device ids for that user.

        Returns:
            A tuple of (user_ids_not_in_cache, results_map), where
            user_ids_not_in_cache is a set of user_ids and results_map is a
            mapping of user_id -> device_id -> device_info.
        """
        user_ids = {user_id for user_id, _ in query_list}
        user_map = await self.get_device_list_last_stream_id_for_remotes(list(user_ids))

        # We go and check if any of the users need to have their device lists
        # resynced. If they do then we remove them from the cached list.
        users_needing_resync = await self.get_user_ids_requiring_device_list_resync(
            user_ids
        )
        user_ids_in_cache = {
            user_id for user_id, stream_id in user_map.items() if stream_id
        } - users_needing_resync
        user_ids_not_in_cache = user_ids - user_ids_in_cache

        results = {}
        for user_id, device_id in query_list:
            if user_id not in user_ids_in_cache:
                continue

            if device_id:
                device = await self._get_cached_user_device(user_id, device_id)
                results.setdefault(user_id, {})[device_id] = device
            else:
                results[user_id] = await self.get_cached_devices_for_user(user_id)

        set_tag("in_cache", results)
        set_tag("not_in_cache", user_ids_not_in_cache)

        return user_ids_not_in_cache, results
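
    # Illustrative sketch (an assumption about a caller such as a key-query
    # handler): devices found in the cache are used directly, while users not
    # in the cache must be queried over federation.
    #
    #     query = [("@bob:remote.example", None)]  # None = all of Bob's devices
    #     not_cached, cached = await store.get_user_devices_from_cache(query)
    #     for user_id in not_cached:
    #         ...  # fall back to a federation /user/keys/query request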

    @cached(num_args=2, tree=True)
    async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict:
        content = await self.db_pool.simple_select_one_onecol(
            table="device_lists_remote_cache",
            keyvalues={"user_id": user_id, "device_id": device_id},
            retcol="content",
            desc="_get_cached_user_device",
        )
        return db_to_json(content)

    @cached()
    async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]:
        devices = await self.db_pool.simple_select_list(
            table="device_lists_remote_cache",
            keyvalues={"user_id": user_id},
            retcols=("device_id", "content"),
            desc="get_cached_devices_for_user",
        )
        return {
            device["device_id"]: db_to_json(device["content"]) for device in devices
        }

    async def get_users_whose_devices_changed(
        self, from_key: int, user_ids: Iterable[str]
    ) -> Set[str]:
        """Get set of users whose devices have changed since `from_key` that
        are in the given list of user_ids.

        Args:
            from_key: The device lists stream token
            user_ids: The user IDs to query for devices.

        Returns:
            The set of user_ids whose devices have changed since `from_key`
        """

        # Get set of users who *may* have changed. Users not in the returned
        # list have definitely not changed.
        to_check = self._device_list_stream_cache.get_entities_changed(
            user_ids, from_key
        )

        if not to_check:
            return set()

        def _get_users_whose_devices_changed_txn(txn):
            changes = set()

            sql = """
                SELECT DISTINCT user_id FROM device_lists_stream
                WHERE stream_id > ?
                AND
            """

            for chunk in batch_iter(to_check, 100):
                clause, args = make_in_list_sql_clause(
                    txn.database_engine, "user_id", chunk
                )
                txn.execute(sql + clause, (from_key,) + tuple(args))
                changes.update(user_id for user_id, in txn)

            return changes

        return await self.db_pool.runInteraction(
            "get_users_whose_devices_changed", _get_users_whose_devices_changed_txn
        )
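
    # Illustrative sketch (an assumption about the sync handler): given the
    # device-list stream token from the client's previous sync, work out which
    # of the users it shares rooms with have changed their devices since then.
    #
    #     changed = await store.get_users_whose_devices_changed(
    #         from_key=since_device_list_key, user_ids=users_who_share_room
    #     )
    #     # `changed` feeds the `device_lists.changed` part of the sync response.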

    async def get_users_whose_signatures_changed(
        self, user_id: str, from_key: int
    ) -> Set[str]:
        """Get the users who have new cross-signing signatures made by `user_id` since
        `from_key`.

        Args:
            user_id: the user who made the signatures
            from_key: The device lists stream token

        Returns:
            A set of user IDs with updated signatures.
        """

        if self._user_signature_stream_cache.has_entity_changed(user_id, from_key):
            sql = """
                SELECT DISTINCT user_ids FROM user_signature_stream
                WHERE from_user_id = ? AND stream_id > ?
            """
            rows = await self.db_pool.execute(
                "get_users_whose_signatures_changed", None, sql, user_id, from_key
            )
            return {user for row in rows for user in db_to_json(row[0])}
        else:
            return set()

    async def get_all_device_list_changes_for_remotes(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
        """Get updates for device lists replication stream.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """

        if last_id == current_id:
            return [], current_id, False

        def _get_all_device_list_changes_for_remotes(txn):
            # This query Does The Right Thing where it'll correctly apply the
            # bounds to the inner queries.
            sql = """
                SELECT stream_id, entity FROM (
                    SELECT stream_id, user_id AS entity FROM device_lists_stream
                    UNION ALL
                    SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
                ) AS e
                WHERE ? < stream_id AND stream_id <= ?
                LIMIT ?
            """

            txn.execute(sql, (last_id, current_id, limit))
            updates = [(row[0], row[1:]) for row in txn]
            limited = False
            upto_token = current_id
            if len(updates) >= limit:
                upto_token = updates[-1][0]
                limited = True

            return updates, upto_token, limited

        return await self.db_pool.runInteraction(
            "get_all_device_list_changes_for_remotes",
            _get_all_device_list_changes_for_remotes,
        )
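
    # Illustrative sketch (an assumption, simplified from how replication
    # consumers typically page through a stream): keep fetching until the
    # `limited` flag is no longer set.
    #
    #     upto = last_seen_id
    #     while True:
    #         updates, upto, limited = await store.get_all_device_list_changes_for_remotes(
    #             "master", upto, current_id, limit=100
    #         )
    #         ...  # process `updates`
    #         if not limited:
    #             break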

    @cached(max_entries=10000)
    async def get_device_list_last_stream_id_for_remote(
        self, user_id: str
    ) -> Optional[Any]:
        """Get the last stream_id we got for a user. May be None if we haven't
        got any information for them.
        """
        return await self.db_pool.simple_select_one_onecol(
            table="device_lists_remote_extremeties",
            keyvalues={"user_id": user_id},
            retcol="stream_id",
            desc="get_device_list_last_stream_id_for_remote",
            allow_none=True,
        )

    @cachedList(
        cached_method_name="get_device_list_last_stream_id_for_remote",
        list_name="user_ids",
    )
    async def get_device_list_last_stream_id_for_remotes(self, user_ids: str):
        rows = await self.db_pool.simple_select_many_batch(
            table="device_lists_remote_extremeties",
            column="user_id",
            iterable=user_ids,
            retcols=("user_id", "stream_id"),
            desc="get_device_list_last_stream_id_for_remotes",
        )

        results = {user_id: None for user_id in user_ids}
        results.update({row["user_id"]: row["stream_id"] for row in rows})

        return results

    async def get_user_ids_requiring_device_list_resync(
        self,
        user_ids: Optional[Collection[str]] = None,
    ) -> Set[str]:
        """Given a list of remote users return the list of users that we
        should resync the device lists for. If None is given instead of a list,
        return every user that we should resync the device lists for.

        Returns:
            The IDs of users whose device lists need resync.
        """
        if user_ids:
            rows = await self.db_pool.simple_select_many_batch(
                table="device_lists_remote_resync",
                column="user_id",
                iterable=user_ids,
                retcols=("user_id",),
                desc="get_user_ids_requiring_device_list_resync_with_iterable",
            )
        else:
            rows = await self.db_pool.simple_select_list(
                table="device_lists_remote_resync",
                keyvalues=None,
                retcols=("user_id",),
                desc="get_user_ids_requiring_device_list_resync",
            )

        return {row["user_id"] for row in rows}

    async def mark_remote_user_device_cache_as_stale(self, user_id: str) -> None:
        """Records that the server has reason to believe the cache of the devices
        for the remote user is out of date.
        """
        await self.db_pool.simple_upsert(
            table="device_lists_remote_resync",
            keyvalues={"user_id": user_id},
            values={},
            insertion_values={"added_ts": self._clock.time_msec()},
            desc="mark_remote_user_device_cache_as_stale",
        )

    async def mark_remote_user_device_list_as_unsubscribed(self, user_id: str) -> None:
        """Mark that we no longer track device lists for remote user."""

        def _mark_remote_user_device_list_as_unsubscribed_txn(txn):
            self.db_pool.simple_delete_txn(
                txn,
                table="device_lists_remote_extremeties",
                keyvalues={"user_id": user_id},
            )
            self._invalidate_cache_and_stream(
                txn, self.get_device_list_last_stream_id_for_remote, (user_id,)
            )

        await self.db_pool.runInteraction(
            "mark_remote_user_device_list_as_unsubscribed",
            _mark_remote_user_device_list_as_unsubscribed_txn,
        )

    async def get_dehydrated_device(
        self, user_id: str
    ) -> Optional[Tuple[str, JsonDict]]:
        """Retrieve the information for a dehydrated device.

        Args:
            user_id: the user whose dehydrated device we are looking for
        Returns:
            a tuple whose first item is the device ID, and the second item is
            the dehydrated device information
        """
        # FIXME: make sure device ID still exists in devices table
        row = await self.db_pool.simple_select_one(
            table="dehydrated_devices",
            keyvalues={"user_id": user_id},
            retcols=["device_id", "device_data"],
            allow_none=True,
        )
        return (
            (row["device_id"], json_decoder.decode(row["device_data"])) if row else None
        )

    def _store_dehydrated_device_txn(
        self, txn, user_id: str, device_id: str, device_data: str
    ) -> Optional[str]:
        old_device_id = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="dehydrated_devices",
            keyvalues={"user_id": user_id},
            retcol="device_id",
            allow_none=True,
        )
        self.db_pool.simple_upsert_txn(
            txn,
            table="dehydrated_devices",
            keyvalues={"user_id": user_id},
            values={"device_id": device_id, "device_data": device_data},
        )
        return old_device_id

    async def store_dehydrated_device(
        self, user_id: str, device_id: str, device_data: JsonDict
    ) -> Optional[str]:
        """Store a dehydrated device for a user.

        Args:
            user_id: the user that we are storing the device for
            device_id: the ID of the dehydrated device
            device_data: the dehydrated device information
        Returns:
            device id of the user's previous dehydrated device, if any
        """
        return await self.db_pool.runInteraction(
            "store_dehydrated_device_txn",
            self._store_dehydrated_device_txn,
            user_id,
            device_id,
            json_encoder.encode(device_data),
        )

    async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool:
        """Remove a dehydrated device.

        Args:
            user_id: the user that the dehydrated device belongs to
            device_id: the ID of the dehydrated device
        """
        count = await self.db_pool.simple_delete(
            "dehydrated_devices",
            {"user_id": user_id, "device_id": device_id},
            desc="remove_dehydrated_device",
        )
        return count >= 1
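
    # Illustrative sketch of the dehydration flow (an assumption about the
    # device handler; values are placeholders): a client uploads a dehydrated
    # device, and a later login may claim and then discard it.
    #
    #     old = await store.store_dehydrated_device(
    #         user_id, "DEHYDRATED_ID", {"algorithm": "..."}
    #     )
    #     stored = await store.get_dehydrated_device(user_id)  # ("DEHYDRATED_ID", {...})
    #     removed = await store.remove_dehydrated_device(user_id, "DEHYDRATED_ID")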

    @wrap_as_background_process("prune_old_outbound_device_pokes")
    async def _prune_old_outbound_device_pokes(
        self, prune_age: int = 24 * 60 * 60 * 1000
    ) -> None:
        """Delete old entries out of the device_lists_outbound_pokes to ensure
        that we don't fill up due to dead servers.

        Normally, we try to send device updates as a delta since a previous known point:
        this is done by setting the prev_id in the m.device_list_update EDU. However,
        for that to work, we have to have a complete record of each change to
        each device, which can add up to quite a lot of data.

        An alternative mechanism is that, if the remote server sees that it has missed
        an entry in the stream_id sequence for a given user, it will request a full
        list of that user's devices. Hence, we can reduce the amount of data we have to
        store (and transmit in some future transaction), by clearing almost everything
        for a given destination out of the database, and having the remote server
        resync.

        All we need to do is make sure we keep at least one row for each
        (user, destination) pair, to remind us to send a m.device_list_update EDU for
        that user when the destination comes back. It doesn't matter which device
        we keep.
        """
        yesterday = self._clock.time_msec() - prune_age

        def _prune_txn(txn):
            # look for (user, destination) pairs which have an update older than
            # the cutoff.
            #
            # For each pair, we also need to know the most recent stream_id, and
            # an arbitrary device_id at that stream_id.
            select_sql = """
                SELECT
                    dlop1.destination,
                    dlop1.user_id,
                    MAX(dlop1.stream_id) AS stream_id,
                    (SELECT MIN(dlop2.device_id) AS device_id FROM
                        device_lists_outbound_pokes dlop2
                        WHERE dlop2.destination = dlop1.destination AND
                          dlop2.user_id=dlop1.user_id AND
                          dlop2.stream_id=MAX(dlop1.stream_id)
                    )
                FROM device_lists_outbound_pokes dlop1
                    GROUP BY destination, user_id
                    HAVING min(ts) < ? AND count(*) > 1
            """

            txn.execute(select_sql, (yesterday,))
            rows = txn.fetchall()

            if not rows:
                return

            logger.info(
                "Pruning old outbound device list updates for %i users/destinations: %s",
                len(rows),
                shortstr((row[0], row[1]) for row in rows),
            )

            # we want to keep the update with the highest stream_id for each user.
            #
            # there might be more than one update (with different device_ids) with the
            # same stream_id, so we also delete all but one rows with the max stream id.
            delete_sql = """
                DELETE FROM device_lists_outbound_pokes
                WHERE destination = ? AND user_id = ? AND (
                    stream_id < ? OR
                    (stream_id = ? AND device_id != ?)
                )
            """
            count = 0
            for (destination, user_id, stream_id, device_id) in rows:
                txn.execute(
                    delete_sql, (destination, user_id, stream_id, stream_id, device_id)
                )
                count += txn.rowcount

            # Since we've deleted unsent deltas, we need to remove the entry
            # of last successful sent so that the prev_ids are correctly set.
            sql = """
                DELETE FROM device_lists_outbound_last_success
                WHERE destination = ? AND user_id = ?
            """
            txn.execute_batch(sql, ((row[0], row[1]) for row in rows))

            logger.info("Pruned %d device list outbound pokes", count)

        await self.db_pool.runInteraction(
            "_prune_old_outbound_device_pokes",
            _prune_txn,
        )


class DeviceBackgroundUpdateStore(SQLBaseStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_index_update(
            "device_lists_stream_idx",
            index_name="device_lists_stream_user_id",
            table="device_lists_stream",
            columns=["user_id", "device_id"],
        )

        # create a unique index on device_lists_remote_cache
        self.db_pool.updates.register_background_index_update(
            "device_lists_remote_cache_unique_idx",
            index_name="device_lists_remote_cache_unique_id",
            table="device_lists_remote_cache",
            columns=["user_id", "device_id"],
            unique=True,
        )

        # And one on device_lists_remote_extremeties
        self.db_pool.updates.register_background_index_update(
            "device_lists_remote_extremeties_unique_idx",
            index_name="device_lists_remote_extremeties_unique_idx",
            table="device_lists_remote_extremeties",
            columns=["user_id"],
            unique=True,
        )

        # once they complete, we can remove the old non-unique indexes.
        self.db_pool.updates.register_background_update_handler(
            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
            self._drop_device_list_streams_non_unique_indexes,
        )

        # clear out duplicate device list outbound pokes
        self.db_pool.updates.register_background_update_handler(
            BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES,
            self._remove_duplicate_outbound_pokes,
        )

        # a pair of background updates that were added during the 1.14 release cycle,
        # but replaced with 58/06dlols_unique_idx.py
        self.db_pool.updates.register_noop_background_update(
            "device_lists_outbound_last_success_unique_idx",
        )
        self.db_pool.updates.register_noop_background_update(
            "drop_device_lists_outbound_last_success_non_unique_idx",
        )

    async def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
        def f(conn):
            txn = conn.cursor()
            txn.execute("DROP INDEX IF EXISTS device_lists_remote_cache_id")
            txn.execute("DROP INDEX IF EXISTS device_lists_remote_extremeties_id")
            txn.close()

        await self.db_pool.runWithConnection(f)
        await self.db_pool.updates._end_background_update(
            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES
        )
        return 1

    async def _remove_duplicate_outbound_pokes(self, progress, batch_size):
        # for some reason, we have accumulated duplicate entries in
        # device_lists_outbound_pokes, which makes prune_outbound_device_list_pokes less
        # efficient.
        #
        # For each duplicate, we delete all the existing rows and put one back.

        KEY_COLS = ["stream_id", "destination", "user_id", "device_id"]
        last_row = progress.get(
            "last_row",
            {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""},
        )

        def _txn(txn):
            clause, args = make_tuple_comparison_clause(
                [(x, last_row[x]) for x in KEY_COLS]
            )
            sql = """
                SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts
                FROM device_lists_outbound_pokes
                WHERE %s
                GROUP BY %s
                HAVING count(*) > 1
                ORDER BY %s
                LIMIT ?
            """ % (
                clause,  # WHERE
                ",".join(KEY_COLS),  # GROUP BY
                ",".join(KEY_COLS),  # ORDER BY
            )
            txn.execute(sql, args + [batch_size])
            rows = self.db_pool.cursor_to_dict(txn)

            row = None
            for row in rows:
                self.db_pool.simple_delete_txn(
                    txn,
                    "device_lists_outbound_pokes",
                    {x: row[x] for x in KEY_COLS},
                )

                row["sent"] = False
                self.db_pool.simple_insert_txn(
                    txn,
                    "device_lists_outbound_pokes",
                    row,
                )

            if row:
                self.db_pool.updates._background_update_progress_txn(
                    txn,
                    BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES,
                    {"last_row": row},
                )

            return len(rows)

        rows = await self.db_pool.runInteraction(
            BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, _txn
        )

        if not rows:
            await self.db_pool.updates._end_background_update(
                BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES
            )

        return rows
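
    # The batching above relies on make_tuple_comparison_clause to build a
    # lexicographic "greater than the last processed row" condition so each
    # run resumes where the previous one stopped. Illustrative sketch with
    # hypothetical values:
    #
    #     clause, args = make_tuple_comparison_clause(
    #         [("stream_id", 5), ("destination", "example.com")]
    #     )
    #     # clause compares (stream_id, destination) against (?, ?) (or an
    #     # expanded equivalent on engines without tuple comparison), and
    #     # args -> [5, "example.com"]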


class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        # Map of (user_id, device_id) -> bool. If there is an entry that implies
        # the device exists.
        self.device_id_exists_cache = LruCache(
            cache_name="device_id_exists", keylen=2, max_size=10000
        )

    async def store_device(
        self, user_id: str, device_id: str, initial_device_display_name: Optional[str]
    ) -> bool:
        """Ensure the given device is known; add it to the store if not

        Args:
            user_id: id of user associated with the device
            device_id: id of device
            initial_device_display_name: initial displayname of the device.
                Ignored if device exists.

        Returns:
            Whether the device was inserted or an existing device existed with that ID.

        Raises:
            StoreError: if the device is already in use
        """
        key = (user_id, device_id)
        if self.device_id_exists_cache.get(key, None):
            return False

        try:
            inserted = await self.db_pool.simple_insert(
                "devices",
                values={
                    "user_id": user_id,
                    "device_id": device_id,
                    "display_name": initial_device_display_name,
                    "hidden": False,
                },
                desc="store_device",
                or_ignore=True,
            )
            if not inserted:
                # if the device already exists, check if it's a real device, or
                # if the device ID is reserved by something else
                hidden = await self.db_pool.simple_select_one_onecol(
                    "devices",
                    keyvalues={"user_id": user_id, "device_id": device_id},
                    retcol="hidden",
                )
                if hidden:
                    raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN)
            self.device_id_exists_cache.set(key, True)
            return inserted
        except StoreError:
            raise
        except Exception as e:
            logger.error(
                "store_device with device_id=%s(%r) user_id=%s(%r)"
                " display_name=%s(%r) failed: %s",
                type(device_id).__name__,
                device_id,
                type(user_id).__name__,
                user_id,
                type(initial_device_display_name).__name__,
                initial_device_display_name,
                e,
            )
            raise StoreError(500, "Problem storing device.")
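
    # Illustrative sketch (an assumption about the registration/login handler):
    # a new device is recorded at login, and a clash with a hidden device ID
    # surfaces as a StoreError.
    #
    #     inserted = await store.store_device(
    #         "@alice:example.com", "ABCDEFGH", initial_device_display_name="laptop"
    #     )
    #     if not inserted:
    #         ...  # the device already existed; nothing else to do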

    async def delete_device(self, user_id: str, device_id: str) -> None:
        """Delete a device.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to delete
        """
        await self.db_pool.simple_delete_one(
            table="devices",
            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
            desc="delete_device",
        )

        self.device_id_exists_cache.invalidate((user_id, device_id))

    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
        """Deletes several devices.

        Args:
            user_id: The ID of the user which owns the devices
            device_ids: The IDs of the devices to delete
        """
        await self.db_pool.simple_delete_many(
            table="devices",
            column="device_id",
            iterable=device_ids,
            keyvalues={"user_id": user_id, "hidden": False},
            desc="delete_devices",
        )
        for device_id in device_ids:
            self.device_id_exists_cache.invalidate((user_id, device_id))

    async def update_device(
        self, user_id: str, device_id: str, new_display_name: Optional[str] = None
    ) -> None:
        """Update a device. Only updates the device if it is not marked as
        hidden.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to update
            new_display_name: new displayname for device; None to leave unchanged
        Raises:
            StoreError: if the device is not found
        """
        updates = {}
        if new_display_name is not None:
            updates["display_name"] = new_display_name
        if not updates:
            return None
        await self.db_pool.simple_update_one(
            table="devices",
            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
            updatevalues=updates,
            desc="update_device",
        )

    async def update_remote_device_list_cache_entry(
        self, user_id: str, device_id: str, content: JsonDict, stream_id: str
    ) -> None:
        """Updates a single device in the cache of a remote user's device list.

        Note: assumes that we are the only thread that can be updating this user's
        device list.

        Args:
            user_id: User to update device list for
            device_id: ID of the device being updated
            content: new data on this device
            stream_id: the version of the device list
        """
        await self.db_pool.runInteraction(
            "update_remote_device_list_cache_entry",
            self._update_remote_device_list_cache_entry_txn,
            user_id,
            device_id,
            content,
            stream_id,
        )
2019-04-03 05:07:29 -04:00
|
|
|
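    # The `content` blob is stored verbatim; the only keys this store inspects are
    # "deleted" (below) and "device_id" (when replacing the whole cache). A
    # plausible, purely illustrative shape as supplied over federation:
    #
    #     {
    #         "device_id": "ABCDEFGHIJ",
    #         "device_display_name": "alice's phone",
    #         "keys": {...},
    #     }
    #
    # or, for a removed device, simply {"device_id": "ABCDEFGHIJ", "deleted": True}.
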
    def _update_remote_device_list_cache_entry_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_id: str,
        content: JsonDict,
        stream_id: str,
    ) -> None:
        if content.get("deleted"):
            self.db_pool.simple_delete_txn(
                txn,
                table="device_lists_remote_cache",
                keyvalues={"user_id": user_id, "device_id": device_id},
            )

            txn.call_after(
                self.device_id_exists_cache.invalidate, (user_id, device_id)
            )
        else:
            self.db_pool.simple_upsert_txn(
                txn,
                table="device_lists_remote_cache",
                keyvalues={"user_id": user_id, "device_id": device_id},
                values={"content": json_encoder.encode(content)},
                # we don't need to lock, because we assume we are the only thread
                # updating this user's devices.
                lock=False,
            )

        txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id))
        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
        txn.call_after(
            self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
        )

        self.db_pool.simple_upsert_txn(
            txn,
            table="device_lists_remote_extremeties",
            keyvalues={"user_id": user_id},
            values={"stream_id": stream_id},
            # again, we can assume we are the only thread updating this user's
            # extremity.
            lock=False,
        )

    async def update_remote_device_list_cache(
        self, user_id: str, devices: List[dict], stream_id: int
    ) -> None:
        """Replace the entire cache of the remote user's devices.

        Note: assumes that we are the only thread that can be updating this user's
        device list.

        Args:
            user_id: User to update device list for
            devices: list of device objects supplied over federation
            stream_id: the version of the device list
        """
        await self.db_pool.runInteraction(
            "update_remote_device_list_cache",
            self._update_remote_device_list_cache_txn,
            user_id,
            devices,
            stream_id,
        )

    def _update_remote_device_list_cache_txn(
        self, txn: LoggingTransaction, user_id: str, devices: List[dict], stream_id: int
    ) -> None:
        self.db_pool.simple_delete_txn(
            txn, table="device_lists_remote_cache", keyvalues={"user_id": user_id}
        )

        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_remote_cache",
            values=[
                {
                    "user_id": user_id,
                    "device_id": content["device_id"],
                    "content": json_encoder.encode(content),
                }
                for content in devices
            ],
        )

        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
        txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,))
        txn.call_after(
            self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
        )

        self.db_pool.simple_upsert_txn(
            txn,
            table="device_lists_remote_extremeties",
            keyvalues={"user_id": user_id},
            values={"stream_id": stream_id},
            # we don't need to lock, because we can assume we are the only thread
            # updating this user's extremity.
            lock=False,
        )

        # If we're replacing the remote user's device list cache, presumably
        # we've done a full resync, so we remove the entry that says we need
        # to resync.
        self.db_pool.simple_delete_txn(
            txn,
            table="device_lists_remote_resync",
            keyvalues={"user_id": user_id},
        )

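    # Note on the tables touched above: `device_lists_remote_cache` holds one row
    # per (user, device) with the device content, `device_lists_remote_extremeties`
    # records the stream_id up to which that cache is current, and
    # `device_lists_remote_resync` marks users whose cache still needs a full
    # resync (cleared here because we have just done one).
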
    async def add_device_change_to_streams(
        self, user_id: str, device_ids: Collection[str], hosts: List[str]
    ):
        """Persist that a user's devices have been updated, and which hosts
        (if any) should be poked.

        Returns:
            The highest stream ID allocated for these changes, or None if no
            device IDs were supplied.
        """
        if not device_ids:
            return

        async with self._device_list_id_gen.get_next_mult(
            len(device_ids)
        ) as stream_ids:
            await self.db_pool.runInteraction(
                "add_device_change_to_stream",
                self._add_device_change_to_stream_txn,
                user_id,
                device_ids,
                stream_ids,
            )

        if not hosts:
            return stream_ids[-1]

        context = get_active_span_text_map()
        async with self._device_list_id_gen.get_next_mult(
            len(hosts) * len(device_ids)
        ) as stream_ids:
            await self.db_pool.runInteraction(
                "add_device_outbound_poke_to_stream",
                self._add_device_outbound_poke_to_stream_txn,
                user_id,
                device_ids,
                hosts,
                stream_ids,
                context,
            )

        return stream_ids[-1]

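    # A minimal usage sketch (illustrative only; `store` and the hostnames are
    # assumptions):
    #
    #     stream_id = await store.add_device_change_to_streams(
    #         "@alice:example.com",
    #         ["ABCDEFGHIJ"],
    #         hosts=["remote-one.example.com", "remote-two.example.com"],
    #     )
    #
    # One stream ID is allocated per device for the local change stream, and a
    # further len(hosts) * len(device_ids) IDs for the outbound pokes.
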
    def _add_device_change_to_stream_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_ids: Collection[str],
        stream_ids: List[str],
    ):
        txn.call_after(
            self._device_list_stream_cache.entity_has_changed,
            user_id,
            stream_ids[-1],
        )

        min_stream_id = stream_ids[0]

        # Delete older entries in the table, as we really only care about
        # when the latest change happened.
        txn.execute_batch(
            """
            DELETE FROM device_lists_stream
            WHERE user_id = ? AND device_id = ? AND stream_id < ?
            """,
            [(user_id, device_id, min_stream_id) for device_id in device_ids],
        )

        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_stream",
            values=[
                {"stream_id": stream_id, "user_id": user_id, "device_id": device_id}
                for stream_id, device_id in zip(stream_ids, device_ids)
            ],
        )

    def _add_device_outbound_poke_to_stream_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_ids: Collection[str],
        hosts: List[str],
        stream_ids: List[str],
        context: Dict[str, str],
    ):
        for host in hosts:
            txn.call_after(
                self._device_list_federation_stream_cache.entity_has_changed,
                host,
                stream_ids[-1],
            )

        now = self._clock.time_msec()
        next_stream_id = iter(stream_ids)

        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_outbound_pokes",
            values=[
                {
                    "destination": destination,
                    "stream_id": next(next_stream_id),
                    "user_id": user_id,
                    "device_id": device_id,
                    "sent": False,
                    "ts": now,
                    "opentracing_context": json_encoder.encode(context)
                    if whitelisted_homeserver(destination)
                    else "{}",
                }
                for destination in hosts
                for device_id in device_ids
            ],
        )

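    # The insert above writes one `device_lists_outbound_pokes` row per
    # (destination, device_id) pair, consuming one of the pre-allocated stream IDs
    # per row; the opentracing context is only recorded for whitelisted
    # destinations.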