Mirror of https://gitlab.com/veilid/veilidchat.git
more refactor and dhtrecord multiple-open support
parent c4c7b264aa
commit e262b0f777
19 changed files with 782 additions and 419 deletions
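The headline change is the "dhtrecord multiple-open support" named in the commit message: DHTRecordPool now keeps, per record key, a SharedDHTRecordData (record descriptor, default writer and routing context, subkey sequence cache, watch flags) plus the set of DHTRecord handles opened on that key, and the underlying Veilid record is only closed when the last handle closes. A minimal self-contained sketch of that reference-counting pattern follows; the names (PoolSketch, HandleSketch, _OpenedEntry) are illustrative, not the real API.

class _OpenedEntry {
  final Set<HandleSketch> handles = {};
}

class HandleSketch {
  HandleSketch(this.key, this._pool);
  final String key;
  final PoolSketch _pool;

  Future<void> close() => _pool._closeHandle(this);
}

class PoolSketch {
  final _opened = <String, _OpenedEntry>{};

  // openRead/openWrite equivalents funnel through here.
  Future<HandleSketch> open(
      String key, Future<void> Function(String key) openUnderlying) async {
    var entry = _opened[key];
    if (entry == null) {
      await openUnderlying(key); // only the first opener touches the network
      entry = _opened[key] = _OpenedEntry();
    }
    final handle = HandleSketch(key, this);
    entry.handles.add(handle);
    return handle;
  }

  Future<void> _closeHandle(HandleSketch handle) async {
    final entry = _opened[handle.key];
    if (entry == null || !entry.handles.remove(handle)) {
      throw StateError('record already closed');
    }
    if (entry.handles.isEmpty) {
      // Last handle gone: this is where closeDHTRecord would run.
      _opened.remove(handle.key);
    }
  }
}

In the diff below, OpenedRecordInfo plays the role of _OpenedEntry, and DHTRecordPool._recordClosed implements the last-handle close.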
|
@@ -2,7 +2,6 @@
|
|||
|
||||
library dht_support;
|
||||
|
||||
export 'src/dht_record.dart';
|
||||
export 'src/dht_record_crypto.dart';
|
||||
export 'src/dht_record_cubit.dart';
|
||||
export 'src/dht_record_pool.dart';
|
||||
|
|
|
@@ -1,12 +1,4 @@
|
|||
import 'dart:async';
|
||||
import 'dart:typed_data';
|
||||
|
||||
import 'package:equatable/equatable.dart';
|
||||
import 'package:fast_immutable_collections/fast_immutable_collections.dart';
|
||||
import 'package:meta/meta.dart';
|
||||
import 'package:protobuf/protobuf.dart';
|
||||
|
||||
import '../../../veilid_support.dart';
|
||||
part of 'dht_record_pool.dart';
|
||||
|
||||
@immutable
|
||||
class DHTRecordWatchChange extends Equatable {
|
||||
|
@@ -14,7 +6,7 @@ class DHTRecordWatchChange extends Equatable {
|
|||
{required this.local, required this.data, required this.subkeys});
|
||||
|
||||
final bool local;
|
||||
final Uint8List data;
|
||||
final Uint8List? data;
|
||||
final List<ValueSubkeyRange> subkeys;
|
||||
|
||||
@override
|
||||
|
@@ -26,46 +18,41 @@ class DHTRecordWatchChange extends Equatable {
|
|||
class DHTRecord {
|
||||
DHTRecord(
|
||||
{required VeilidRoutingContext routingContext,
|
||||
required DHTRecordDescriptor recordDescriptor,
|
||||
int defaultSubkey = 0,
|
||||
KeyPair? writer,
|
||||
DHTRecordCrypto crypto = const DHTRecordCryptoPublic()})
|
||||
required SharedDHTRecordData sharedDHTRecordData,
|
||||
required int defaultSubkey,
|
||||
required KeyPair? writer,
|
||||
required DHTRecordCrypto crypto})
|
||||
: _crypto = crypto,
|
||||
_routingContext = routingContext,
|
||||
_recordDescriptor = recordDescriptor,
|
||||
_defaultSubkey = defaultSubkey,
|
||||
_writer = writer,
|
||||
_open = true,
|
||||
_valid = true,
|
||||
_subkeySeqCache = {},
|
||||
needsWatchStateUpdate = false,
|
||||
inWatchStateUpdate = false;
|
||||
_sharedDHTRecordData = sharedDHTRecordData;
|
||||
|
||||
final SharedDHTRecordData _sharedDHTRecordData;
|
||||
final VeilidRoutingContext _routingContext;
|
||||
final DHTRecordDescriptor _recordDescriptor;
|
||||
final int _defaultSubkey;
|
||||
final KeyPair? _writer;
|
||||
final Map<int, int> _subkeySeqCache;
|
||||
final DHTRecordCrypto _crypto;
|
||||
|
||||
bool _open;
|
||||
bool _valid;
|
||||
@internal
|
||||
StreamController<DHTRecordWatchChange>? watchController;
|
||||
@internal
|
||||
bool needsWatchStateUpdate;
|
||||
@internal
|
||||
bool inWatchStateUpdate;
|
||||
@internal
|
||||
WatchState? watchState;
|
||||
|
||||
int subkeyOrDefault(int subkey) => (subkey == -1) ? _defaultSubkey : subkey;
|
||||
|
||||
VeilidRoutingContext get routingContext => _routingContext;
|
||||
TypedKey get key => _recordDescriptor.key;
|
||||
PublicKey get owner => _recordDescriptor.owner;
|
||||
KeyPair? get ownerKeyPair => _recordDescriptor.ownerKeyPair();
|
||||
DHTSchema get schema => _recordDescriptor.schema;
|
||||
int get subkeyCount => _recordDescriptor.schema.subkeyCount();
|
||||
TypedKey get key => _sharedDHTRecordData.recordDescriptor.key;
|
||||
PublicKey get owner => _sharedDHTRecordData.recordDescriptor.owner;
|
||||
KeyPair? get ownerKeyPair =>
|
||||
_sharedDHTRecordData.recordDescriptor.ownerKeyPair();
|
||||
DHTSchema get schema => _sharedDHTRecordData.recordDescriptor.schema;
|
||||
int get subkeyCount =>
|
||||
_sharedDHTRecordData.recordDescriptor.schema.subkeyCount();
|
||||
KeyPair? get writer => _writer;
|
||||
DHTRecordCrypto get crypto => _crypto;
|
||||
OwnedDHTRecordPointer get ownedDHTRecordPointer =>
|
||||
|
@@ -79,22 +66,16 @@ class DHTRecord {
|
|||
return;
|
||||
}
|
||||
await watchController?.close();
|
||||
await _routingContext.closeDHTRecord(_recordDescriptor.key);
|
||||
DHTRecordPool.instance.recordClosed(_recordDescriptor.key);
|
||||
await DHTRecordPool.instance._recordClosed(this);
|
||||
_open = false;
|
||||
}
|
||||
|
||||
Future<void> delete() async {
|
||||
if (!_valid) {
|
||||
throw StateError('already deleted');
|
||||
}
|
||||
if (_open) {
|
||||
await close();
|
||||
}
|
||||
await DHTRecordPool.instance.deleteDeep(key);
|
||||
void _markDeleted() {
|
||||
_valid = false;
|
||||
}
|
||||
|
||||
Future<void> delete() => DHTRecordPool.instance.delete(key);
|
||||
|
||||
Future<T> scope<T>(Future<T> Function(DHTRecord) scopeFunction) async {
|
||||
try {
|
||||
return await scopeFunction(this);
|
||||
|
@@ -134,17 +115,17 @@ class DHTRecord {
|
|||
bool forceRefresh = false,
|
||||
bool onlyUpdates = false}) async {
|
||||
subkey = subkeyOrDefault(subkey);
|
||||
final valueData = await _routingContext.getDHTValue(
|
||||
_recordDescriptor.key, subkey, forceRefresh);
|
||||
final valueData = await _routingContext.getDHTValue(key, subkey,
|
||||
forceRefresh: forceRefresh);
|
||||
if (valueData == null) {
|
||||
return null;
|
||||
}
|
||||
final lastSeq = _subkeySeqCache[subkey];
|
||||
final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey];
|
||||
if (onlyUpdates && lastSeq != null && valueData.seq <= lastSeq) {
|
||||
return null;
|
||||
}
|
||||
final out = _crypto.decrypt(valueData.data, subkey);
|
||||
_subkeySeqCache[subkey] = valueData.seq;
|
||||
_sharedDHTRecordData.subkeySeqCache[subkey] = valueData.seq;
|
||||
return out;
|
||||
}
|
||||
|
||||
|
@@ -176,17 +157,16 @@ class DHTRecord {
|
|||
Future<Uint8List?> tryWriteBytes(Uint8List newValue,
|
||||
{int subkey = -1}) async {
|
||||
subkey = subkeyOrDefault(subkey);
|
||||
final lastSeq = _subkeySeqCache[subkey];
|
||||
final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey];
|
||||
final encryptedNewValue = await _crypto.encrypt(newValue, subkey);
|
||||
|
||||
// Set the new data if possible
|
||||
var newValueData = await _routingContext.setDHTValue(
|
||||
_recordDescriptor.key, subkey, encryptedNewValue);
|
||||
var newValueData =
|
||||
await _routingContext.setDHTValue(key, subkey, encryptedNewValue);
|
||||
if (newValueData == null) {
|
||||
// A newer value wasn't found on the set, but
|
||||
// we may get a newer value when getting the value for the sequence number
|
||||
newValueData = await _routingContext.getDHTValue(
|
||||
_recordDescriptor.key, subkey, false);
|
||||
newValueData = await _routingContext.getDHTValue(key, subkey);
|
||||
if (newValueData == null) {
|
||||
assert(newValueData != null, "can't get value that was just set");
|
||||
return null;
|
||||
|
@@ -195,13 +175,13 @@ class DHTRecord {
|
|||
|
||||
// Record new sequence number
|
||||
final isUpdated = newValueData.seq != lastSeq;
|
||||
_subkeySeqCache[subkey] = newValueData.seq;
|
||||
_sharedDHTRecordData.subkeySeqCache[subkey] = newValueData.seq;
|
||||
|
||||
// See if the encrypted data returned is exactly the same
|
||||
// if so, shortcut and don't bother decrypting it
|
||||
if (newValueData.data.equals(encryptedNewValue)) {
|
||||
if (isUpdated) {
|
||||
addLocalValueChange(newValue, subkey);
|
||||
_addLocalValueChange(newValue, subkey);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@@ -209,36 +189,35 @@ class DHTRecord {
|
|||
// Decrypt value to return it
|
||||
final decryptedNewValue = await _crypto.decrypt(newValueData.data, subkey);
|
||||
if (isUpdated) {
|
||||
addLocalValueChange(decryptedNewValue, subkey);
|
||||
_addLocalValueChange(decryptedNewValue, subkey);
|
||||
}
|
||||
return decryptedNewValue;
|
||||
}
|
||||
|
||||
Future<void> eventualWriteBytes(Uint8List newValue, {int subkey = -1}) async {
|
||||
subkey = subkeyOrDefault(subkey);
|
||||
final lastSeq = _subkeySeqCache[subkey];
|
||||
final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey];
|
||||
final encryptedNewValue = await _crypto.encrypt(newValue, subkey);
|
||||
|
||||
ValueData? newValueData;
|
||||
do {
|
||||
do {
|
||||
// Set the new data
|
||||
newValueData = await _routingContext.setDHTValue(
|
||||
_recordDescriptor.key, subkey, encryptedNewValue);
|
||||
newValueData =
|
||||
await _routingContext.setDHTValue(key, subkey, encryptedNewValue);
|
||||
|
||||
// Repeat if newer data on the network was found
|
||||
} while (newValueData != null);
|
||||
|
||||
// Get the data to check its sequence number
|
||||
newValueData = await _routingContext.getDHTValue(
|
||||
_recordDescriptor.key, subkey, false);
|
||||
newValueData = await _routingContext.getDHTValue(key, subkey);
|
||||
if (newValueData == null) {
|
||||
assert(newValueData != null, "can't get value that was just set");
|
||||
return;
|
||||
}
|
||||
|
||||
// Record new sequence number
|
||||
_subkeySeqCache[subkey] = newValueData.seq;
|
||||
_sharedDHTRecordData.subkeySeqCache[subkey] = newValueData.seq;
|
||||
|
||||
// The encrypted data returned should be exactly the same
|
||||
// as what we are trying to set,
|
||||
|
@@ -247,7 +226,7 @@ class DHTRecord {
|
|||
|
||||
final isUpdated = newValueData.seq != lastSeq;
|
||||
if (isUpdated) {
|
||||
addLocalValueChange(newValue, subkey);
|
||||
_addLocalValueChange(newValue, subkey);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -258,8 +237,7 @@ class DHTRecord {
|
|||
|
||||
// Get the existing data, do not allow force refresh here
|
||||
// because if we need a refresh the setDHTValue will fail anyway
|
||||
var oldValue =
|
||||
await get(subkey: subkey, forceRefresh: false, onlyUpdates: false);
|
||||
var oldValue = await get(subkey: subkey);
|
||||
|
||||
do {
|
||||
// Update the data
|
||||
|
@@ -314,16 +292,16 @@ class DHTRecord {
|
|||
int? count}) async {
|
||||
// Set up watch requirements which will get picked up by the next tick
|
||||
final oldWatchState = watchState;
|
||||
watchState = WatchState(
|
||||
subkeys: subkeys?.lock, expiration: expiration, count: count);
|
||||
watchState =
|
||||
WatchState(subkeys: subkeys, expiration: expiration, count: count);
|
||||
if (oldWatchState != watchState) {
|
||||
needsWatchStateUpdate = true;
|
||||
_sharedDHTRecordData.needsWatchStateUpdate = true;
|
||||
}
|
||||
}
|
||||
|
||||
Future<StreamSubscription<DHTRecordWatchChange>> listen(
|
||||
Future<void> Function(
|
||||
DHTRecord record, Uint8List data, List<ValueSubkeyRange> subkeys)
|
||||
DHTRecord record, Uint8List? data, List<ValueSubkeyRange> subkeys)
|
||||
onUpdate,
|
||||
{bool localChanges = true}) async {
|
||||
// Set up watch requirements
|
||||
|
@@ -339,14 +317,16 @@ class DHTRecord {
|
|||
return;
|
||||
}
|
||||
Future.delayed(Duration.zero, () async {
|
||||
final Uint8List data;
|
||||
final Uint8List? data;
|
||||
if (change.local) {
|
||||
// local changes are not encrypted
|
||||
data = change.data;
|
||||
} else {
|
||||
// incoming/remote changes are encrypted
|
||||
data =
|
||||
await _crypto.decrypt(change.data, change.subkeys.first.low);
|
||||
final changeData = change.data;
|
||||
data = changeData == null
|
||||
? null
|
||||
: await _crypto.decrypt(changeData, change.subkeys.first.low);
|
||||
}
|
||||
await onUpdate(this, data, change.subkeys);
|
||||
});
|
||||
|
@@ -362,17 +342,48 @@ class DHTRecord {
|
|||
// Tear down watch requirements
|
||||
if (watchState != null) {
|
||||
watchState = null;
|
||||
needsWatchStateUpdate = true;
|
||||
_sharedDHTRecordData.needsWatchStateUpdate = true;
|
||||
}
|
||||
}
|
||||
|
||||
void addLocalValueChange(Uint8List data, int subkey) {
|
||||
watchController?.add(DHTRecordWatchChange(
|
||||
local: true, data: data, subkeys: [ValueSubkeyRange.single(subkey)]));
|
||||
void _addValueChange(
|
||||
{required bool local,
|
||||
required Uint8List data,
|
||||
required List<ValueSubkeyRange> subkeys}) {
|
||||
final ws = watchState;
|
||||
if (ws != null) {
|
||||
final watchedSubkeys = ws.subkeys;
|
||||
if (watchedSubkeys == null) {
|
||||
// Report all subkeys
|
||||
watchController?.add(
|
||||
DHTRecordWatchChange(local: local, data: data, subkeys: subkeys));
|
||||
} else {
|
||||
// Only some subkeys are being watched, see if the reported update
|
||||
// overlaps the subkeys being watched
|
||||
final overlappedSubkeys = watchedSubkeys.intersectSubkeys(subkeys);
|
||||
// If the reported data isn't within the
|
||||
// range we care about, don't pass it through
|
||||
final overlappedFirstSubkey = overlappedSubkeys.firstSubkey;
|
||||
final updateFirstSubkey = subkeys.firstSubkey;
|
||||
final updatedData = (overlappedFirstSubkey != null &&
|
||||
updateFirstSubkey != null &&
|
||||
overlappedFirstSubkey == updateFirstSubkey)
|
||||
? data
|
||||
: null;
|
||||
// Report only watched subkeys
|
||||
watchController?.add(DHTRecordWatchChange(
|
||||
local: local, data: updatedData, subkeys: overlappedSubkeys));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void _addLocalValueChange(Uint8List data, int subkey) {
|
||||
_addValueChange(
|
||||
local: true, data: data, subkeys: [ValueSubkeyRange.single(subkey)]);
|
||||
}
|
||||
|
||||
void addRemoteValueChange(VeilidUpdateValueChange update) {
|
||||
watchController?.add(DHTRecordWatchChange(
|
||||
local: false, data: update.valueData.data, subkeys: update.subkeys));
|
||||
_addValueChange(
|
||||
local: false, data: update.valueData.data, subkeys: update.subkeys);
|
||||
}
|
||||
}
|
||||
|
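With DHTRecordWatchChange.data and the listen() callback now taking Uint8List?, subscribers have to tolerate updates that arrive without a payload (for example when the reported subkeys don't start at the watched first subkey). A hedged usage sketch, assuming a record that already has a watch set up (see the -314/+292 hunk above) and an assumed package import path:

import 'dart:async';

import 'package:veilid_support/veilid_support.dart'; // assumed import path

Future<StreamSubscription<DHTRecordWatchChange>> listenSketch(
        DHTRecord record) =>
    record.listen((rec, data, subkeys) async {
      // data is null when the update doesn't carry a usable value for the
      // first watched subkey; fall back to an explicit get in that case.
      final value = data ??
          await rec.get(subkey: subkeys.first.low, forceRefresh: true);
      if (value != null) {
        // Handle the (decrypted) value of this update here.
      }
    });

The -362/+342 hunk above shows the corresponding teardown path, which clears watchState and flags needsWatchStateUpdate so the pool's tick() can cancel the network watch.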
|
|
@@ -3,8 +3,8 @@ import 'dart:typed_data';
|
|||
import '../../../../veilid_support.dart';
|
||||
|
||||
abstract class DHTRecordCrypto {
|
||||
FutureOr<Uint8List> encrypt(Uint8List data, int subkey);
|
||||
FutureOr<Uint8List> decrypt(Uint8List data, int subkey);
|
||||
Future<Uint8List> encrypt(Uint8List data, int subkey);
|
||||
Future<Uint8List> decrypt(Uint8List data, int subkey);
|
||||
}
|
||||
|
||||
////////////////////////////////////
|
||||
|
@@ -32,11 +32,11 @@ class DHTRecordCryptoPrivate implements DHTRecordCrypto {
|
|||
}
|
||||
|
||||
@override
|
||||
FutureOr<Uint8List> encrypt(Uint8List data, int subkey) =>
|
||||
Future<Uint8List> encrypt(Uint8List data, int subkey) =>
|
||||
_cryptoSystem.encryptNoAuthWithNonce(data, _secretKey);
|
||||
|
||||
@override
|
||||
FutureOr<Uint8List> decrypt(Uint8List data, int subkey) =>
|
||||
Future<Uint8List> decrypt(Uint8List data, int subkey) =>
|
||||
_cryptoSystem.decryptNoAuthWithNonce(data, _secretKey);
|
||||
}
|
||||
|
||||
|
@@ -46,8 +46,8 @@ class DHTRecordCryptoPublic implements DHTRecordCrypto {
|
|||
const DHTRecordCryptoPublic();
|
||||
|
||||
@override
|
||||
FutureOr<Uint8List> encrypt(Uint8List data, int subkey) => data;
|
||||
Future<Uint8List> encrypt(Uint8List data, int subkey) async => data;
|
||||
|
||||
@override
|
||||
FutureOr<Uint8List> decrypt(Uint8List data, int subkey) => data;
|
||||
Future<Uint8List> decrypt(Uint8List data, int subkey) async => data;
|
||||
}
|
||||
|
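DHTRecordCrypto's encrypt/decrypt now return Future<Uint8List> instead of FutureOr<Uint8List>, so call sites can always await them without FutureOr plumbing. A hypothetical implementation conforming to the new interface is sketched below; XorDHTRecordCrypto is a made-up, non-secure stand-in (useful at most for tests), and the package import path is assumed.

import 'dart:typed_data';

import 'package:veilid_support/veilid_support.dart'; // assumed import path

// Toy obfuscation only; NOT real encryption.
class XorDHTRecordCrypto implements DHTRecordCrypto {
  const XorDHTRecordCrypto(this._pad);
  final int _pad;

  Uint8List _xor(Uint8List data) =>
      Uint8List.fromList([for (final b in data) b ^ _pad]);

  @override
  Future<Uint8List> encrypt(Uint8List data, int subkey) async => _xor(data);

  @override
  Future<Uint8List> decrypt(Uint8List data, int subkey) async => _xor(data);
}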
|
|
@@ -8,7 +8,7 @@ import '../../veilid_support.dart';
|
|||
|
||||
typedef InitialStateFunction<T> = Future<T?> Function(DHTRecord);
|
||||
typedef StateFunction<T> = Future<T?> Function(
|
||||
DHTRecord, List<ValueSubkeyRange>, Uint8List);
|
||||
DHTRecord, List<ValueSubkeyRange>, Uint8List?);
|
||||
|
||||
class DHTRecordCubit<T> extends Cubit<AsyncValue<T>> {
|
||||
DHTRecordCubit({
|
||||
|
@@ -28,9 +28,8 @@ class DHTRecordCubit<T> extends Cubit<AsyncValue<T>> {
|
|||
|
||||
DHTRecordCubit.value({
|
||||
required DHTRecord record,
|
||||
required Future<T?> Function(DHTRecord) initialStateFunction,
|
||||
required Future<T?> Function(DHTRecord, List<ValueSubkeyRange>, Uint8List)
|
||||
stateFunction,
|
||||
required InitialStateFunction<T> initialStateFunction,
|
||||
required StateFunction<T> stateFunction,
|
||||
}) : _record = record,
|
||||
_stateFunction = stateFunction,
|
||||
_wantsCloseRecord = false,
|
||||
|
@@ -41,9 +40,8 @@ class DHTRecordCubit<T> extends Cubit<AsyncValue<T>> {
|
|||
}
|
||||
|
||||
Future<void> _init(
|
||||
Future<T?> Function(DHTRecord) initialStateFunction,
|
||||
Future<T?> Function(DHTRecord, List<ValueSubkeyRange>, Uint8List)
|
||||
stateFunction,
|
||||
InitialStateFunction<T> initialStateFunction,
|
||||
StateFunction<T> stateFunction,
|
||||
) async {
|
||||
// Make initial state update
|
||||
try {
|
||||
|
@@ -142,7 +140,7 @@ class DefaultDHTRecordCubit<T> extends DHTRecordCubit<T> {
|
|||
if (subkeys.containsSubkey(defaultSubkey)) {
|
||||
final Uint8List data;
|
||||
final firstSubkey = subkeys.firstOrNull!.low;
|
||||
if (firstSubkey != defaultSubkey) {
|
||||
if (firstSubkey != defaultSubkey || updatedata == null) {
|
||||
final maybeData = await record.get(forceRefresh: true);
|
||||
if (maybeData == null) {
|
||||
return null;
|
||||
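The InitialStateFunction/StateFunction typedefs now hand the state function a nullable Uint8List?, matching the nullable watch payloads above. Below is a hedged sketch of a state function under the new signature; MyState, decodeState and the import path are placeholders.

import 'dart:typed_data';

import 'package:veilid_support/veilid_support.dart'; // assumed import path

class MyState {
  MyState(this.bytes);
  final Uint8List bytes;
}

// Placeholder decoder; a real cubit would parse a protobuf or JSON here.
MyState decodeState(Uint8List data) => MyState(data);

Future<MyState?> myStateFunction(DHTRecord record,
    List<ValueSubkeyRange> subkeys, Uint8List? updatedata) async {
  // A null payload means the change notification carried no usable data for
  // this subkey; refresh it explicitly, as DefaultDHTRecordCubit does above.
  final data = updatedata ?? await record.get(forceRefresh: true);
  if (data == null) {
    return null;
  }
  return decodeState(data);
}

Such a function can then be supplied as the stateFunction of DHTRecordCubit.value.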
|
|
|
@@ -1,15 +1,21 @@
|
|||
import 'dart:async';
|
||||
import 'dart:math';
|
||||
import 'dart:typed_data';
|
||||
|
||||
import 'package:async_tools/async_tools.dart';
|
||||
import 'package:equatable/equatable.dart';
|
||||
import 'package:fast_immutable_collections/fast_immutable_collections.dart';
|
||||
import 'package:freezed_annotation/freezed_annotation.dart';
|
||||
import 'package:mutex/mutex.dart';
|
||||
import 'package:protobuf/protobuf.dart';
|
||||
|
||||
import '../../../../veilid_support.dart';
|
||||
|
||||
part 'dht_record_pool.freezed.dart';
|
||||
part 'dht_record_pool.g.dart';
|
||||
|
||||
part 'dht_record.dart';
|
||||
|
||||
/// Record pool that manages DHTRecords and allows for tagged deletion
|
||||
@freezed
|
||||
class DHTRecordPoolAllocations with _$DHTRecordPoolAllocations {
|
||||
|
@@ -39,13 +45,14 @@ class OwnedDHTRecordPointer with _$OwnedDHTRecordPointer {
|
|||
}
|
||||
|
||||
/// Watch state
|
||||
@immutable
|
||||
class WatchState extends Equatable {
|
||||
const WatchState(
|
||||
{required this.subkeys,
|
||||
required this.expiration,
|
||||
required this.count,
|
||||
this.realExpiration});
|
||||
final IList<ValueSubkeyRange>? subkeys;
|
||||
final List<ValueSubkeyRange>? subkeys;
|
||||
final Timestamp? expiration;
|
||||
final int? count;
|
||||
final Timestamp? realExpiration;
|
||||
|
@@ -54,23 +61,51 @@ class WatchState extends Equatable {
|
|||
List<Object?> get props => [subkeys, expiration, count, realExpiration];
|
||||
}
|
||||
|
||||
/// Data shared amongst all DHTRecord instances
|
||||
class SharedDHTRecordData {
|
||||
SharedDHTRecordData(
|
||||
{required this.recordDescriptor,
|
||||
required this.defaultWriter,
|
||||
required this.defaultRoutingContext});
|
||||
DHTRecordDescriptor recordDescriptor;
|
||||
KeyPair? defaultWriter;
|
||||
VeilidRoutingContext defaultRoutingContext;
|
||||
Map<int, int> subkeySeqCache = {};
|
||||
bool inWatchStateUpdate = false;
|
||||
bool needsWatchStateUpdate = false;
|
||||
}
|
||||
|
||||
// Per opened record data
|
||||
class OpenedRecordInfo {
|
||||
OpenedRecordInfo(
|
||||
{required DHTRecordDescriptor recordDescriptor,
|
||||
required KeyPair? defaultWriter,
|
||||
required VeilidRoutingContext defaultRoutingContext})
|
||||
: shared = SharedDHTRecordData(
|
||||
recordDescriptor: recordDescriptor,
|
||||
defaultWriter: defaultWriter,
|
||||
defaultRoutingContext: defaultRoutingContext);
|
||||
SharedDHTRecordData shared;
|
||||
Set<DHTRecord> records = {};
|
||||
}
|
||||
|
||||
class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
||||
DHTRecordPool._(Veilid veilid, VeilidRoutingContext routingContext)
|
||||
: _state = DHTRecordPoolAllocations(
|
||||
childrenByParent: IMap(),
|
||||
parentByChild: IMap(),
|
||||
rootRecords: ISet()),
|
||||
_opened = <TypedKey, DHTRecord>{},
|
||||
_locks = AsyncTagLock(),
|
||||
_mutex = Mutex(),
|
||||
_opened = <TypedKey, OpenedRecordInfo>{},
|
||||
_routingContext = routingContext,
|
||||
_veilid = veilid;
|
||||
|
||||
// Persistent DHT record list
|
||||
DHTRecordPoolAllocations _state;
|
||||
// Lock table to ensure we don't open the same record more than once
|
||||
final AsyncTagLock<TypedKey> _locks;
|
||||
// Create/open Mutex
|
||||
final Mutex _mutex;
|
||||
// Which DHT records are currently open
|
||||
final Map<TypedKey, DHTRecord> _opened;
|
||||
final Map<TypedKey, OpenedRecordInfo> _opened;
|
||||
// Default routing context to use for new keys
|
||||
final VeilidRoutingContext _routingContext;
|
||||
// Convenience accessor
|
||||
|
@@ -107,30 +142,106 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
|
||||
Veilid get veilid => _veilid;
|
||||
|
||||
void _recordOpened(DHTRecord record) {
|
||||
if (_opened.containsKey(record.key)) {
|
||||
throw StateError('record already opened');
|
||||
Future<OpenedRecordInfo> _recordCreateInner(
|
||||
{required VeilidRoutingContext dhtctx,
|
||||
required DHTSchema schema,
|
||||
KeyPair? writer,
|
||||
TypedKey? parent}) async {
|
||||
assert(_mutex.isLocked, 'should be locked here');
|
||||
|
||||
// Create the record
|
||||
final recordDescriptor = await dhtctx.createDHTRecord(schema);
|
||||
|
||||
// Reopen if a writer is specified to ensure
|
||||
// we switch the default writer
|
||||
if (writer != null) {
|
||||
await dhtctx.openDHTRecord(recordDescriptor.key, writer: writer);
|
||||
}
|
||||
_opened[record.key] = record;
|
||||
final openedRecordInfo = OpenedRecordInfo(
|
||||
recordDescriptor: recordDescriptor,
|
||||
defaultWriter: writer ?? recordDescriptor.ownerKeyPair(),
|
||||
defaultRoutingContext: dhtctx);
|
||||
_opened[recordDescriptor.key] = openedRecordInfo;
|
||||
|
||||
// Register the dependency
|
||||
await _addDependencyInner(parent, recordDescriptor.key);
|
||||
|
||||
return openedRecordInfo;
|
||||
}
|
||||
|
||||
void recordClosed(TypedKey key) {
|
||||
final rec = _opened.remove(key);
|
||||
if (rec == null) {
|
||||
throw StateError('record already closed');
|
||||
Future<OpenedRecordInfo> _recordOpenInner(
|
||||
{required VeilidRoutingContext dhtctx,
|
||||
required TypedKey recordKey,
|
||||
KeyPair? writer,
|
||||
TypedKey? parent}) async {
|
||||
assert(_mutex.isLocked, 'should be locked here');
|
||||
|
||||
// If we are opening a key that already exists
|
||||
// make sure we are using the same parent if one was specified
|
||||
_validateParent(parent, recordKey);
|
||||
|
||||
// See if this has been opened yet
|
||||
final openedRecordInfo = _opened[recordKey];
|
||||
if (openedRecordInfo == null) {
|
||||
// Fresh open, just open the record
|
||||
final recordDescriptor =
|
||||
await dhtctx.openDHTRecord(recordKey, writer: writer);
|
||||
final newOpenedRecordInfo = OpenedRecordInfo(
|
||||
recordDescriptor: recordDescriptor,
|
||||
defaultWriter: writer,
|
||||
defaultRoutingContext: dhtctx);
|
||||
_opened[recordDescriptor.key] = newOpenedRecordInfo;
|
||||
|
||||
// Register the dependency
|
||||
await _addDependencyInner(parent, recordKey);
|
||||
|
||||
return newOpenedRecordInfo;
|
||||
}
|
||||
_locks.unlockTag(key);
|
||||
|
||||
// Already opened
|
||||
|
||||
// See if we need to reopen the record with a default writer and possibly
|
||||
// a different routing context
|
||||
if (writer != null && openedRecordInfo.shared.defaultWriter == null) {
|
||||
final newRecordDescriptor =
|
||||
await dhtctx.openDHTRecord(recordKey, writer: writer);
|
||||
openedRecordInfo.shared.defaultWriter = writer;
|
||||
openedRecordInfo.shared.defaultRoutingContext = dhtctx;
|
||||
if (openedRecordInfo.shared.recordDescriptor.ownerSecret == null) {
|
||||
openedRecordInfo.shared.recordDescriptor = newRecordDescriptor;
|
||||
}
|
||||
}
|
||||
|
||||
// Register the dependency
|
||||
await _addDependencyInner(parent, recordKey);
|
||||
|
||||
return openedRecordInfo;
|
||||
}
|
||||
|
||||
Future<void> deleteDeep(TypedKey parent) async {
|
||||
// Collect all dependencies
|
||||
Future<void> _recordClosed(DHTRecord record) async {
|
||||
await _mutex.protect(() async {
|
||||
final key = record.key;
|
||||
final openedRecordInfo = _opened[key];
|
||||
if (openedRecordInfo == null ||
|
||||
!openedRecordInfo.records.remove(record)) {
|
||||
throw StateError('record already closed');
|
||||
}
|
||||
if (openedRecordInfo.records.isEmpty) {
|
||||
await _routingContext.closeDHTRecord(key);
|
||||
_opened.remove(key);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Future<void> delete(TypedKey recordKey) async {
|
||||
// Collect all dependencies (including the record itself)
|
||||
final allDeps = <TypedKey>[];
|
||||
final currentDeps = [parent];
|
||||
final currentDeps = [recordKey];
|
||||
while (currentDeps.isNotEmpty) {
|
||||
final nextDep = currentDeps.removeLast();
|
||||
|
||||
// Remove this child from its parent
|
||||
await _removeDependency(nextDep);
|
||||
await _removeDependencyInner(nextDep);
|
||||
|
||||
allDeps.add(nextDep);
|
||||
final childDeps =
|
||||
|
@@ -138,18 +249,27 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
currentDeps.addAll(childDeps);
|
||||
}
|
||||
|
||||
// Delete all dependent records in parallel
|
||||
final allFutures = <Future<void>>[];
|
||||
// Delete all dependent records in parallel (including the record itself)
|
||||
final allDeleteFutures = <Future<void>>[];
|
||||
final allCloseFutures = <Future<void>>[];
|
||||
final allDeletedRecords = <DHTRecord>{};
|
||||
for (final dep in allDeps) {
|
||||
// If record is opened, close it first
|
||||
final rec = _opened[dep];
|
||||
if (rec != null) {
|
||||
await rec.close();
|
||||
final openinfo = _opened[dep];
|
||||
if (openinfo != null) {
|
||||
for (final rec in openinfo.records) {
|
||||
allCloseFutures.add(rec.close());
|
||||
allDeletedRecords.add(rec);
|
||||
}
|
||||
}
|
||||
// Then delete
|
||||
allFutures.add(_routingContext.deleteDHTRecord(dep));
|
||||
allDeleteFutures.add(_routingContext.deleteDHTRecord(dep));
|
||||
}
|
||||
await Future.wait(allCloseFutures);
|
||||
await Future.wait(allDeleteFutures);
|
||||
for (final deletedRecord in allDeletedRecords) {
|
||||
deletedRecord._markDeleted();
|
||||
}
|
||||
await Future.wait(allFutures);
|
||||
}
|
||||
|
||||
void _validateParent(TypedKey? parent, TypedKey child) {
|
||||
|
@@ -169,7 +289,8 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
}
|
||||
}
|
||||
|
||||
Future<void> _addDependency(TypedKey? parent, TypedKey child) async {
|
||||
Future<void> _addDependencyInner(TypedKey? parent, TypedKey child) async {
|
||||
assert(_mutex.isLocked, 'should be locked here');
|
||||
if (parent == null) {
|
||||
if (_state.rootRecords.contains(child)) {
|
||||
// Dependency already added
|
||||
|
@@ -191,7 +312,8 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
}
|
||||
}
|
||||
|
||||
Future<void> _removeDependency(TypedKey child) async {
|
||||
Future<void> _removeDependencyInner(TypedKey child) async {
|
||||
assert(_mutex.isLocked, 'should be locked here');
|
||||
if (_state.rootRecords.contains(child)) {
|
||||
_state = await store(
|
||||
_state.copyWith(rootRecords: _state.rootRecords.remove(child)));
|
||||
|
@@ -226,57 +348,52 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
int defaultSubkey = 0,
|
||||
DHTRecordCrypto? crypto,
|
||||
KeyPair? writer,
|
||||
}) async {
|
||||
final dhtctx = routingContext ?? _routingContext;
|
||||
final recordDescriptor = await dhtctx.createDHTRecord(schema);
|
||||
}) async =>
|
||||
_mutex.protect(() async {
|
||||
final dhtctx = routingContext ?? _routingContext;
|
||||
|
||||
await _locks.lockTag(recordDescriptor.key);
|
||||
final openedRecordInfo = await _recordCreateInner(
|
||||
dhtctx: dhtctx, schema: schema, writer: writer, parent: parent);
|
||||
|
||||
final rec = DHTRecord(
|
||||
routingContext: dhtctx,
|
||||
recordDescriptor: recordDescriptor,
|
||||
defaultSubkey: defaultSubkey,
|
||||
writer: writer ?? recordDescriptor.ownerKeyPair(),
|
||||
crypto: crypto ??
|
||||
await DHTRecordCryptoPrivate.fromTypedKeyPair(
|
||||
recordDescriptor.ownerTypedKeyPair()!));
|
||||
final rec = DHTRecord(
|
||||
routingContext: dhtctx,
|
||||
defaultSubkey: defaultSubkey,
|
||||
sharedDHTRecordData: openedRecordInfo.shared,
|
||||
writer: writer ??
|
||||
openedRecordInfo.shared.recordDescriptor.ownerKeyPair(),
|
||||
crypto: crypto ??
|
||||
await DHTRecordCryptoPrivate.fromTypedKeyPair(openedRecordInfo
|
||||
.shared.recordDescriptor
|
||||
.ownerTypedKeyPair()!));
|
||||
|
||||
await _addDependency(parent, rec.key);
|
||||
openedRecordInfo.records.add(rec);
|
||||
|
||||
_recordOpened(rec);
|
||||
|
||||
return rec;
|
||||
}
|
||||
return rec;
|
||||
});
|
||||
|
||||
/// Open a DHTRecord readonly
|
||||
Future<DHTRecord> openRead(TypedKey recordKey,
|
||||
{VeilidRoutingContext? routingContext,
|
||||
TypedKey? parent,
|
||||
int defaultSubkey = 0,
|
||||
DHTRecordCrypto? crypto}) async {
|
||||
await _locks.lockTag(recordKey);
|
||||
{VeilidRoutingContext? routingContext,
|
||||
TypedKey? parent,
|
||||
int defaultSubkey = 0,
|
||||
DHTRecordCrypto? crypto}) async =>
|
||||
_mutex.protect(() async {
|
||||
final dhtctx = routingContext ?? _routingContext;
|
||||
|
||||
final dhtctx = routingContext ?? _routingContext;
|
||||
final openedRecordInfo = await _recordOpenInner(
|
||||
dhtctx: dhtctx, recordKey: recordKey, parent: parent);
|
||||
|
||||
late final DHTRecord rec;
|
||||
// If we are opening a key that already exists
|
||||
// make sure we are using the same parent if one was specified
|
||||
_validateParent(parent, recordKey);
|
||||
final rec = DHTRecord(
|
||||
routingContext: dhtctx,
|
||||
defaultSubkey: defaultSubkey,
|
||||
sharedDHTRecordData: openedRecordInfo.shared,
|
||||
writer: null,
|
||||
crypto: crypto ?? const DHTRecordCryptoPublic());
|
||||
|
||||
// Open from the veilid api
|
||||
final recordDescriptor = await dhtctx.openDHTRecord(recordKey, null);
|
||||
rec = DHTRecord(
|
||||
routingContext: dhtctx,
|
||||
recordDescriptor: recordDescriptor,
|
||||
defaultSubkey: defaultSubkey,
|
||||
crypto: crypto ?? const DHTRecordCryptoPublic());
|
||||
openedRecordInfo.records.add(rec);
|
||||
|
||||
// Register the dependency
|
||||
await _addDependency(parent, rec.key);
|
||||
_recordOpened(rec);
|
||||
|
||||
return rec;
|
||||
}
|
||||
return rec;
|
||||
});
|
||||
|
||||
/// Open a DHTRecord writable
|
||||
Future<DHTRecord> openWrite(
|
||||
|
@@ -286,33 +403,29 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
TypedKey? parent,
|
||||
int defaultSubkey = 0,
|
||||
DHTRecordCrypto? crypto,
|
||||
}) async {
|
||||
await _locks.lockTag(recordKey);
|
||||
}) async =>
|
||||
_mutex.protect(() async {
|
||||
final dhtctx = routingContext ?? _routingContext;
|
||||
|
||||
final dhtctx = routingContext ?? _routingContext;
|
||||
final openedRecordInfo = await _recordOpenInner(
|
||||
dhtctx: dhtctx,
|
||||
recordKey: recordKey,
|
||||
parent: parent,
|
||||
writer: writer);
|
||||
|
||||
late final DHTRecord rec;
|
||||
// If we are opening a key that already exists
|
||||
// make sure we are using the same parent if one was specified
|
||||
_validateParent(parent, recordKey);
|
||||
final rec = DHTRecord(
|
||||
routingContext: dhtctx,
|
||||
defaultSubkey: defaultSubkey,
|
||||
writer: writer,
|
||||
sharedDHTRecordData: openedRecordInfo.shared,
|
||||
crypto: crypto ??
|
||||
await DHTRecordCryptoPrivate.fromTypedKeyPair(
|
||||
TypedKeyPair.fromKeyPair(recordKey.kind, writer)));
|
||||
|
||||
// Open from the veilid api
|
||||
final recordDescriptor = await dhtctx.openDHTRecord(recordKey, writer);
|
||||
rec = DHTRecord(
|
||||
routingContext: dhtctx,
|
||||
recordDescriptor: recordDescriptor,
|
||||
defaultSubkey: defaultSubkey,
|
||||
writer: writer,
|
||||
crypto: crypto ??
|
||||
await DHTRecordCryptoPrivate.fromTypedKeyPair(
|
||||
TypedKeyPair.fromKeyPair(recordKey.kind, writer)));
|
||||
openedRecordInfo.records.add(rec);
|
||||
|
||||
// Register the dependency if specified
|
||||
await _addDependency(parent, rec.key);
|
||||
_recordOpened(rec);
|
||||
|
||||
return rec;
|
||||
}
|
||||
return rec;
|
||||
});
|
||||
|
||||
/// Open a DHTRecord owned
|
||||
/// This is the same as writable but uses an OwnedDHTRecordPointer
|
||||
|
@@ -336,9 +449,6 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
crypto: crypto,
|
||||
);
|
||||
|
||||
/// Look up an opened DHTRecord
|
||||
DHTRecord? getOpenedRecord(TypedKey recordKey) => _opened[recordKey];
|
||||
|
||||
/// Get the parent of a DHTRecord key if it exists
|
||||
TypedKey? getParentRecordKey(TypedKey child) {
|
||||
final childJson = child.toJson();
|
||||
|
@@ -351,33 +461,107 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
// Change
|
||||
for (final kv in _opened.entries) {
|
||||
if (kv.key == updateValueChange.key) {
|
||||
kv.value.addRemoteValueChange(updateValueChange);
|
||||
for (final rec in kv.value.records) {
|
||||
rec.addRemoteValueChange(updateValueChange);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
final now = Veilid.instance.now().value;
|
||||
// Expired, process renewal if desired
|
||||
for (final kv in _opened.entries) {
|
||||
if (kv.key == updateValueChange.key) {
|
||||
// Renew watch state
|
||||
kv.value.needsWatchStateUpdate = true;
|
||||
for (final entry in _opened.entries) {
|
||||
final openedKey = entry.key;
|
||||
final openedRecordInfo = entry.value;
|
||||
|
||||
// See if the watch had an expiration and if it has expired
|
||||
// otherwise the renewal will keep the same parameters
|
||||
final watchState = kv.value.watchState;
|
||||
if (watchState != null) {
|
||||
final exp = watchState.expiration;
|
||||
if (exp != null && exp.value < Veilid.instance.now().value) {
|
||||
// Has expiration, and it has expired, clear watch state
|
||||
kv.value.watchState = null;
|
||||
if (openedKey == updateValueChange.key) {
|
||||
// Renew watch state for each opened record
|
||||
for (final rec in openedRecordInfo.records) {
|
||||
// See if the watch had an expiration and if it has expired
|
||||
// otherwise the renewal will keep the same parameters
|
||||
final watchState = rec.watchState;
|
||||
if (watchState != null) {
|
||||
final exp = watchState.expiration;
|
||||
if (exp != null && exp.value < now) {
|
||||
// Has expiration, and it has expired, clear watch state
|
||||
rec.watchState = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
openedRecordInfo.shared.needsWatchStateUpdate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WatchState? _collectUnionWatchState(Iterable<DHTRecord> records) {
|
||||
// Collect union of opened record watch states
|
||||
int? totalCount;
|
||||
Timestamp? maxExpiration;
|
||||
List<ValueSubkeyRange>? allSubkeys;
|
||||
|
||||
var noExpiration = false;
|
||||
var everySubkey = false;
|
||||
var cancelWatch = true;
|
||||
|
||||
for (final rec in records) {
|
||||
final ws = rec.watchState;
|
||||
if (ws != null) {
|
||||
cancelWatch = false;
|
||||
final wsCount = ws.count;
|
||||
if (wsCount != null) {
|
||||
totalCount = (totalCount ?? 0) + min(wsCount, 0x7FFFFFFF);
|
||||
totalCount = min(totalCount, 0x7FFFFFFF);
|
||||
}
|
||||
final wsExp = ws.expiration;
|
||||
if (wsExp != null && !noExpiration) {
|
||||
maxExpiration = maxExpiration == null
|
||||
? wsExp
|
||||
: wsExp.value > maxExpiration.value
|
||||
? wsExp
|
||||
: maxExpiration;
|
||||
} else {
|
||||
noExpiration = true;
|
||||
}
|
||||
final wsSubkeys = ws.subkeys;
|
||||
if (wsSubkeys != null && !everySubkey) {
|
||||
allSubkeys = allSubkeys == null
|
||||
? wsSubkeys
|
||||
: allSubkeys.unionSubkeys(wsSubkeys);
|
||||
} else {
|
||||
everySubkey = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (noExpiration) {
|
||||
maxExpiration = null;
|
||||
}
|
||||
if (everySubkey) {
|
||||
allSubkeys = null;
|
||||
}
|
||||
if (cancelWatch) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return WatchState(
|
||||
subkeys: allSubkeys, expiration: maxExpiration, count: totalCount);
|
||||
}
|
||||
|
||||
void _updateWatchExpirations(
|
||||
Iterable<DHTRecord> records, Timestamp realExpiration) {
|
||||
for (final rec in records) {
|
||||
final ws = rec.watchState;
|
||||
if (ws != null) {
|
||||
rec.watchState = WatchState(
|
||||
subkeys: ws.subkeys,
|
||||
expiration: ws.expiration,
|
||||
count: ws.count,
|
||||
realExpiration: realExpiration);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Ticker to check watch state change requests
|
||||
Future<void> tick() async {
|
||||
if (inTick) {
|
||||
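Because several handles can now watch the same record, tick() has to issue a single network watch that satisfies all of them; _collectUnionWatchState above merges the per-handle states. A simplified stand-alone sketch of that merge follows, using plain ints and sets in place of Timestamp and ValueSubkeyRange, and with the count accumulation parenthesized so the per-handle counts actually add up.

import 'dart:math';

class WatchSketch {
  const WatchSketch({this.count, this.expiration, this.subkeys});
  final int? count;        // null = default count
  final int? expiration;   // null = no expiration requested
  final Set<int>? subkeys; // null = watch every subkey
}

WatchSketch? unionWatchStates(Iterable<WatchSketch?> perHandleStates) {
  int? totalCount;
  int? maxExpiration;
  Set<int>? allSubkeys;
  var noExpiration = false;
  var everySubkey = false;
  var cancelWatch = true;

  for (final ws in perHandleStates) {
    if (ws == null) {
      continue; // this handle is not watching
    }
    cancelWatch = false;
    final count = ws.count;
    if (count != null) {
      totalCount = min((totalCount ?? 0) + count, 0x7FFFFFFF);
    }
    final exp = ws.expiration;
    if (exp != null && !noExpiration) {
      maxExpiration =
          (maxExpiration == null || exp > maxExpiration) ? exp : maxExpiration;
    } else if (exp == null) {
      noExpiration = true;
    }
    final sk = ws.subkeys;
    if (sk != null && !everySubkey) {
      allSubkeys = {...?allSubkeys, ...sk};
    } else if (sk == null) {
      everySubkey = true;
    }
  }
  if (cancelWatch) {
    return null; // nobody is watching: cancel the network watch
  }
  return WatchSketch(
      count: totalCount,
      expiration: noExpiration ? null : maxExpiration,
      subkeys: everySubkey ? null : allSubkeys);
}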
|
@@ -386,53 +570,55 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
|
|||
inTick = true;
|
||||
try {
|
||||
// See if any opened records need watch state changes
|
||||
final unord = List<Future<void>>.empty(growable: true);
|
||||
final unord = <Future<void>>[];
|
||||
|
||||
for (final kv in _opened.entries) {
|
||||
final openedRecordKey = kv.key;
|
||||
final openedRecordInfo = kv.value;
|
||||
final dhtctx = openedRecordInfo.shared.defaultRoutingContext;
|
||||
|
||||
// Check if already updating
|
||||
if (kv.value.inWatchStateUpdate) {
|
||||
if (openedRecordInfo.shared.inWatchStateUpdate) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (kv.value.needsWatchStateUpdate) {
|
||||
kv.value.inWatchStateUpdate = true;
|
||||
if (openedRecordInfo.shared.needsWatchStateUpdate) {
|
||||
openedRecordInfo.shared.inWatchStateUpdate = true;
|
||||
|
||||
final ws = kv.value.watchState;
|
||||
if (ws == null) {
|
||||
final watchState = _collectUnionWatchState(openedRecordInfo.records);
|
||||
|
||||
// Apply watch changes for record
|
||||
if (watchState == null) {
|
||||
unord.add(() async {
|
||||
// Record needs watch cancel
|
||||
try {
|
||||
final done =
|
||||
await kv.value.routingContext.cancelDHTWatch(kv.key);
|
||||
final done = await dhtctx.cancelDHTWatch(openedRecordKey);
|
||||
assert(done,
|
||||
'should always be done when cancelling whole subkey range');
|
||||
kv.value.needsWatchStateUpdate = false;
|
||||
openedRecordInfo.shared.needsWatchStateUpdate = false;
|
||||
} on VeilidAPIException {
|
||||
// Failed to cancel DHT watch, try again next tick
|
||||
}
|
||||
kv.value.inWatchStateUpdate = false;
|
||||
openedRecordInfo.shared.inWatchStateUpdate = false;
|
||||
}());
|
||||
} else {
|
||||
unord.add(() async {
|
||||
// Record needs new watch
|
||||
try {
|
||||
final realExpiration = await kv.value.routingContext
|
||||
.watchDHTValues(kv.key,
|
||||
subkeys: ws.subkeys?.toList(),
|
||||
count: ws.count,
|
||||
expiration: ws.expiration);
|
||||
kv.value.needsWatchStateUpdate = false;
|
||||
final realExpiration = await dhtctx.watchDHTValues(
|
||||
openedRecordKey,
|
||||
subkeys: watchState.subkeys?.toList(),
|
||||
count: watchState.count,
|
||||
expiration: watchState.expiration);
|
||||
openedRecordInfo.shared.needsWatchStateUpdate = false;
|
||||
|
||||
// Update watch state with real expiration
|
||||
kv.value.watchState = WatchState(
|
||||
subkeys: ws.subkeys,
|
||||
expiration: ws.expiration,
|
||||
count: ws.count,
|
||||
realExpiration: realExpiration);
|
||||
// Update watch states with real expiration
|
||||
_updateWatchExpirations(
|
||||
openedRecordInfo.records, realExpiration);
|
||||
} on VeilidAPIException {
|
||||
// Failed to set DHT watch, try again next tick
|
||||
}
|
||||
kv.value.inWatchStateUpdate = false;
|
||||
openedRecordInfo.shared.inWatchStateUpdate = false;
|
||||
}());
|
||||
}
|
||||
}
|
||||
|
|
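From the caller's side, the practical effect of this file's changes is that create/openRead/openWrite can be invoked for the same key from independent components, each receiving its own DHTRecord handle. A hedged usage sketch: the dflt schema parameters and payload bytes are illustrative, encryption is disabled via DHTRecordCryptoPublic for brevity, and the import path is assumed.

import 'dart:typed_data';

import 'package:veilid_support/veilid_support.dart'; // assumed import path

Future<void> multiOpenDemo() async {
  final pool = DHTRecordPool.instance;

  // Creator gets a writable handle (the default writer is the owner keypair).
  final writerRec = await pool.create(
      schema: DHTSchema.dflt(oCnt: 1), crypto: const DHTRecordCryptoPublic());
  await writerRec.eventualWriteBytes(Uint8List.fromList([1, 2, 3]));

  // Another component opens the same key read-only; both handles share one
  // underlying open record and one subkey sequence-number cache.
  final readerRec = await pool.openRead(writerRec.key,
      crypto: const DHTRecordCryptoPublic());
  print(await readerRec.get());

  await readerRec.close(); // underlying record stays open
  await writerRec.close(); // last handle closed: record is really closed
}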
|
@@ -86,16 +86,12 @@ class DHTShortArray {
|
|||
final schema = DHTSchema.smpl(
|
||||
oCnt: 0,
|
||||
members: [DHTSchemaMember(mKey: smplWriter.key, mCnt: stride + 1)]);
|
||||
final dhtCreateRecord = await pool.create(
|
||||
dhtRecord = await pool.create(
|
||||
parent: parent,
|
||||
routingContext: routingContext,
|
||||
schema: schema,
|
||||
crypto: crypto,
|
||||
writer: smplWriter);
|
||||
// Reopen with SMPL writer
|
||||
await dhtCreateRecord.close();
|
||||
dhtRecord = await pool.openWrite(dhtCreateRecord.key, smplWriter,
|
||||
parent: parent, routingContext: routingContext, crypto: crypto);
|
||||
} else {
|
||||
final schema = DHTSchema.dflt(oCnt: stride + 1);
|
||||
dhtRecord = await pool.create(
|