From c4d25fecb0e226b87ed8042bae1fb4e5ac6d70d2 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Mon, 13 May 2024 10:03:25 -0400 Subject: [PATCH 1/7] progress on dht_log --- .../cubits/single_contact_messages_cubit.dart | 4 +- lib/chat_list/cubits/chat_list_cubit.dart | 16 +- .../cubits/contact_invitation_list_cubit.dart | 10 +- lib/contacts/cubits/contact_list_cubit.dart | 10 +- lib/contacts/cubits/conversation_cubit.dart | 2 +- packages/veilid_support/build.yaml | 10 + .../test_dht_short_array.dart | 18 +- .../lib/dht_support/dht_support.dart | 2 + .../lib/dht_support/proto/dht.proto | 26 +- .../lib/dht_support/src/dht_log/barrel.dart | 2 + .../lib/dht_support/src/dht_log/dht_log.dart | 273 +++++++++ .../src/dht_log/dht_log_append.dart | 42 ++ .../src/dht_log/dht_log_cubit.dart | 119 ++++ .../dht_support/src/dht_log/dht_log_read.dart | 103 ++++ .../src/dht_log/dht_log_spine.dart | 527 ++++++++++++++++++ .../dht_record/default_dht_record_cubit.dart | 3 +- .../src/dht_record/dht_record.dart | 188 ++++--- .../src/dht_record/dht_record_cubit.dart | 2 +- .../src/dht_short_array/dht_short_array.dart | 88 ++- .../dht_short_array_cubit.dart | 34 +- .../dht_short_array/dht_short_array_head.dart | 21 +- .../dht_short_array/dht_short_array_read.dart | 99 +--- .../dht_short_array_write.dart | 128 +---- .../src/interfaces/dht_append_truncate.dart | 44 ++ .../src/interfaces/dht_openable.dart | 49 ++ .../src/interfaces/dht_random_read.dart | 63 +++ .../src/interfaces/dht_random_write.dart | 104 ++++ .../src/interfaces/exceptions.dart | 5 + .../src/interfaces/interfaces.dart | 4 + packages/veilid_support/lib/proto/dht.pb.dart | 124 ++--- .../veilid_support/lib/proto/dht.pbjson.dart | 32 +- packages/veilid_support/lib/src/identity.dart | 4 +- packages/veilid_support/lib/src/output.dart | 33 ++ .../veilid_support/lib/veilid_support.dart | 1 + pubspec.lock | 56 +- pubspec.yaml | 10 +- 36 files changed, 1754 insertions(+), 502 deletions(-) create mode 100644 
packages/veilid_support/build.yaml create mode 100644 packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart create mode 100644 packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart create mode 100644 packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart create mode 100644 packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart create mode 100644 packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart create mode 100644 packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart create mode 100644 packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart create mode 100644 packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart create mode 100644 packages/veilid_support/lib/dht_support/src/interfaces/dht_random_read.dart create mode 100644 packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart create mode 100644 packages/veilid_support/lib/dht_support/src/interfaces/exceptions.dart create mode 100644 packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart create mode 100644 packages/veilid_support/lib/src/output.dart diff --git a/lib/chat/cubits/single_contact_messages_cubit.dart b/lib/chat/cubits/single_contact_messages_cubit.dart index de281fe..72c820e 100644 --- a/lib/chat/cubits/single_contact_messages_cubit.dart +++ b/lib/chat/cubits/single_contact_messages_cubit.dart @@ -227,7 +227,7 @@ class SingleContactMessagesCubit extends Cubit { } Future _reconcileMessagesInner( - {required DHTShortArrayWrite reconciledMessagesWriter, + {required DHTRandomReadWrite reconciledMessagesWriter, required IList messages}) async { // Ensure remoteMessages is sorted by timestamp final newMessages = messages @@ -236,7 +236,7 @@ class SingleContactMessagesCubit extends Cubit { // Existing messages will always be sorted by timestamp so merging is easy final existingMessages = await reconciledMessagesWriter - 
.getAllItemsProtobuf(proto.Message.fromBuffer); + .getItemRangeProtobuf(proto.Message.fromBuffer, 0); if (existingMessages == null) { throw Exception( 'Could not load existing reconciled messages at this time'); diff --git a/lib/chat_list/cubits/chat_list_cubit.dart b/lib/chat_list/cubits/chat_list_cubit.dart index 5023d51..9204a0a 100644 --- a/lib/chat_list/cubits/chat_list_cubit.dart +++ b/lib/chat_list/cubits/chat_list_cubit.dart @@ -92,31 +92,29 @@ class ChatListCubit extends DHTShortArrayCubit // Remove Chat from account's list // if this fails, don't keep retrying, user can try again later - final (deletedItem, success) = + final deletedItem = // Ensure followers get their changes before we return await syncFollowers(() => operateWrite((writer) async { if (activeChatCubit.state == remoteConversationRecordKey) { activeChatCubit.setActiveChat(null); } for (var i = 0; i < writer.length; i++) { - final cbuf = await writer.getItem(i); - if (cbuf == null) { + final c = + await writer.getItemProtobuf(proto.Chat.fromBuffer, i); + if (c == null) { throw Exception('Failed to get chat'); } - final c = proto.Chat.fromBuffer(cbuf); if (c.remoteConversationRecordKey == remoteConversationKey) { // Found the right chat - if (await writer.tryRemoveItem(i) != null) { - return c; - } - return null; + await writer.removeItem(i); + return c; } } return null; })); // Since followers are synced, we can safetly remove the reconciled // chat record now - if (success && deletedItem != null) { + if (deletedItem != null) { try { await DHTRecordPool.instance.deleteRecord( deletedItem.reconciledChatRecord.toVeilid().recordKey); diff --git a/lib/contact_invitation/cubits/contact_invitation_list_cubit.dart b/lib/contact_invitation/cubits/contact_invitation_list_cubit.dart index 8e133b1..afe91c0 100644 --- a/lib/contact_invitation/cubits/contact_invitation_list_cubit.dart +++ b/lib/contact_invitation/cubits/contact_invitation_list_cubit.dart @@ -177,7 +177,7 @@ class 
ContactInvitationListCubit _activeAccountInfo.userLogin.accountRecordInfo.accountRecord.recordKey; // Remove ContactInvitationRecord from account's list - final (deletedItem, success) = await operateWrite((writer) async { + final deletedItem = await operateWrite((writer) async { for (var i = 0; i < writer.length; i++) { final item = await writer.getItemProtobuf( proto.ContactInvitationRecord.fromBuffer, i); @@ -186,16 +186,14 @@ class ContactInvitationListCubit } if (item.contactRequestInbox.recordKey.toVeilid() == contactRequestInboxRecordKey) { - if (await writer.tryRemoveItem(i) != null) { - return item; - } - return null; + await writer.removeItem(i); + return item; } } return null; }); - if (success && deletedItem != null) { + if (deletedItem != null) { // Delete the contact request inbox final contactRequestInbox = deletedItem.contactRequestInbox.toVeilid(); await (await pool.openRecordOwned(contactRequestInbox, diff --git a/lib/contacts/cubits/contact_list_cubit.dart b/lib/contacts/cubits/contact_list_cubit.dart index e30c8ed..a139b89 100644 --- a/lib/contacts/cubits/contact_list_cubit.dart +++ b/lib/contacts/cubits/contact_list_cubit.dart @@ -70,7 +70,7 @@ class ContactListCubit extends DHTShortArrayCubit { contact.remoteConversationRecordKey.toVeilid(); // Remove Contact from account's list - final (deletedItem, success) = await operateWrite((writer) async { + final deletedItem = await operateWrite((writer) async { for (var i = 0; i < writer.length; i++) { final item = await writer.getItemProtobuf(proto.Contact.fromBuffer, i); if (item == null) { @@ -78,16 +78,14 @@ class ContactListCubit extends DHTShortArrayCubit { } if (item.remoteConversationRecordKey == contact.remoteConversationRecordKey) { - if (await writer.tryRemoveItem(i) != null) { - return item; - } - return null; + await writer.removeItem(i); + return item; } } return null; }); - if (success && deletedItem != null) { + if (deletedItem != null) { try { // Make a conversation cubit to manipulate 
the conversation final conversationCubit = ConversationCubit( diff --git a/lib/contacts/cubits/conversation_cubit.dart b/lib/contacts/cubits/conversation_cubit.dart index 253dbba..b4d8ee9 100644 --- a/lib/contacts/cubits/conversation_cubit.dart +++ b/lib/contacts/cubits/conversation_cubit.dart @@ -295,7 +295,7 @@ class ConversationCubit extends Cubit> { debugName: 'ConversationCubit::initLocalMessages::LocalMessages', parent: localConversationKey, crypto: crypto, - smplWriter: writer)) + writer: writer)) .deleteScope((messages) async => await callback(messages)); } diff --git a/packages/veilid_support/build.yaml b/packages/veilid_support/build.yaml new file mode 100644 index 0000000..84fde8c --- /dev/null +++ b/packages/veilid_support/build.yaml @@ -0,0 +1,10 @@ +targets: + $default: + sources: + exclude: + - example/** + builders: + json_serializable: + options: + explicit_to_json: true + field_rename: snake diff --git a/packages/veilid_support/example/integration_test/test_dht_short_array.dart b/packages/veilid_support/example/integration_test/test_dht_short_array.dart index ad3e22a..c2fcc2b 100644 --- a/packages/veilid_support/example/integration_test/test_dht_short_array.dart +++ b/packages/veilid_support/example/integration_test/test_dht_short_array.dart @@ -63,7 +63,7 @@ Future Function() makeTestDHTShortArrayAdd({required int stride}) => print('adding\n'); { - final (res, ok) = await arr.operateWrite((w) async { + final res = await arr.operateWrite((w) async { for (var n = 0; n < dataset.length; n++) { print('$n '); final success = await w.tryAddItem(dataset[n]); @@ -71,26 +71,28 @@ Future Function() makeTestDHTShortArrayAdd({required int stride}) => } }); expect(res, isNull); - expect(ok, isTrue); } //print('get all\n'); { - final dataset2 = await arr.operate((r) async => r.getAllItems()); + final dataset2 = await arr.operate((r) async => r.getItemRange(0)); expect(dataset2, equals(dataset)); } + { + final dataset3 = + await arr.operate((r) async => 
r.getItemRange(64, length: 128)); + expect(dataset3, equals(dataset.sublist(64, 64 + 128))); + } //print('clear\n'); { - final (res, ok) = await arr.operateWrite((w) async => w.tryClear()); - expect(res, isTrue); - expect(ok, isTrue); + await arr.operateWrite((w) async => w.clear()); } //print('get all\n'); { - final dataset3 = await arr.operate((r) async => r.getAllItems()); - expect(dataset3, isEmpty); + final dataset4 = await arr.operate((r) async => r.getItemRange(0)); + expect(dataset4, isEmpty); } await arr.delete(); diff --git a/packages/veilid_support/lib/dht_support/dht_support.dart b/packages/veilid_support/lib/dht_support/dht_support.dart index 869a267..cc2a8be 100644 --- a/packages/veilid_support/lib/dht_support/dht_support.dart +++ b/packages/veilid_support/lib/dht_support/dht_support.dart @@ -2,5 +2,7 @@ library dht_support; +export 'src/dht_log/barrel.dart'; export 'src/dht_record/barrel.dart'; export 'src/dht_short_array/barrel.dart'; +export 'src/interfaces/interfaces.dart'; diff --git a/packages/veilid_support/lib/dht_support/proto/dht.proto b/packages/veilid_support/lib/dht_support/proto/dht.proto index 023c3cf..6796753 100644 --- a/packages/veilid_support/lib/dht_support/proto/dht.proto +++ b/packages/veilid_support/lib/dht_support/proto/dht.proto @@ -23,6 +23,18 @@ message DHTData { uint32 size = 4; } + +// DHTLog - represents a ring buffer of many elements with append/truncate semantics +// Header in subkey 0 of first key follows this structure +message DHTLog { + // Position of the start of the log (oldest items) + uint32 head = 1; + // Position of the end of the log (newest items) + uint32 tail = 2; + // Stride of each segment of the dhtlog + uint32 stride = 3; +} + // DHTShortArray - represents a re-orderable collection of up to 256 individual elements // Header in subkey 0 of first key follows this structure // @@ -50,20 +62,6 @@ message DHTShortArray { // calculated through iteration } -// DHTLog - represents a long ring buffer of 
elements utilizing a multi-level -// indirection table of DHTShortArrays. - -message DHTLog { - // Keys to concatenate - repeated veilid.TypedKey keys = 1; - // Back link to another DHTLog further back - veilid.TypedKey back = 2; - // Count of subkeys in all keys in this DHTLog - repeated uint32 subkey_counts = 3; - // Total count of subkeys in all keys in this DHTLog including all backlogs - uint32 total_subkeys = 4; -} - // DataReference // Pointer to data somewhere in Veilid // Abstraction over DHTData and BlockStore diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart b/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart new file mode 100644 index 0000000..18686f2 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart @@ -0,0 +1,2 @@ +export 'dht_array.dart'; +export 'dht_array_cubit.dart'; diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart new file mode 100644 index 0000000..a132bdb --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart @@ -0,0 +1,273 @@ +import 'dart:async'; +import 'dart:typed_data'; + +import 'package:async_tools/async_tools.dart'; +import 'package:collection/collection.dart'; +import 'package:equatable/equatable.dart'; + +import '../../../veilid_support.dart'; +import '../../proto/proto.dart' as proto; +import '../interfaces/dht_append_truncate.dart'; + +part 'dht_log_spine.dart'; +part 'dht_log_read.dart'; +part 'dht_log_append.dart'; + +/////////////////////////////////////////////////////////////////////// + +/// DHTLog is a ring-buffer queue like data structure with the following +/// operations: +/// * Add elements to the tail +/// * Remove elements from the head +/// The structure has a 'spine' record that acts as an indirection table of +/// DHTShortArray record pointers spread over its subkeys. 
+/// Subkey 0 of the DHTLog is a head subkey that contains housekeeping data: +/// * The head and tail position of the log +/// - subkeyIdx = pos / recordsPerSubkey +/// - recordIdx = pos % recordsPerSubkey +class DHTLog implements DHTOpenable { + //////////////////////////////////////////////////////////////// + // Constructors + + DHTLog._({required _DHTLogSpine spine}) : _spine = spine { + _spine.onUpdatedSpine = () { + _watchController?.sink.add(null); + }; + } + + /// Create a DHTLog + static Future create( + {required String debugName, + int stride = DHTShortArray.maxElements, + VeilidRoutingContext? routingContext, + TypedKey? parent, + DHTRecordCrypto? crypto, + KeyPair? writer}) async { + assert(stride <= DHTShortArray.maxElements, 'stride too long'); + final pool = DHTRecordPool.instance; + + late final DHTRecord spineRecord; + if (writer != null) { + final schema = DHTSchema.smpl( + oCnt: 0, + members: [DHTSchemaMember(mKey: writer.key, mCnt: spineSubkeys + 1)]); + spineRecord = await pool.createRecord( + debugName: debugName, + parent: parent, + routingContext: routingContext, + schema: schema, + crypto: crypto, + writer: writer); + } else { + const schema = DHTSchema.dflt(oCnt: spineSubkeys + 1); + spineRecord = await pool.createRecord( + debugName: debugName, + parent: parent, + routingContext: routingContext, + schema: schema, + crypto: crypto); + } + + try { + final spine = await _DHTLogSpine.create( + spineRecord: spineRecord, segmentStride: stride); + return DHTLog._(spine: spine); + } on Exception catch (_) { + await spineRecord.close(); + await spineRecord.delete(); + rethrow; + } + } + + static Future openRead(TypedKey logRecordKey, + {required String debugName, + VeilidRoutingContext? routingContext, + TypedKey? parent, + DHTRecordCrypto? 
crypto}) async { + final spineRecord = await DHTRecordPool.instance.openRecordRead( + logRecordKey, + debugName: debugName, + parent: parent, + routingContext: routingContext, + crypto: crypto); + try { + final spine = await _DHTLogSpine.load(spineRecord: spineRecord); + final dhtLog = DHTLog._(spine: spine); + return dhtLog; + } on Exception catch (_) { + await spineRecord.close(); + rethrow; + } + } + + static Future openWrite( + TypedKey logRecordKey, + KeyPair writer, { + required String debugName, + VeilidRoutingContext? routingContext, + TypedKey? parent, + DHTRecordCrypto? crypto, + }) async { + final spineRecord = await DHTRecordPool.instance.openRecordWrite( + logRecordKey, writer, + debugName: debugName, + parent: parent, + routingContext: routingContext, + crypto: crypto); + try { + final spine = await _DHTLogSpine.load(spineRecord: spineRecord); + final dhtLog = DHTLog._(spine: spine); + return dhtLog; + } on Exception catch (_) { + await spineRecord.close(); + rethrow; + } + } + + static Future openOwned( + OwnedDHTRecordPointer ownedLogRecordPointer, { + required String debugName, + required TypedKey parent, + VeilidRoutingContext? routingContext, + DHTRecordCrypto? 
crypto, + }) => + openWrite( + ownedLogRecordPointer.recordKey, + ownedLogRecordPointer.owner, + debugName: debugName, + routingContext: routingContext, + parent: parent, + crypto: crypto, + ); + + //////////////////////////////////////////////////////////////////////////// + // DHTOpenable + + /// Check if the DHTLog is open + @override + bool get isOpen => _spine.isOpen; + + /// Free all resources for the DHTLog + @override + Future close() async { + if (!isOpen) { + return; + } + await _watchController?.close(); + _watchController = null; + await _spine.close(); + } + + /// Free all resources for the DHTLog and delete it from the DHT + /// Will wait until the short array is closed to delete it + @override + Future delete() async { + await _spine.delete(); + } + + //////////////////////////////////////////////////////////////////////////// + // Public API + + /// Get the record key for this log + TypedKey get recordKey => _spine.recordKey; + + /// Get the record pointer foir this log + OwnedDHTRecordPointer get recordPointer => _spine.recordPointer; + + /// Runs a closure allowing read-only access to the log + Future operate(Future Function(DHTRandomRead) closure) async { + if (!isOpen) { + throw StateError('log is not open"'); + } + + return _spine.operate((spine) async { + final reader = _DHTLogRead._(spine); + return closure(reader); + }); + } + + /// Runs a closure allowing append/truncate access to the log + /// Makes only one attempt to consistently write the changes to the DHT + /// Returns result of the closure if the write could be performed + /// Throws DHTOperateException if the write could not be performed + /// at this time + Future operateAppend( + Future Function(DHTAppendTruncateRandomRead) closure) async { + if (!isOpen) { + throw StateError('log is not open"'); + } + + return _spine.operateAppend((spine) async { + final writer = _DHTLogAppend._(spine); + return closure(writer); + }); + } + + /// Runs a closure allowing append/truncate access to 
the log + /// Will execute the closure multiple times if a consistent write to the DHT + /// is not achieved. Timeout if specified will be thrown as a + /// TimeoutException. The closure should return true if its changes also + /// succeeded, returning false will trigger another eventual consistency + /// attempt. + Future operateAppendEventual( + Future Function(DHTAppendTruncateRandomRead) closure, + {Duration? timeout}) async { + if (!isOpen) { + throw StateError('log is not open"'); + } + + return _spine.operateAppendEventual((spine) async { + final writer = _DHTLogAppend._(spine); + return closure(writer); + }, timeout: timeout); + } + + /// Listen to and any all changes to the structure of this log + /// regardless of where the changes are coming from + Future> listen( + void Function() onChanged, + ) { + if (!isOpen) { + throw StateError('log is not open"'); + } + + return _listenMutex.protect(() async { + // If don't have a controller yet, set it up + if (_watchController == null) { + // Set up watch requirements + _watchController = StreamController.broadcast(onCancel: () { + // If there are no more listeners then we can get + // rid of the controller and drop our subscriptions + unawaited(_listenMutex.protect(() async { + // Cancel watches of head record + await _spine.cancelWatch(); + _watchController = null; + })); + }); + + // Start watching head subkey of the spine + await _spine.watch(); + } + // Return subscription + return _watchController!.stream.listen((_) => onChanged()); + }); + } + + //////////////////////////////////////////////////////////////// + // Fields + + // 56 subkeys * 512 segments * 36 bytes per typedkey = + // 1032192 bytes per record + // 512*36 = 18432 bytes per subkey + // 28672 shortarrays * 256 elements = 7340032 elements + static const spineSubkeys = 56; + static const segmentsPerSubkey = 512; + + // Internal representation refreshed from spine record + final _DHTLogSpine _spine; + + // Watch mutex to ensure we keep the 
representation valid + final Mutex _listenMutex = Mutex(); + // Stream of external changes + StreamController? _watchController; +} diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart new file mode 100644 index 0000000..6a172a7 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart @@ -0,0 +1,42 @@ +part of 'dht_log.dart'; + +//////////////////////////////////////////////////////////////////////////// +// Append/truncate implementation + +class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { + _DHTLogAppend._(super.spine) : super._(); + + @override + Future tryAppendItem(Uint8List value) async { + // Allocate empty index at the end of the list + final endPos = _spine.length; + _spine.allocateTail(1); + final lookup = await _spine.lookupPosition(endPos); + if (lookup == null) { + throw StateError("can't write to dht log"); + } + // Write item to the segment + return lookup.shortArray + .operateWrite((write) async => write.tryWriteItem(lookup.pos, value)); + } + + @override + Future truncate(int count) async { + final len = _spine.length; + if (count > len) { + count = len; + } + if (count == 0) { + return; + } + if (count < 0) { + throw StateError('can not remove negative items'); + } + _spine.releaseHead(count); + } + + @override + Future clear() async { + _spine.releaseHead(_spine.length); + } +} diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart new file mode 100644 index 0000000..a7d5333 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart @@ -0,0 +1,119 @@ +import 'dart:async'; + +import 'package:async_tools/async_tools.dart'; +import 'package:bloc/bloc.dart'; +import 'package:bloc_advanced_tools/bloc_advanced_tools.dart'; +import 
'package:equatable/equatable.dart'; +import 'package:fast_immutable_collections/fast_immutable_collections.dart'; +import 'package:meta/meta.dart'; + +import '../../../veilid_support.dart'; + +// xxx paginate and remember to paginate watches (could use short array cubit as a subcubit here?) + +// @immutable +// class DHTArrayElementState extends Equatable { +// const DHTArrayElementState( +// {required this.value, required this.isOffline}); +// final T value; +// final bool isOffline; + +// @override +// List get props => [value, isOffline]; +// } + +// typedef DHTArrayState = AsyncValue>>; +// typedef DHTArrayBusyState = BlocBusyState>; + +// class DHTArrayCubit extends Cubit> +// with BlocBusyWrapper> { +// DHTArrayCubit({ +// required Future Function() open, +// required T Function(List data) decodeElement, +// }) : _decodeElement = decodeElement, +// super(const BlocBusyState(AsyncValue.loading())) { +// _initWait.add(() async { +// // Open DHT record +// _array = await open(); +// _wantsCloseRecord = true; + +// // Make initial state update +// await _refreshNoWait(); +// _subscription = await _array.listen(_update); +// }); +// } + +// Future refresh({bool forceRefresh = false}) async { +// await _initWait(); +// await _refreshNoWait(forceRefresh: forceRefresh); +// } + +// Future _refreshNoWait({bool forceRefresh = false}) async => +// busy((emit) async => _refreshInner(emit, forceRefresh: forceRefresh)); + +// Future _refreshInner(void Function(DHTShortArrayState) emit, +// {bool forceRefresh = false}) async { +// try { +// final newState = await _shortArray.operate((reader) async { +// final offlinePositions = await reader.getOfflinePositions(); +// final allItems = (await reader.getAllItems(forceRefresh: forceRefresh)) +// ?.indexed +// .map((x) => DHTShortArrayElementState( +// value: _decodeElement(x.$2), +// isOffline: offlinePositions.contains(x.$1))) +// .toIList(); +// return allItems; +// }); +// if (newState != null) { +// 
emit(AsyncValue.data(newState)); +// } +// } on Exception catch (e) { +// emit(AsyncValue.error(e)); +// } +// } + +// void _update() { +// // Run at most one background update process +// // Because this is async, we could get an update while we're +// // still processing the last one. Only called after init future has run +// // so we dont have to wait for that here. +// _sspUpdate.busyUpdate>( +// busy, (emit) async => _refreshInner(emit)); +// } + +// @override +// Future close() async { +// await _initWait(); +// await _subscription?.cancel(); +// _subscription = null; +// if (_wantsCloseRecord) { +// await _shortArray.close(); +// } +// await super.close(); +// } + +// Future operate(Future Function(DHTShortArrayRead) closure) async { +// await _initWait(); +// return _shortArray.operate(closure); +// } + +// Future<(R?, bool)> operateWrite( +// Future Function(DHTShortArrayWrite) closure) async { +// await _initWait(); +// return _shortArray.operateWrite(closure); +// } + +// Future operateWriteEventual( +// Future Function(DHTShortArrayWrite) closure, +// {Duration? timeout}) async { +// await _initWait(); +// return _shortArray.operateWriteEventual(closure, timeout: timeout); +// } + +// final WaitSet _initWait = WaitSet(); +// late final DHTShortArray _shortArray; +// final T Function(List data) _decodeElement; +// StreamSubscription? 
_subscription; +// bool _wantsCloseRecord = false; +// final _sspUpdate = SingleStatelessProcessor(); +// } diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart new file mode 100644 index 0000000..0919412 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart @@ -0,0 +1,103 @@ +part of 'dht_log.dart'; + +//////////////////////////////////////////////////////////////////////////// +// Reader-only implementation + +class _DHTLogRead implements DHTRandomRead { + _DHTLogRead._(_DHTLogSpine spine) : _spine = spine; + + @override + int get length => _spine.length; + + @override + Future getItem(int pos, {bool forceRefresh = false}) async { + if (pos < 0 || pos >= length) { + throw IndexError.withLength(pos, length); + } + final lookup = await _spine.lookupPosition(pos); + if (lookup == null) { + return null; + } + + return lookup.shortArray.operate( + (read) => read.getItem(lookup.pos, forceRefresh: forceRefresh)); + } + + (int, int) _clampStartLen(int start, int? len) { + len ??= _spine.length; + if (start < 0) { + throw IndexError.withLength(start, _spine.length); + } + if (start > _spine.length) { + throw IndexError.withLength(start, _spine.length); + } + if ((len + start) > _spine.length) { + len = _spine.length - start; + } + return (start, len); + } + + @override + Future?> getItemRange(int start, + {int? 
length, bool forceRefresh = false}) async { + final out = []; + (start, length) = _clampStartLen(start, length); + + final chunks = Iterable.generate(length).slices(maxDHTConcurrency).map( + (chunk) => chunk + .map((pos) => getItem(pos + start, forceRefresh: forceRefresh))); + + for (final chunk in chunks) { + final elems = await chunk.wait; + if (elems.contains(null)) { + return null; + } + out.addAll(elems.cast()); + } + + return out; + } + + @override + Future> getOfflinePositions() async { + final positionOffline = {}; + + // Iterate positions backward from most recent + for (var pos = _spine.length - 1; pos >= 0; pos--) { + final lookup = await _spine.lookupPosition(pos); + if (lookup == null) { + throw StateError('Unable to look up position'); + } + + // Check each segment for offline positions + var foundOffline = false; + await lookup.shortArray.operate((read) async { + final segmentOffline = await read.getOfflinePositions(); + + // For each shortarray segment go through their segment positions + // in reverse order and see if they are offline + for (var segmentPos = lookup.pos; + segmentPos >= 0 && pos >= 0; + segmentPos--, pos--) { + // If the position in the segment is offline, then + // mark the position in the log as offline + if (segmentOffline.contains(segmentPos)) { + positionOffline.add(pos); + foundOffline = true; + } + } + }); + + // If we found nothing offline in this segment then we can stop + if (!foundOffline) { + break; + } + } + + return positionOffline; + } + + //////////////////////////////////////////////////////////////////////////// + // Fields + final _DHTLogSpine _spine; +} diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart new file mode 100644 index 0000000..76a3f0c --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart @@ -0,0 +1,527 @@ +part of 'dht_log.dart'; + +class DHTLogPositionLookup { 
+ const DHTLogPositionLookup({required this.shortArray, required this.pos}); + final DHTShortArray shortArray; + final int pos; +} + +class _DHTLogSegmentLookup extends Equatable { + const _DHTLogSegmentLookup({required this.subkey, required this.segment}); + final int subkey; + final int segment; + + @override + List get props => [subkey, segment]; +} + +class _DHTLogSpine { + _DHTLogSpine._( + {required DHTRecord spineRecord, + required int head, + required int tail, + required int stride}) + : _spineRecord = spineRecord, + _head = head, + _tail = tail, + _segmentStride = stride, + _spineCache = []; + + // Create a new spine record and push it to the network + static Future<_DHTLogSpine> create( + {required DHTRecord spineRecord, required int segmentStride}) async { + // Construct new spinehead + final spine = _DHTLogSpine._( + spineRecord: spineRecord, head: 0, tail: 0, stride: segmentStride); + + // Write new spine head record to the network + await spine.operate((spine) async { + final success = await spine.writeSpineHead(); + assert(success, 'false return should never happen on create'); + }); + + return spine; + } + + // Pull the latest or updated copy of the spine head record from the network + static Future<_DHTLogSpine> load({required DHTRecord spineRecord}) async { + // Get an updated spine head record copy if one exists + final spineHead = await spineRecord.getProtobuf(proto.DHTLog.fromBuffer, + subkey: 0, refreshMode: DHTRecordRefreshMode.refresh); + if (spineHead == null) { + throw StateError('spine head missing during refresh'); + } + return _DHTLogSpine._( + spineRecord: spineRecord, + head: spineHead.head, + tail: spineHead.tail, + stride: spineHead.stride); + } + + proto.DHTLog _toProto() { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + final logHead = proto.DHTLog() + ..head = _head + ..tail = _tail + ..stride = _segmentStride; + return logHead; + } + + Future close() async { + await _spineMutex.protect(() async { + if (!isOpen) { 
+ return; + } + final futures = >[_spineRecord.close()]; + for (final (_, sc) in _spineCache) { + futures.add(sc.close()); + } + await Future.wait(futures); + }); + } + + Future delete() async { + await _spineMutex.protect(() async { + final pool = DHTRecordPool.instance; + final futures = >[pool.deleteRecord(_spineRecord.key)]; + for (final (_, sc) in _spineCache) { + futures.add(sc.delete()); + } + await Future.wait(futures); + }); + } + + Future operate(Future Function(_DHTLogSpine) closure) async => + // ignore: prefer_expression_function_bodies + _spineMutex.protect(() async { + return closure(this); + }); + + Future operateAppend(Future Function(_DHTLogSpine) closure) async => + _spineMutex.protect(() async { + final oldHead = _head; + final oldTail = _tail; + try { + final out = await closure(this); + // Write head assuming it has been changed + if (!await writeSpineHead()) { + // Failed to write head means head got overwritten so write should + // be considered failed + throw DHTExceptionTryAgain(); + } + + onUpdatedSpine?.call(); + return out; + } on Exception { + // Exception means state needs to be reverted + _head = oldHead; + _tail = oldTail; + rethrow; + } + }); + + Future operateAppendEventual( + Future Function(_DHTLogSpine) closure, + {Duration? timeout}) async { + final timeoutTs = timeout == null + ? 
null + : Veilid.instance.now().offset(TimestampDuration.fromDuration(timeout)); + + await _spineMutex.protect(() async { + late int oldHead; + late int oldTail; + + try { + // Iterate until we have a successful element and head write + + do { + // Save off old values each pass of writeSpineHead because the head + // will have changed + oldHead = _head; + oldTail = _tail; + + // Try to do the element write + while (true) { + if (timeoutTs != null) { + final now = Veilid.instance.now(); + if (now >= timeoutTs) { + throw TimeoutException('timeout reached'); + } + } + if (await closure(this)) { + break; + } + // Failed to write in closure resets state + _head = oldHead; + _tail = oldTail; + } + + // Try to do the head write + } while (!await writeSpineHead()); + + onUpdatedSpine?.call(); + } on Exception { + // Exception means state needs to be reverted + _head = oldHead; + _tail = oldTail; + rethrow; + } + }); + } + + /// Serialize and write out the current spine head subkey, possibly updating + /// it if a newer copy is available online. 
Returns true if the write was + /// successful + Future writeSpineHead() async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + final headBuffer = _toProto().writeToBuffer(); + + final existingData = await _spineRecord.tryWriteBytes(headBuffer); + if (existingData != null) { + // Head write failed, incorporate update + await _updateHead(proto.DHTLog.fromBuffer(existingData)); + return false; + } + + return true; + } + + /// Validate a new spine head subkey that has come in from the network + Future _updateHead(proto.DHTLog spineHead) async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + _head = spineHead.head; + _tail = spineHead.tail; + } + + ///////////////////////////////////////////////////////////////////////////// + // Spine element management + + static final Uint8List _emptySegmentKey = + Uint8List.fromList(List.filled(TypedKey.decodedLength(), 0)); + static Uint8List _makeEmptySubkey() => Uint8List.fromList(List.filled( + DHTLog.segmentsPerSubkey * TypedKey.decodedLength(), 0)); + + static TypedKey? _getSegmentKey(Uint8List subkeyData, int segment) { + final decodedLength = TypedKey.decodedLength(); + final segmentKeyBytes = subkeyData.sublist( + decodedLength * segment, (decodedLength + 1) * segment); + if (segmentKeyBytes.equals(_emptySegmentKey)) { + return null; + } + return TypedKey.fromBytes(segmentKeyBytes); + } + + static void _setSegmentKey( + Uint8List subkeyData, int segment, TypedKey? 
segmentKey) { + final decodedLength = TypedKey.decodedLength(); + late final Uint8List segmentKeyBytes; + if (segmentKey == null) { + segmentKeyBytes = _emptySegmentKey; + } else { + segmentKeyBytes = segmentKey.decode(); + } + subkeyData.setRange(decodedLength * segment, (decodedLength + 1) * segment, + segmentKeyBytes); + } + + Future _getOrCreateSegmentInner(int segmentNumber) async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + assert(_spineRecord.writer != null, 'should be writable'); + + // Lookup what subkey and segment subrange has this position's segment + // shortarray + final l = lookupSegment(segmentNumber); + final subkey = l.subkey; + final segment = l.segment; + + var subkeyData = await _spineRecord.get(subkey: subkey); + subkeyData ??= _makeEmptySubkey(); + while (true) { + final segmentKey = _getSegmentKey(subkeyData!, segment); + if (segmentKey == null) { + // Create a shortarray segment + final segmentRec = await DHTShortArray.create( + debugName: '${_spineRecord.debugName}_spine_${subkey}_$segment', + stride: _segmentStride, + crypto: _spineRecord.crypto, + parent: _spineRecord.key, + routingContext: _spineRecord.routingContext, + writer: _spineRecord.writer, + ); + var success = false; + try { + // Write it back to the spine record + _setSegmentKey(subkeyData, segment, segmentRec.recordKey); + subkeyData = + await _spineRecord.tryWriteBytes(subkeyData, subkey: subkey); + // If the write was successful then we're done + if (subkeyData == null) { + // Return it + success = true; + return segmentRec; + } + } finally { + if (!success) { + await segmentRec.close(); + await segmentRec.delete(); + } + } + } else { + // Open a shortarray segment + final segmentRec = await DHTShortArray.openWrite( + segmentKey, + _spineRecord.writer!, + debugName: '${_spineRecord.debugName}_spine_${subkey}_$segment', + crypto: _spineRecord.crypto, + parent: _spineRecord.key, + routingContext: _spineRecord.routingContext, + ); + return segmentRec; + } + 
// Loop if we need to try again with the new data from the network + } + } + + Future _getSegmentInner(int segmentNumber) async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + // Lookup what subkey and segment subrange has this position's segment + // shortarray + final l = lookupSegment(segmentNumber); + final subkey = l.subkey; + final segment = l.segment; + + final subkeyData = await _spineRecord.get(subkey: subkey); + if (subkeyData == null) { + return null; + } + final segmentKey = _getSegmentKey(subkeyData, segment); + if (segmentKey == null) { + return null; + } + + // Open a shortarray segment + final segmentRec = await DHTShortArray.openRead( + segmentKey, + debugName: '${_spineRecord.debugName}_spine_${subkey}_$segment', + crypto: _spineRecord.crypto, + parent: _spineRecord.key, + routingContext: _spineRecord.routingContext, + ); + return segmentRec; + } + + Future getOrCreateSegment(int segmentNumber) async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + // See if we already have this in the cache + for (var i = 0; i < _spineCache.length; i++) { + if (_spineCache[i].$1 == segmentNumber) { + // Touch the element + final x = _spineCache.removeAt(i); + _spineCache.add(x); + // Return the shortarray for this position + return x.$2; + } + } + + // If we don't have it in the cache, get/create it and then cache it + final segment = await _getOrCreateSegmentInner(segmentNumber); + _spineCache.add((segmentNumber, segment)); + if (_spineCache.length > _spineCacheLength) { + // Trim the LRU cache + _spineCache.removeAt(0); + } + return segment; + } + + Future getSegment(int segmentNumber) async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + // See if we already have this in the cache + for (var i = 0; i < _spineCache.length; i++) { + if (_spineCache[i].$1 == segmentNumber) { + // Touch the element + final x = _spineCache.removeAt(i); + _spineCache.add(x); + // Return the shortarray for this position + return x.$2; 
+ } + } + + // If we don't have it in the cache, get it and then cache it + final segment = await _getSegmentInner(segmentNumber); + if (segment == null) { + return null; + } + _spineCache.add((segmentNumber, segment)); + if (_spineCache.length > _spineCacheLength) { + // Trim the LRU cache + _spineCache.removeAt(0); + } + return segment; + } + + _DHTLogSegmentLookup lookupSegment(int segmentNumber) { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + if (segmentNumber < 0) { + throw IndexError.withLength( + segmentNumber, DHTLog.spineSubkeys * DHTLog.segmentsPerSubkey); + } + final subkey = segmentNumber ~/ DHTLog.segmentsPerSubkey; + if (subkey >= DHTLog.spineSubkeys) { + throw IndexError.withLength( + segmentNumber, DHTLog.spineSubkeys * DHTLog.segmentsPerSubkey); + } + final segment = segmentNumber % DHTLog.segmentsPerSubkey; + return _DHTLogSegmentLookup(subkey: subkey + 1, segment: segment); + } + + /////////////////////////////////////////// + // API for public interfaces + + Future lookupPosition(int pos) async { + assert(_spineMutex.isLocked, 'should be locked'); + + // Check if our position is in bounds + final endPos = length; + if (pos < 0 || pos >= endPos) { + throw IndexError.withLength(pos, endPos); + } + + // Calculate absolute position, ring-buffer style + final absolutePosition = (_head + pos) % _positionLimit; + + // Determine the segment number and position within the segment + final segmentNumber = absolutePosition ~/ DHTShortArray.maxElements; + final segmentPos = absolutePosition % DHTShortArray.maxElements; + + // Get the segment shortArray + final shortArray = (_spineRecord.writer == null) + ? 
await getSegment(segmentNumber) + : await getOrCreateSegment(segmentNumber); + if (shortArray == null) { + return null; + } + return DHTLogPositionLookup(shortArray: shortArray, pos: segmentPos); + } + + void allocateTail(int count) { + assert(_spineMutex.isLocked, 'should be locked'); + + final currentLength = length; + if (count <= 0) { + throw StateError('count should be > 0'); + } + if (currentLength + count >= _positionLimit) { + throw StateError('ring buffer overflow'); + } + + _tail = (_tail + count) % _positionLimit; + } + + void releaseHead(int count) { + assert(_spineMutex.isLocked, 'should be locked'); + + final currentLength = length; + if (count <= 0) { + throw StateError('count should be > 0'); + } + if (count > currentLength) { + throw StateError('ring buffer underflow'); + } + + _head = (_head + count) % _positionLimit; + } + + ///////////////////////////////////////////////////////////////////////////// + // Watch For Updates + + // Watch head for changes + Future watch() async { + // This will update any existing watches if necessary + try { + await _spineRecord.watch(subkeys: [ValueSubkeyRange.single(0)]); + + // Update changes to the head record + // Don't watch for local changes because this class already handles + // notifying listeners and knows when it makes local changes + _subscription ??= + await _spineRecord.listen(localChanges: false, _onSpineChanged); + } on Exception { + // If anything fails, try to cancel the watches + await cancelWatch(); + rethrow; + } + } + + // Stop watching for changes to head and linked records + Future cancelWatch() async { + await _spineRecord.cancelWatch(); + await _subscription?.cancel(); + _subscription = null; + } + + // Called when the log changes online and we find out from a watch + // but not when we make a change locally + Future _onSpineChanged( + DHTRecord record, Uint8List? 
data, List subkeys) async { + // If head record subkey zero changes, then the layout + // of the dhtshortarray has changed + if (data == null) { + throw StateError('spine head changed without data'); + } + if (record.key != _spineRecord.key || + subkeys.length != 1 || + subkeys[0] != ValueSubkeyRange.single(0)) { + throw StateError('watch returning wrong subkey range'); + } + + // Decode updated head + final headData = proto.DHTLog.fromBuffer(data); + + // Then update the head record + await _spineMutex.protect(() async { + await _updateHead(headData); + onUpdatedSpine?.call(); + }); + } + + //////////////////////////////////////////////////////////////////////////// + + TypedKey get recordKey => _spineRecord.key; + OwnedDHTRecordPointer get recordPointer => _spineRecord.ownedDHTRecordPointer; + int get length => + (_tail < _head) ? (_positionLimit - _head) + _tail : _tail - _head; + bool get isOpen => _spineRecord.isOpen; + + static const _positionLimit = DHTLog.segmentsPerSubkey * + DHTLog.spineSubkeys * + DHTShortArray.maxElements; + + // Spine head mutex to ensure we keep the representation valid + final Mutex _spineMutex = Mutex(); + // Subscription to head record internal changes + StreamSubscription? _subscription; + // Notify closure for external spine head changes + void Function()? 
onUpdatedSpine; + + // Spine DHT record + final DHTRecord _spineRecord; + + // Position of the start of the log (oldest items) + int _head; + // Position of the end of the log (newest items) + int _tail; + + // LRU cache of DHT spine elements accessed recently + // Pair of position and associated shortarray segment + final List<(int, DHTShortArray)> _spineCache; + static const int _spineCacheLength = 3; + // Segment stride to use for spine elements + final int _segmentStride; +} diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart index 1cf97d5..0b4e0b6 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart @@ -38,7 +38,8 @@ class DefaultDHTRecordCubit extends DHTRecordCubit { final Uint8List data; final firstSubkey = subkeys.firstOrNull!.low; if (firstSubkey != defaultSubkey || updatedata == null) { - final maybeData = await record.get(forceRefresh: true); + final maybeData = + await record.get(refreshMode: DHTRecordRefreshMode.refresh); if (maybeData == null) { return null; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart index ccb09f8..3d625f8 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart @@ -13,9 +13,22 @@ class DHTRecordWatchChange extends Equatable { List get props => [local, data, subkeys]; } +/// Refresh mode for DHT record 'get' +enum DHTRecordRefreshMode { + /// Return existing subkey values if they exist locally already + existing, + + /// Always check the network for a newer subkey value + refresh, + + /// Always check the network for a newer subkey value but only + /// 
return that value if its sequence number is newer than the local value + refreshOnlyUpdates, +} + ///////////////////////////////////////////////// -class DHTRecord { +class DHTRecord implements DHTOpenable { DHTRecord._( {required VeilidRoutingContext routingContext, required SharedDHTRecordData sharedDHTRecordData, @@ -30,20 +43,33 @@ class DHTRecord { _open = true, _sharedDHTRecordData = sharedDHTRecordData; - final SharedDHTRecordData _sharedDHTRecordData; - final VeilidRoutingContext _routingContext; - final int _defaultSubkey; - final KeyPair? _writer; - final DHTRecordCrypto _crypto; - final String debugName; + //////////////////////////////////////////////////////////////////////////// + // DHTOpenable - bool _open; - @internal - StreamController? watchController; - @internal - WatchState? watchState; + /// Check if the DHTRecord is open + @override + bool get isOpen => _open; - int subkeyOrDefault(int subkey) => (subkey == -1) ? _defaultSubkey : subkey; + /// Free all resources for the DHTRecord + @override + Future close() async { + if (!_open) { + return; + } + await watchController?.close(); + await DHTRecordPool.instance._recordClosed(this); + _open = false; + } + + /// Free all resources for the DHTRecord and delete it from the DHT + /// Will wait until the record is closed to delete it + @override + Future delete() async { + await DHTRecordPool.instance.deleteRecord(key); + } + + //////////////////////////////////////////////////////////////////////////// + // Public API VeilidRoutingContext get routingContext => _routingContext; TypedKey get key => _sharedDHTRecordData.recordDescriptor.key; @@ -57,64 +83,30 @@ class DHTRecord { DHTRecordCrypto get crypto => _crypto; OwnedDHTRecordPointer get ownedDHTRecordPointer => OwnedDHTRecordPointer(recordKey: key, owner: ownerKeyPair!); - bool get isOpen => _open; - - Future close() async { - if (!_open) { - return; - } - await watchController?.close(); - await DHTRecordPool.instance._recordClosed(this); - 
_open = false; - } - - Future scope(Future Function(DHTRecord) scopeFunction) async { - try { - return await scopeFunction(this); - } finally { - await close(); - } - } - - Future deleteScope(Future Function(DHTRecord) scopeFunction) async { - try { - final out = await scopeFunction(this); - if (_open) { - await close(); - } - return out; - } on Exception catch (_) { - if (_open) { - await close(); - } - await DHTRecordPool.instance.deleteRecord(key); - rethrow; - } - } - - Future maybeDeleteScope( - bool delete, Future Function(DHTRecord) scopeFunction) async { - if (delete) { - return deleteScope(scopeFunction); - } else { - return scope(scopeFunction); - } - } + int subkeyOrDefault(int subkey) => (subkey == -1) ? _defaultSubkey : subkey; + /// Get a subkey value from this record. + /// Returns the most recent value data for this subkey or null if this subkey + /// has not yet been written to. + /// * 'refreshMode' determines whether or not to return a locally existing + /// value or always check the network + /// * 'outSeqNum' optionally returns the sequence number of the value being + /// returned if one was returned. Future get( {int subkey = -1, DHTRecordCrypto? crypto, - bool forceRefresh = false, - bool onlyUpdates = false, + DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.existing, Output? outSeqNum}) async { subkey = subkeyOrDefault(subkey); final valueData = await _routingContext.getDHTValue(key, subkey, - forceRefresh: forceRefresh); + forceRefresh: refreshMode != DHTRecordRefreshMode.existing); if (valueData == null) { return null; } final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey]; - if (onlyUpdates && lastSeq != null && valueData.seq <= lastSeq) { + if (refreshMode == DHTRecordRefreshMode.refreshOnlyUpdates && + lastSeq != null && + valueData.seq <= lastSeq) { return null; } final out = (crypto ?? 
_crypto).decrypt(valueData.data, subkey); @@ -125,17 +117,23 @@ class DHTRecord { return out; } + /// Get a subkey value from this record. + /// Process the record returned with a JSON unmarshal function 'fromJson'. + /// Returns the most recent value data for this subkey or null if this subkey + /// has not yet been written to. + /// * 'refreshMode' determines whether or not to return a locally existing + /// value or always check the network + /// * 'outSeqNum' optionally returns the sequence number of the value being + /// returned if one was returned. Future getJson(T Function(dynamic) fromJson, {int subkey = -1, DHTRecordCrypto? crypto, - bool forceRefresh = false, - bool onlyUpdates = false, + DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.existing, Output? outSeqNum}) async { final data = await get( subkey: subkey, crypto: crypto, - forceRefresh: forceRefresh, - onlyUpdates: onlyUpdates, + refreshMode: refreshMode, outSeqNum: outSeqNum); if (data == null) { return null; @@ -143,18 +141,25 @@ class DHTRecord { return jsonDecodeBytes(fromJson, data); } + /// Get a subkey value from this record. + /// Process the record returned with a protobuf unmarshal + /// function 'fromBuffer'. + /// Returns the most recent value data for this subkey or null if this subkey + /// has not yet been written to. + /// * 'refreshMode' determines whether or not to return a locally existing + /// value or always check the network + /// * 'outSeqNum' optionally returns the sequence number of the value being + /// returned if one was returned. Future getProtobuf( T Function(List i) fromBuffer, {int subkey = -1, DHTRecordCrypto? crypto, - bool forceRefresh = false, - bool onlyUpdates = false, + DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.existing, Output? 
outSeqNum}) async { final data = await get( subkey: subkey, crypto: crypto, - forceRefresh: forceRefresh, - onlyUpdates: onlyUpdates, + refreshMode: refreshMode, outSeqNum: outSeqNum); if (data == null) { return null; @@ -162,6 +167,9 @@ class DHTRecord { return fromBuffer(data.toList()); } + /// Attempt to write a byte buffer to a DHTRecord subkey + /// If a newer value was found on the network, it is returned + /// If the value was successfully written, null is returned Future tryWriteBytes(Uint8List newValue, {int subkey = -1, DHTRecordCrypto? crypto, @@ -211,6 +219,9 @@ class DHTRecord { return decryptedNewValue; } + /// Attempt to write a byte buffer to a DHTRecord subkey + /// If a newer value was found on the network, another attempt + /// will be made to write the subkey until this succeeds Future eventualWriteBytes(Uint8List newValue, {int subkey = -1, DHTRecordCrypto? crypto, @@ -256,6 +267,11 @@ class DHTRecord { } } + /// Attempt to write a byte buffer to a DHTRecord subkey + /// If a newer value was found on the network, another attempt + /// will be made to write the subkey until this succeeds + /// Each attempt to write the value calls an update function with the + /// old value to determine what new value should be attempted for that write. Future eventualUpdateBytes( Future Function(Uint8List? oldValue) update, {int subkey = -1, @@ -281,6 +297,7 @@ class DHTRecord { } while (oldValue != null); } + /// Like 'tryWriteBytes' but with JSON marshal/unmarshal of the value Future tryWriteJson(T Function(dynamic) fromJson, T newValue, {int subkey = -1, DHTRecordCrypto?
crypto, @@ -298,6 +315,7 @@ class DHTRecord { return jsonDecodeBytes(fromJson, out); }); + /// Like 'tryWriteBytes' but with protobuf marshal/unmarshal of the value Future tryWriteProtobuf( T Function(List) fromBuffer, T newValue, {int subkey = -1, @@ -316,6 +334,7 @@ class DHTRecord { return fromBuffer(out); }); + /// Like 'eventualWriteBytes' but with JSON marshal/unmarshal of the value Future eventualWriteJson(T newValue, {int subkey = -1, DHTRecordCrypto? crypto, @@ -324,6 +343,7 @@ class DHTRecord { eventualWriteBytes(jsonEncodeBytes(newValue), subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum); + /// Like 'eventualWriteBytes' but with protobuf marshal/unmarshal of the value Future eventualWriteProtobuf(T newValue, {int subkey = -1, DHTRecordCrypto? crypto, @@ -332,6 +352,7 @@ class DHTRecord { eventualWriteBytes(newValue.writeToBuffer(), subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum); + /// Like 'eventualUpdateBytes' but with JSON marshal/unmarshal of the value Future eventualUpdateJson( T Function(dynamic) fromJson, Future Function(T?) update, {int subkey = -1, @@ -341,6 +362,7 @@ class DHTRecord { eventualUpdateBytes(jsonUpdate(fromJson, update), subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum); + /// Like 'eventualUpdateBytes' but with protobuf marshal/unmarshal of the value Future eventualUpdateProtobuf( T Function(List) fromBuffer, Future Function(T?) update, {int subkey = -1, @@ -350,6 +372,8 @@ class DHTRecord { eventualUpdateBytes(protobufUpdate(fromBuffer, update), subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum); + /// Watch a subkey range of this DHT record for changes + /// Takes effect on the next DHTRecordPool tick Future watch( {List? subkeys, Timestamp? expiration, @@ -363,6 +387,13 @@ class DHTRecord { } } + /// Register a callback for changes made on this this DHT record. 
+ /// You must 'watch' the record as well as listen to it in order for this + /// call back to be called. + /// * 'localChanges' also enables calling the callback if changed are made + /// locally, otherwise only changes seen from the network itself are + /// reported + /// Future> listen( Future Function( DHTRecord record, Uint8List? data, List subkeys) @@ -405,6 +436,8 @@ class DHTRecord { }); } + /// Stop watching this record for changes + /// Takes effect on the next DHTRecordPool tick Future cancelWatch() async { // Tear down watch requirements if (watchState != null) { @@ -413,11 +446,15 @@ class DHTRecord { } } + /// Return the inspection state of a set of subkeys of the DHTRecord + /// See Veilid's 'inspectDHTRecord' call for details on how this works Future inspect( {List? subkeys, DHTReportScope scope = DHTReportScope.local}) => _routingContext.inspectDHTRecord(key, subkeys: subkeys, scope: scope); + ////////////////////////////////////////////////////////////////////////// + void _addValueChange( {required bool local, required Uint8List? data, @@ -458,4 +495,19 @@ class DHTRecord { _addValueChange( local: false, data: update.value?.data, subkeys: update.subkeys); } + + ////////////////////////////////////////////////////////////// + + final SharedDHTRecordData _sharedDHTRecordData; + final VeilidRoutingContext _routingContext; + final int _defaultSubkey; + final KeyPair? _writer; + final DHTRecordCrypto _crypto; + final String debugName; + + bool _open; + @internal + StreamController? watchController; + @internal + WatchState? 
watchState; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart index 15919f9..8616658 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart @@ -93,7 +93,7 @@ class DHTRecordCubit extends Cubit> { for (final skr in subkeys) { for (var sk = skr.low; sk <= skr.high; sk++) { final data = await _record.get( - subkey: sk, forceRefresh: true, onlyUpdates: true); + subkey: sk, refreshMode: DHTRecordRefreshMode.refreshOnlyUpdates); if (data != null) { final newState = await _stateFunction(_record, updateSubkeys, data); if (newState != null) { diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart index f15987a..a305e22 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart @@ -3,7 +3,6 @@ import 'dart:typed_data'; import 'package:async_tools/async_tools.dart'; import 'package:collection/collection.dart'; -import 'package:protobuf/protobuf.dart'; import '../../../veilid_support.dart'; import '../../proto/proto.dart' as proto; @@ -14,7 +13,7 @@ part 'dht_short_array_write.dart'; /////////////////////////////////////////////////////////////////////// -class DHTShortArray { +class DHTShortArray implements DHTOpenable { //////////////////////////////////////////////////////////////// // Constructors @@ -34,22 +33,22 @@ class DHTShortArray { VeilidRoutingContext? routingContext, TypedKey? parent, DHTRecordCrypto? crypto, - KeyPair? smplWriter}) async { + KeyPair? 
writer}) async { assert(stride <= maxElements, 'stride too long'); final pool = DHTRecordPool.instance; late final DHTRecord dhtRecord; - if (smplWriter != null) { + if (writer != null) { final schema = DHTSchema.smpl( oCnt: 0, - members: [DHTSchemaMember(mKey: smplWriter.key, mCnt: stride + 1)]); + members: [DHTSchemaMember(mKey: writer.key, mCnt: stride + 1)]); dhtRecord = await pool.createRecord( debugName: debugName, parent: parent, routingContext: routingContext, schema: schema, crypto: crypto, - writer: smplWriter); + writer: writer); } else { final schema = DHTSchema.dflt(oCnt: stride + 1); dhtRecord = await pool.createRecord( @@ -120,15 +119,15 @@ class DHTShortArray { } static Future openOwned( - OwnedDHTRecordPointer ownedDHTRecordPointer, { + OwnedDHTRecordPointer ownedShortArrayRecordPointer, { required String debugName, required TypedKey parent, VeilidRoutingContext? routingContext, DHTRecordCrypto? crypto, }) => openWrite( - ownedDHTRecordPointer.recordKey, - ownedDHTRecordPointer.owner, + ownedShortArrayRecordPointer.recordKey, + ownedShortArrayRecordPointer.owner, debugName: debugName, routingContext: routingContext, parent: parent, @@ -136,18 +135,14 @@ class DHTShortArray { ); //////////////////////////////////////////////////////////////////////////// - // Public API - - /// Get the record key for this shortarray - TypedKey get recordKey => _head.recordKey; - - /// Get the record pointer foir this shortarray - OwnedDHTRecordPointer get recordPointer => _head.recordPointer; + // DHTOpenable /// Check if the shortarray is open + @override bool get isOpen => _head.isOpen; /// Free all resources for the DHTShortArray + @override Future close() async { if (!isOpen) { return; @@ -159,44 +154,22 @@ class DHTShortArray { /// Free all resources for the DHTShortArray and delete it from the DHT /// Will wait until the short array is closed to delete it + @override Future delete() async { await _head.delete(); } - /// Runs a closure that guarantees the 
DHTShortArray - /// will be closed upon exit, even if an uncaught exception is thrown - Future scope(Future Function(DHTShortArray) scopeFunction) async { - if (!isOpen) { - throw StateError('short array is not open"'); - } - try { - return await scopeFunction(this); - } finally { - await close(); - } - } + //////////////////////////////////////////////////////////////////////////// + // Public API - /// Runs a closure that guarantees the DHTShortArray - /// will be closed upon exit, and deleted if an an - /// uncaught exception is thrown - Future deleteScope( - Future Function(DHTShortArray) scopeFunction) async { - if (!isOpen) { - throw StateError('short array is not open"'); - } + /// Get the record key for this shortarray + TypedKey get recordKey => _head.recordKey; - try { - final out = await scopeFunction(this); - await close(); - return out; - } on Exception catch (_) { - await delete(); - rethrow; - } - } + /// Get the record pointer for this shortarray + OwnedDHTRecordPointer get recordPointer => _head.recordPointer; /// Runs a closure allowing read-only access to the shortarray - Future operate(Future Function(DHTShortArrayRead) closure) async { + Future operate(Future Function(DHTRandomRead) closure) async { if (!isOpen) { throw StateError('short array is not open"'); } @@ -209,14 +182,19 @@ class DHTShortArray { /// Runs a closure allowing read-write access to the shortarray /// Makes only one attempt to consistently write the changes to the DHT - /// Returns (result, true) of the closure if the write could be performed - /// Returns (null, false) if the write could not be performed at this time + /// Returns result of the closure if the write could be performed + /// Throws DHTOperateException if the write could not be performed at this time
Future operateWrite( + Future Function(DHTRandomReadWrite) closure) async { + if (!isOpen) { + throw StateError('short array is not open"'); + } + + return _head.operateWrite((head) async { + final writer = _DHTShortArrayWrite._(head); + return closure(writer); + }); + } /// Runs a closure allowing read-write access to the shortarray /// Will execute the closure multiple times if a consistent write to the DHT @@ -225,7 +203,7 @@ class DHTShortArray { /// succeeded, returning false will trigger another eventual consistency /// attempt. Future operateWriteEventual( - Future Function(DHTShortArrayWrite) closure, + Future Function(DHTRandomReadWrite) closure, {Duration? timeout}) async { if (!isOpen) { throw StateError('short array is not open"'); diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_cubit.dart index 7465715..e0b2504 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_cubit.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_cubit.dart @@ -41,19 +41,6 @@ class DHTShortArrayCubit extends Cubit> }); } - // DHTShortArrayCubit.value({ - // required DHTShortArray shortArray, - // required T Function(List data) decodeElement, - // }) : _shortArray = shortArray, - // _decodeElement = decodeElement, - // super(const BlocBusyState(AsyncValue.loading())) { - // _initFuture = Future(() async { - // // Make initial state update - // unawaited(_refreshNoWait()); - // _subscription = await shortArray.listen(_update); - // }); - // } - Future refresh({bool forceRefresh = false}) async { await _initWait(); await _refreshNoWait(forceRefresh: forceRefresh); @@ -67,12 +54,13 @@ class DHTShortArrayCubit extends Cubit> try { final newState = await _shortArray.operate((reader) async { final offlinePositions = await reader.getOfflinePositions(); - final allItems = (await 
reader.getAllItems(forceRefresh: forceRefresh)) - ?.indexed - .map((x) => DHTShortArrayElementState( - value: _decodeElement(x.$2), - isOffline: offlinePositions.contains(x.$1))) - .toIList(); + final allItems = + (await reader.getItemRange(0, forceRefresh: forceRefresh)) + ?.indexed + .map((x) => DHTShortArrayElementState( + value: _decodeElement(x.$2), + isOffline: offlinePositions.contains(x.$1))) + .toIList(); return allItems; }); if (newState != null) { @@ -103,19 +91,19 @@ class DHTShortArrayCubit extends Cubit> await super.close(); } - Future operate(Future Function(DHTShortArrayRead) closure) async { + Future operate(Future Function(DHTRandomRead) closure) async { await _initWait(); return _shortArray.operate(closure); } - Future<(R?, bool)> operateWrite( - Future Function(DHTShortArrayWrite) closure) async { + Future operateWrite( + Future Function(DHTRandomReadWrite) closure) async { await _initWait(); return _shortArray.operateWrite(closure); } Future operateWriteEventual( - Future Function(DHTShortArrayWrite) closure, + Future Function(DHTRandomReadWrite) closure, {Duration? 
timeout}) async { await _initWait(); return _shortArray.operateWriteEventual(closure, timeout: timeout); diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart index 6da7791..0a2b7d2 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart @@ -82,8 +82,8 @@ class _DHTShortArrayHead { return closure(this); }); - Future<(T?, bool)> operateWrite( - Future Function(_DHTShortArrayHead) closure) async => + Future operateWrite( + Future Function(_DHTShortArrayHead) closure) async => _headMutex.protect(() async { final oldLinkedRecords = List.of(_linkedRecords); final oldIndex = List.of(_index); @@ -95,11 +95,11 @@ class _DHTShortArrayHead { if (!await _writeHead()) { // Failed to write head means head got overwritten so write should // be considered failed - return (null, false); + throw DHTExceptionTryAgain(); } onUpdatedHead?.call(); - return (out, true); + return out; } on Exception { // Exception means state needs to be reverted _linkedRecords = oldLinkedRecords; @@ -249,22 +249,15 @@ class _DHTShortArrayHead { } // Pull the latest or updated copy of the head record from the network - Future _loadHead( - {bool forceRefresh = true, bool onlyUpdates = false}) async { + Future _loadHead() async { // Get an updated head record copy if one exists final head = await _headRecord.getProtobuf(proto.DHTShortArray.fromBuffer, - subkey: 0, forceRefresh: forceRefresh, onlyUpdates: onlyUpdates); + subkey: 0, refreshMode: DHTRecordRefreshMode.refresh); if (head == null) { - if (onlyUpdates) { - // No update - return false; - } - throw StateError('head missing during refresh'); + throw StateError('shortarray head missing during refresh'); } await _updateHead(head); - - return true; } 
///////////////////////////////////////////////////////////////////////////// diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart index 342e67a..88cefde 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart @@ -1,70 +1,14 @@ part of 'dht_short_array.dart'; -//////////////////////////////////////////////////////////////////////////// -// Reader interface -abstract class DHTShortArrayRead { - /// Returns the number of elements in the DHTShortArray - int get length; - - /// Return the item at position 'pos' in the DHTShortArray. If 'forceRefresh' - /// is specified, the network will always be checked for newer values - /// rather than returning the existing locally stored copy of the elements. - Future getItem(int pos, {bool forceRefresh = false}); - - /// Return a list of all of the items in the DHTShortArray. If 'forceRefresh' - /// is specified, the network will always be checked for newer values - /// rather than returning the existing locally stored copy of the elements. 
- Future?> getAllItems({bool forceRefresh = false}); - - /// Get a list of the positions that were written offline and not flushed yet - Future> getOfflinePositions(); -} - -extension DHTShortArrayReadExt on DHTShortArrayRead { - /// Convenience function: - /// Like getItem but also parses the returned element as JSON - Future getItemJson(T Function(dynamic) fromJson, int pos, - {bool forceRefresh = false}) => - getItem(pos, forceRefresh: forceRefresh) - .then((out) => jsonDecodeOptBytes(fromJson, out)); - - /// Convenience function: - /// Like getAllItems but also parses the returned elements as JSON - Future?> getAllItemsJson(T Function(dynamic) fromJson, - {bool forceRefresh = false}) => - getAllItems(forceRefresh: forceRefresh) - .then((out) => out?.map(fromJson).toList()); - - /// Convenience function: - /// Like getItem but also parses the returned element as a protobuf object - Future getItemProtobuf( - T Function(List) fromBuffer, int pos, - {bool forceRefresh = false}) => - getItem(pos, forceRefresh: forceRefresh) - .then((out) => (out == null) ? null : fromBuffer(out)); - - /// Convenience function: - /// Like getAllItems but also parses the returned elements as protobuf objects - Future?> getAllItemsProtobuf( - T Function(List) fromBuffer, - {bool forceRefresh = false}) => - getAllItems(forceRefresh: forceRefresh) - .then((out) => out?.map(fromBuffer).toList()); -} - //////////////////////////////////////////////////////////////////////////// // Reader-only implementation -class _DHTShortArrayRead implements DHTShortArrayRead { +class _DHTShortArrayRead implements DHTRandomRead { _DHTShortArrayRead._(_DHTShortArrayHead head) : _head = head; - /// Returns the number of elements in the DHTShortArray @override int get length => _head.length; - /// Return the item at position 'pos' in the DHTShortArray. 
If 'forceRefresh' - /// is specified, the network will always be checked for newer values - /// rather than returning the existing locally stored copy of the elements. @override Future getItem(int pos, {bool forceRefresh = false}) async { if (pos < 0 || pos >= length) { @@ -77,7 +21,9 @@ class _DHTShortArrayRead implements DHTShortArrayRead { final outSeqNum = Output(); final out = lookup.record.get( subkey: lookup.recordSubkey, - forceRefresh: refresh, + refreshMode: refresh + ? DHTRecordRefreshMode.refresh + : DHTRecordRefreshMode.existing, outSeqNum: outSeqNum); if (outSeqNum.value != null) { _head.updatePositionSeq(pos, false, outSeqNum.value!); @@ -86,17 +32,29 @@ class _DHTShortArrayRead implements DHTShortArrayRead { return out; } - /// Return a list of all of the items in the DHTShortArray. If 'forceRefresh' - /// is specified, the network will always be checked for newer values - /// rather than returning the existing locally stored copy of the elements. - @override - Future?> getAllItems({bool forceRefresh = false}) async { - final out = []; + (int, int) _clampStartLen(int start, int? len) { + len ??= _head.length; + if (start < 0) { + throw IndexError.withLength(start, _head.length); + } + if (start > _head.length) { + throw IndexError.withLength(start, _head.length); + } + if ((len + start) > _head.length) { + len = _head.length - start; + } + return (start, len); + } - final chunks = Iterable.generate(_head.length) - .slices(maxDHTConcurrency) - .map((chunk) => - chunk.map((pos) => getItem(pos, forceRefresh: forceRefresh))); + @override + Future?> getItemRange(int start, + {int? 
length, bool forceRefresh = false}) async { + final out = []; + (start, length) = _clampStartLen(start, length); + + final chunks = Iterable.generate(length).slices(maxDHTConcurrency).map( + (chunk) => chunk + .map((pos) => getItem(pos + start, forceRefresh: forceRefresh))); for (final chunk in chunks) { final elems = await chunk.wait; @@ -109,9 +67,10 @@ class _DHTShortArrayRead implements DHTShortArrayRead { return out; } - /// Get a list of the positions that were written offline and not flushed yet @override Future> getOfflinePositions() async { + final (start, length) = _clampStartLen(0, DHTShortArray.maxElements); + final indexOffline = {}; final inspects = await [ _head._headRecord.inspect(), @@ -134,7 +93,7 @@ class _DHTShortArrayRead implements DHTShortArrayRead { // See which positions map to offline indexes final positionOffline = {}; - for (var i = 0; i < _head._index.length; i++) { + for (var i = start; i < (start + length); i++) { final idx = _head._index[i]; if (indexOffline.contains(idx)) { positionOffline.add(i); diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart index d1c8b2f..0d51663 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart @@ -1,101 +1,10 @@ part of 'dht_short_array.dart'; -//////////////////////////////////////////////////////////////////////////// -// Writer interface -abstract class DHTShortArrayWrite implements DHTShortArrayRead { - /// Try to add an item to the end of the DHTShortArray. Return true if the - /// element was successfully added, and false if the state changed before - /// the element could be added or a newer value was found on the network. 
- /// This may throw an exception if the number elements added exceeds the - /// built-in limit of 'maxElements = 256' entries. - Future tryAddItem(Uint8List value); - - /// Try to insert an item as position 'pos' of the DHTShortArray. - /// Return true if the element was successfully inserted, and false if the - /// state changed before the element could be inserted or a newer value was - /// found on the network. - /// This may throw an exception if the number elements added exceeds the - /// built-in limit of 'maxElements = 256' entries. - Future tryInsertItem(int pos, Uint8List value); - - /// Try to swap items at position 'aPos' and 'bPos' in the DHTShortArray. - /// Return true if the elements were successfully swapped, and false if the - /// state changed before the elements could be swapped or newer values were - /// found on the network. - /// This may throw an exception if either of the positions swapped exceed - /// the length of the list - Future trySwapItem(int aPos, int bPos); - - /// Try to remove an item at position 'pos' in the DHTShortArray. - /// Return the element if it was successfully removed, and null if the - /// state changed before the elements could be removed or newer values were - /// found on the network. - /// This may throw an exception if the position removed exceeeds the length of - /// the list. - Future tryRemoveItem(int pos); - - /// Try to remove all items in the DHTShortArray. - /// Return true if it was successfully cleared, and false if the - /// state changed before the elements could be cleared or newer values were - /// found on the network. - Future tryClear(); - - /// Try to set an item at position 'pos' of the DHTShortArray. - /// If the set was successful this returns: - /// * The prior contents of the element, or null if there was no value yet - /// * A boolean true - /// If the set was found a newer value on the network: - /// * The newer value of the element, or null if the head record - /// changed. 
- /// * A boolean false - /// This may throw an exception if the position exceeds the built-in limit of - /// 'maxElements = 256' entries. - Future<(Uint8List?, bool)> tryWriteItem(int pos, Uint8List newValue); -} - -extension DHTShortArrayWriteExt on DHTShortArrayWrite { - /// Convenience function: - /// Like removeItem but also parses the returned element as JSON - Future tryRemoveItemJson( - T Function(dynamic) fromJson, - int pos, - ) => - tryRemoveItem(pos).then((out) => jsonDecodeOptBytes(fromJson, out)); - - /// Convenience function: - /// Like removeItem but also parses the returned element as JSON - Future tryRemoveItemProtobuf( - T Function(List) fromBuffer, int pos) => - getItem(pos).then((out) => (out == null) ? null : fromBuffer(out)); - - /// Convenience function: - /// Like tryWriteItem but also encodes the input value as JSON and parses the - /// returned element as JSON - Future<(T?, bool)> tryWriteItemJson( - T Function(dynamic) fromJson, - int pos, - T newValue, - ) => - tryWriteItem(pos, jsonEncodeBytes(newValue)) - .then((out) => (jsonDecodeOptBytes(fromJson, out.$1), out.$2)); - - /// Convenience function: - /// Like tryWriteItem but also encodes the input value as a protobuf object - /// and parses the returned element as a protobuf object - Future<(T?, bool)> tryWriteItemProtobuf( - T Function(List) fromBuffer, - int pos, - T newValue, - ) => - tryWriteItem(pos, newValue.writeToBuffer()).then( - (out) => ((out.$1 == null ? 
null : fromBuffer(out.$1!)), out.$2)); -} - //////////////////////////////////////////////////////////////////////////// // Writer implementation class _DHTShortArrayWrite extends _DHTShortArrayRead - implements DHTShortArrayWrite { + implements DHTRandomReadWrite { _DHTShortArrayWrite._(super.head) : super._(); @override @@ -105,12 +14,12 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead _head.allocateIndex(pos); // Write item - final (_, wasSet) = await tryWriteItem(pos, value); - if (!wasSet) { - return false; + final ok = await tryWriteItem(pos, value); + if (!ok) { + _head.freeIndex(pos); } - return true; + return ok; } @override @@ -119,16 +28,15 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead _head.allocateIndex(pos); // Write item - final (_, wasSet) = await tryWriteItem(pos, value); - if (!wasSet) { - return false; + final ok = await tryWriteItem(pos, value); + if (!ok) { + _head.freeIndex(pos); } - return true; } @override - Future trySwapItem(int aPos, int bPos) async { + Future swapItem(int aPos, int bPos) async { if (aPos < 0 || aPos >= _head.length) { throw IndexError.withLength(aPos, _head.length); } @@ -137,12 +45,10 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead } // Swap indices _head.swapIndex(aPos, bPos); - - return true; } @override - Future tryRemoveItem(int pos) async { + Future removeItem(int pos, {Output? output}) async { if (pos < 0 || pos >= _head.length) { throw IndexError.withLength(pos, _head.length); } @@ -162,17 +68,17 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead throw StateError('Element does not exist'); } _head.freeIndex(pos); - return result; + output?.save(result); } @override - Future tryClear() async { + Future clear() async { _head.clearIndex(); - return true; } @override - Future<(Uint8List?, bool)> tryWriteItem(int pos, Uint8List newValue) async { + Future tryWriteItem(int pos, Uint8List newValue, + {Output? 
output}) async { if (pos < 0 || pos >= _head.length) { throw IndexError.withLength(pos, _head.length); } @@ -198,8 +104,10 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead if (result != null) { // A result coming back means the element was overwritten already - return (result, false); + output?.save(result); + return false; } - return (oldValue, true); + output?.save(oldValue); + return true; } } diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart new file mode 100644 index 0000000..babcc7d --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart @@ -0,0 +1,44 @@ +import 'dart:typed_data'; + +import 'package:protobuf/protobuf.dart'; + +import '../../../veilid_support.dart'; + +//////////////////////////////////////////////////////////////////////////// +// Append/truncate interface +abstract class DHTAppendTruncate { + /// Try to add an item to the end of the DHT data structure. + /// Return true if the element was successfully added, and false if the state + /// changed before the element could be added or a newer value was found on + /// the network. + /// This may throw an exception if the number elements added exceeds limits. + Future tryAppendItem(Uint8List value); + + /// Try to remove a number of items from the head of the DHT data structure. + /// Throws StateError if count < 0 + Future truncate(int count); + + /// Remove all items in the DHT data structure. 
+ Future clear(); +} + +abstract class DHTAppendTruncateRandomRead + implements DHTAppendTruncate, DHTRandomRead {} + +extension DHTAppendTruncateExt on DHTAppendTruncate { + /// Convenience function: + /// Like tryAppendItem but also encodes the input value as JSON and parses the + /// returned element as JSON + Future tryAppendItemJson( + T newValue, + ) => + tryAppendItem(jsonEncodeBytes(newValue)); + + /// Convenience function: + /// Like tryAppendItem but also encodes the input value as a protobuf object + /// and parses the returned element as a protobuf object + Future tryAppendItemProtobuf( + T newValue, + ) => + tryAppendItem(newValue.writeToBuffer()); +} diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart new file mode 100644 index 0000000..1ee1140 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart @@ -0,0 +1,49 @@ +import 'dart:async'; + +abstract class DHTOpenable { + bool get isOpen; + Future close(); + Future delete(); +} + +extension DHTOpenableExt on D { + /// Runs a closure that guarantees the DHTOpenable + /// will be closed upon exit, even if an uncaught exception is thrown + Future scope(Future Function(D) scopeFunction) async { + if (!isOpen) { + throw StateError('not open in scope'); + } + try { + return await scopeFunction(this); + } finally { + await close(); + } + } + + /// Runs a closure that guarantees the DHTOpenable + /// will be closed upon exit, and deleted if an an + /// uncaught exception is thrown + Future deleteScope(Future Function(D) scopeFunction) async { + if (!isOpen) { + throw StateError('not open in deleteScope'); + } + + try { + final out = await scopeFunction(this); + await close(); + return out; + } on Exception catch (_) { + await delete(); + rethrow; + } + } + + /// Scopes a closure that conditionally deletes the DHTOpenable on exit + Future maybeDeleteScope( + bool 
delete, Future Function(D) scopeFunction) async { + if (delete) { + return deleteScope(scopeFunction); + } + return scope(scopeFunction); + } +} diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_read.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_read.dart new file mode 100644 index 0000000..d52676e --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_read.dart @@ -0,0 +1,63 @@ +import 'dart:typed_data'; + +import 'package:protobuf/protobuf.dart'; + +import '../../../veilid_support.dart'; + +//////////////////////////////////////////////////////////////////////////// +// Reader interface +abstract class DHTRandomRead { + /// Returns the number of elements in the DHTArray + /// This number will be >= 0 and <= DHTShortArray.maxElements (256) + int get length; + + /// Return the item at position 'pos' in the DHTArray. If 'forceRefresh' + /// is specified, the network will always be checked for newer values + /// rather than returning the existing locally stored copy of the elements. + /// * 'pos' must be >= 0 and < 'length' + Future getItem(int pos, {bool forceRefresh = false}); + + /// Return a list of a range of items in the DHTArray. If 'forceRefresh' + /// is specified, the network will always be checked for newer values + /// rather than returning the existing locally stored copy of the elements. + /// * 'start' must be >= 0 + /// * 'len' must be >= 0 and <= DHTShortArray.maxElements (256) and defaults + /// to the maximum length + Future?> getItemRange(int start, + {int? 
length, bool forceRefresh = false}); + + /// Get a list of the positions that were written offline and not flushed yet + Future> getOfflinePositions(); +} + +extension DHTRandomReadExt on DHTRandomRead { + /// Convenience function: + /// Like getItem but also parses the returned element as JSON + Future getItemJson(T Function(dynamic) fromJson, int pos, + {bool forceRefresh = false}) => + getItem(pos, forceRefresh: forceRefresh) + .then((out) => jsonDecodeOptBytes(fromJson, out)); + + /// Convenience function: + /// Like getAllItems but also parses the returned elements as JSON + Future?> getItemRangeJson(T Function(dynamic) fromJson, int start, + {int? length, bool forceRefresh = false}) => + getItemRange(start, length: length, forceRefresh: forceRefresh) + .then((out) => out?.map(fromJson).toList()); + + /// Convenience function: + /// Like getItem but also parses the returned element as a protobuf object + Future getItemProtobuf( + T Function(List) fromBuffer, int pos, + {bool forceRefresh = false}) => + getItem(pos, forceRefresh: forceRefresh) + .then((out) => (out == null) ? null : fromBuffer(out)); + + /// Convenience function: + /// Like getAllItems but also parses the returned elements as protobuf objects + Future?> getItemRangeProtobuf( + T Function(List) fromBuffer, int start, + {int? 
length, bool forceRefresh = false}) => + getItemRange(start, length: length, forceRefresh: forceRefresh) + .then((out) => out?.map(fromBuffer).toList()); +} diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart new file mode 100644 index 0000000..53f307c --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart @@ -0,0 +1,104 @@ +import 'dart:typed_data'; + +import 'package:protobuf/protobuf.dart'; + +import '../../../veilid_support.dart'; + +//////////////////////////////////////////////////////////////////////////// +// Writer interface +abstract class DHTRandomWrite { + /// Try to set an item at position 'pos' of the DHTArray. + /// If the set was successful this returns: + /// * A boolean true + /// * outValue will return the prior contents of the element, + /// or null if there was no value yet + /// + /// If the set was found a newer value on the network this returns: + /// * A boolean false + /// * outValue will return the newer value of the element, + /// or null if the head record changed. + /// + /// This may throw an exception if the position exceeds the built-in limit of + /// 'maxElements = 256' entries. + Future tryWriteItem(int pos, Uint8List newValue, + {Output? output}); + + /// Try to add an item to the end of the DHTArray. Return true if the + /// element was successfully added, and false if the state changed before + /// the element could be added or a newer value was found on the network. + /// This may throw an exception if the number elements added exceeds the + /// built-in limit of 'maxElements = 256' entries. + Future tryAddItem(Uint8List value); + + /// Try to insert an item as position 'pos' of the DHTArray. 
+ /// Return true if the element was successfully inserted, and false if the + /// state changed before the element could be inserted or a newer value was + /// found on the network. + /// This may throw an exception if the number elements added exceeds the + /// built-in limit of 'maxElements = 256' entries. + Future tryInsertItem(int pos, Uint8List value); + + /// Swap items at position 'aPos' and 'bPos' in the DHTArray. + /// Throws IndexError if either of the positions swapped exceed + /// the length of the list + Future swapItem(int aPos, int bPos); + + /// Remove an item at position 'pos' in the DHTArray. + /// If the remove was successful this returns: + /// * outValue will return the prior contents of the element + /// Throws IndexError if the position removed exceeds the length of + /// the list. + Future removeItem(int pos, {Output? output}); + + /// Remove all items in the DHTShortArray. + Future clear(); +} + +extension DHTRandomWriteExt on DHTRandomWrite { + /// Convenience function: + /// Like tryWriteItem but also encodes the input value as JSON and parses the + /// returned element as JSON + Future tryWriteItemJson( + T Function(dynamic) fromJson, int pos, T newValue, + {Output? output}) async { + final outValueBytes = output == null ? null : Output(); + final out = await tryWriteItem(pos, jsonEncodeBytes(newValue), + output: outValueBytes); + output.mapSave(outValueBytes, (b) => jsonDecodeBytes(fromJson, b)); + return out; + } + + /// Convenience function: + /// Like tryWriteItem but also encodes the input value as a protobuf object + /// and parses the returned element as a protobuf object + Future tryWriteItemProtobuf( + T Function(List) fromBuffer, int pos, T newValue, + {Output? output}) async { + final outValueBytes = output == null ? 
null : Output(); + final out = await tryWriteItem(pos, newValue.writeToBuffer(), + output: outValueBytes); + output.mapSave(outValueBytes, fromBuffer); + return out; + } + + /// Convenience function: + /// Like removeItem but also parses the returned element as JSON + Future removeItemJson(T Function(dynamic) fromJson, int pos, + {Output? output}) async { + final outValueBytes = output == null ? null : Output(); + await removeItem(pos, output: outValueBytes); + output.mapSave(outValueBytes, (b) => jsonDecodeBytes(fromJson, b)); + } + + /// Convenience function: + /// Like removeItem but also parses the returned element as JSON + Future removeItemProtobuf( + T Function(List) fromBuffer, int pos, + {Output? output}) async { + final outValueBytes = output == null ? null : Output(); + await removeItem(pos, output: outValueBytes); + output.mapSave(outValueBytes, fromBuffer); + } +} + +abstract class DHTRandomReadWrite implements DHTRandomRead, DHTRandomWrite {} diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/exceptions.dart b/packages/veilid_support/lib/dht_support/src/interfaces/exceptions.dart new file mode 100644 index 0000000..2b95033 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/interfaces/exceptions.dart @@ -0,0 +1,5 @@ +class DHTExceptionTryAgain implements Exception { + DHTExceptionTryAgain( + [this.cause = 'operation failed due to newer dht value']); + String cause; +} diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart b/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart new file mode 100644 index 0000000..6c61075 --- /dev/null +++ b/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart @@ -0,0 +1,4 @@ +export 'dht_openable.dart'; +export 'dht_random_read.dart'; +export 'dht_random_write.dart'; +export 'exceptions.dart'; diff --git a/packages/veilid_support/lib/proto/dht.pb.dart b/packages/veilid_support/lib/proto/dht.pb.dart index 7c96a7f..4007d3d 100644 
--- a/packages/veilid_support/lib/proto/dht.pb.dart +++ b/packages/veilid_support/lib/proto/dht.pb.dart @@ -83,6 +83,68 @@ class DHTData extends $pb.GeneratedMessage { void clearSize() => clearField(4); } +class DHTLog extends $pb.GeneratedMessage { + factory DHTLog() => create(); + DHTLog._() : super(); + factory DHTLog.fromBuffer($core.List<$core.int> i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromBuffer(i, r); + factory DHTLog.fromJson($core.String i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromJson(i, r); + + static final $pb.BuilderInfo _i = $pb.BuilderInfo(_omitMessageNames ? '' : 'DHTLog', package: const $pb.PackageName(_omitMessageNames ? '' : 'dht'), createEmptyInstance: create) + ..a<$core.int>(1, _omitFieldNames ? '' : 'head', $pb.PbFieldType.OU3) + ..a<$core.int>(2, _omitFieldNames ? '' : 'tail', $pb.PbFieldType.OU3) + ..a<$core.int>(3, _omitFieldNames ? '' : 'stride', $pb.PbFieldType.OU3) + ..hasRequiredFields = false + ; + + @$core.Deprecated( + 'Using this can add significant overhead to your binary. ' + 'Use [GeneratedMessageGenericExtensions.deepCopy] instead. ' + 'Will be removed in next major version') + DHTLog clone() => DHTLog()..mergeFromMessage(this); + @$core.Deprecated( + 'Using this can add significant overhead to your binary. ' + 'Use [GeneratedMessageGenericExtensions.rebuild] instead. ' + 'Will be removed in next major version') + DHTLog copyWith(void Function(DHTLog) updates) => super.copyWith((message) => updates(message as DHTLog)) as DHTLog; + + $pb.BuilderInfo get info_ => _i; + + @$core.pragma('dart2js:noInline') + static DHTLog create() => DHTLog._(); + DHTLog createEmptyInstance() => create(); + static $pb.PbList createRepeated() => $pb.PbList(); + @$core.pragma('dart2js:noInline') + static DHTLog getDefault() => _defaultInstance ??= $pb.GeneratedMessage.$_defaultFor(create); + static DHTLog? 
_defaultInstance; + + @$pb.TagNumber(1) + $core.int get head => $_getIZ(0); + @$pb.TagNumber(1) + set head($core.int v) { $_setUnsignedInt32(0, v); } + @$pb.TagNumber(1) + $core.bool hasHead() => $_has(0); + @$pb.TagNumber(1) + void clearHead() => clearField(1); + + @$pb.TagNumber(2) + $core.int get tail => $_getIZ(1); + @$pb.TagNumber(2) + set tail($core.int v) { $_setUnsignedInt32(1, v); } + @$pb.TagNumber(2) + $core.bool hasTail() => $_has(1); + @$pb.TagNumber(2) + void clearTail() => clearField(2); + + @$pb.TagNumber(3) + $core.int get stride => $_getIZ(2); + @$pb.TagNumber(3) + set stride($core.int v) { $_setUnsignedInt32(2, v); } + @$pb.TagNumber(3) + $core.bool hasStride() => $_has(2); + @$pb.TagNumber(3) + void clearStride() => clearField(3); +} + class DHTShortArray extends $pb.GeneratedMessage { factory DHTShortArray() => create(); DHTShortArray._() : super(); @@ -133,68 +195,6 @@ class DHTShortArray extends $pb.GeneratedMessage { $core.List<$core.int> get seqs => $_getList(2); } -class DHTLog extends $pb.GeneratedMessage { - factory DHTLog() => create(); - DHTLog._() : super(); - factory DHTLog.fromBuffer($core.List<$core.int> i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromBuffer(i, r); - factory DHTLog.fromJson($core.String i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromJson(i, r); - - static final $pb.BuilderInfo _i = $pb.BuilderInfo(_omitMessageNames ? '' : 'DHTLog', package: const $pb.PackageName(_omitMessageNames ? '' : 'dht'), createEmptyInstance: create) - ..pc<$0.TypedKey>(1, _omitFieldNames ? '' : 'keys', $pb.PbFieldType.PM, subBuilder: $0.TypedKey.create) - ..aOM<$0.TypedKey>(2, _omitFieldNames ? '' : 'back', subBuilder: $0.TypedKey.create) - ..p<$core.int>(3, _omitFieldNames ? '' : 'subkeyCounts', $pb.PbFieldType.KU3) - ..a<$core.int>(4, _omitFieldNames ? 
'' : 'totalSubkeys', $pb.PbFieldType.OU3) - ..hasRequiredFields = false - ; - - @$core.Deprecated( - 'Using this can add significant overhead to your binary. ' - 'Use [GeneratedMessageGenericExtensions.deepCopy] instead. ' - 'Will be removed in next major version') - DHTLog clone() => DHTLog()..mergeFromMessage(this); - @$core.Deprecated( - 'Using this can add significant overhead to your binary. ' - 'Use [GeneratedMessageGenericExtensions.rebuild] instead. ' - 'Will be removed in next major version') - DHTLog copyWith(void Function(DHTLog) updates) => super.copyWith((message) => updates(message as DHTLog)) as DHTLog; - - $pb.BuilderInfo get info_ => _i; - - @$core.pragma('dart2js:noInline') - static DHTLog create() => DHTLog._(); - DHTLog createEmptyInstance() => create(); - static $pb.PbList createRepeated() => $pb.PbList(); - @$core.pragma('dart2js:noInline') - static DHTLog getDefault() => _defaultInstance ??= $pb.GeneratedMessage.$_defaultFor(create); - static DHTLog? _defaultInstance; - - @$pb.TagNumber(1) - $core.List<$0.TypedKey> get keys => $_getList(0); - - @$pb.TagNumber(2) - $0.TypedKey get back => $_getN(1); - @$pb.TagNumber(2) - set back($0.TypedKey v) { setField(2, v); } - @$pb.TagNumber(2) - $core.bool hasBack() => $_has(1); - @$pb.TagNumber(2) - void clearBack() => clearField(2); - @$pb.TagNumber(2) - $0.TypedKey ensureBack() => $_ensure(1); - - @$pb.TagNumber(3) - $core.List<$core.int> get subkeyCounts => $_getList(2); - - @$pb.TagNumber(4) - $core.int get totalSubkeys => $_getIZ(3); - @$pb.TagNumber(4) - set totalSubkeys($core.int v) { $_setUnsignedInt32(3, v); } - @$pb.TagNumber(4) - $core.bool hasTotalSubkeys() => $_has(3); - @$pb.TagNumber(4) - void clearTotalSubkeys() => clearField(4); -} - enum DataReference_Kind { dhtData, notSet diff --git a/packages/veilid_support/lib/proto/dht.pbjson.dart b/packages/veilid_support/lib/proto/dht.pbjson.dart index bf31c30..6c99cb7 100644 --- a/packages/veilid_support/lib/proto/dht.pbjson.dart +++ 
b/packages/veilid_support/lib/proto/dht.pbjson.dart @@ -30,6 +30,21 @@ final $typed_data.Uint8List dHTDataDescriptor = $convert.base64Decode( 'gCIAEoCzIQLnZlaWxpZC5UeXBlZEtleVIEaGFzaBIUCgVjaHVuaxgDIAEoDVIFY2h1bmsSEgoE' 'c2l6ZRgEIAEoDVIEc2l6ZQ=='); +@$core.Deprecated('Use dHTLogDescriptor instead') +const DHTLog$json = { + '1': 'DHTLog', + '2': [ + {'1': 'head', '3': 1, '4': 1, '5': 13, '10': 'head'}, + {'1': 'tail', '3': 2, '4': 1, '5': 13, '10': 'tail'}, + {'1': 'stride', '3': 3, '4': 1, '5': 13, '10': 'stride'}, + ], +}; + +/// Descriptor for `DHTLog`. Decode as a `google.protobuf.DescriptorProto`. +final $typed_data.Uint8List dHTLogDescriptor = $convert.base64Decode( + 'CgZESFRMb2cSEgoEaGVhZBgBIAEoDVIEaGVhZBISCgR0YWlsGAIgASgNUgR0YWlsEhYKBnN0cm' + 'lkZRgDIAEoDVIGc3RyaWRl'); + @$core.Deprecated('Use dHTShortArrayDescriptor instead') const DHTShortArray$json = { '1': 'DHTShortArray', @@ -45,23 +60,6 @@ final $typed_data.Uint8List dHTShortArrayDescriptor = $convert.base64Decode( 'Cg1ESFRTaG9ydEFycmF5EiQKBGtleXMYASADKAsyEC52ZWlsaWQuVHlwZWRLZXlSBGtleXMSFA' 'oFaW5kZXgYAiABKAxSBWluZGV4EhIKBHNlcXMYAyADKA1SBHNlcXM='); -@$core.Deprecated('Use dHTLogDescriptor instead') -const DHTLog$json = { - '1': 'DHTLog', - '2': [ - {'1': 'keys', '3': 1, '4': 3, '5': 11, '6': '.veilid.TypedKey', '10': 'keys'}, - {'1': 'back', '3': 2, '4': 1, '5': 11, '6': '.veilid.TypedKey', '10': 'back'}, - {'1': 'subkey_counts', '3': 3, '4': 3, '5': 13, '10': 'subkeyCounts'}, - {'1': 'total_subkeys', '3': 4, '4': 1, '5': 13, '10': 'totalSubkeys'}, - ], -}; - -/// Descriptor for `DHTLog`. Decode as a `google.protobuf.DescriptorProto`. 
-final $typed_data.Uint8List dHTLogDescriptor = $convert.base64Decode( - 'CgZESFRMb2cSJAoEa2V5cxgBIAMoCzIQLnZlaWxpZC5UeXBlZEtleVIEa2V5cxIkCgRiYWNrGA' - 'IgASgLMhAudmVpbGlkLlR5cGVkS2V5UgRiYWNrEiMKDXN1YmtleV9jb3VudHMYAyADKA1SDHN1' - 'YmtleUNvdW50cxIjCg10b3RhbF9zdWJrZXlzGAQgASgNUgx0b3RhbFN1YmtleXM='); - @$core.Deprecated('Use dataReferenceDescriptor instead') const DataReference$json = { '1': 'DataReference', diff --git a/packages/veilid_support/lib/src/identity.dart b/packages/veilid_support/lib/src/identity.dart index baae797..2645894 100644 --- a/packages/veilid_support/lib/src/identity.dart +++ b/packages/veilid_support/lib/src/identity.dart @@ -300,8 +300,8 @@ Future openIdentityMaster( debugName: 'IdentityMaster::openIdentityMaster::IdentityMasterRecord')) .deleteScope((masterRec) async { - final identityMaster = - (await masterRec.getJson(IdentityMaster.fromJson, forceRefresh: true))!; + final identityMaster = (await masterRec.getJson(IdentityMaster.fromJson, + refreshMode: DHTRecordRefreshMode.refresh))!; // Validate IdentityMaster final masterRecordKey = masterRec.key; diff --git a/packages/veilid_support/lib/src/output.dart b/packages/veilid_support/lib/src/output.dart new file mode 100644 index 0000000..78902b3 --- /dev/null +++ b/packages/veilid_support/lib/src/output.dart @@ -0,0 +1,33 @@ +import 'package:fast_immutable_collections/fast_immutable_collections.dart'; + +export 'package:fast_immutable_collections/fast_immutable_collections.dart' + show Output; + +extension OutputNullExt on Output? { + void mapSave(Output? other, T Function(S output) closure) { + if (this == null) { + return; + } + if (other == null) { + return; + } + final v = other.value; + if (v == null) { + return; + } + return this!.save(closure(v)); + } +} + +extension OutputExt on Output { + void mapSave(Output? 
other, T Function(S output) closure) { + if (other == null) { + return; + } + final v = other.value; + if (v == null) { + return; + } + return save(closure(v)); + } +} diff --git a/packages/veilid_support/lib/veilid_support.dart b/packages/veilid_support/lib/veilid_support.dart index fcbbaf4..e741990 100644 --- a/packages/veilid_support/lib/veilid_support.dart +++ b/packages/veilid_support/lib/veilid_support.dart @@ -10,6 +10,7 @@ export 'src/config.dart'; export 'src/identity.dart'; export 'src/json_tools.dart'; export 'src/memory_tools.dart'; +export 'src/output.dart'; export 'src/persistent_queue.dart'; export 'src/protobuf_tools.dart'; export 'src/table_db.dart'; diff --git a/pubspec.lock b/pubspec.lock index 21e4f2e..07b05e5 100644 --- a/pubspec.lock +++ b/pubspec.lock @@ -37,10 +37,10 @@ packages: dependency: "direct main" description: name: archive - sha256: "22600aa1e926be775fa5fe7e6894e7fb3df9efda8891c73f70fb3262399a432d" + sha256: ecf4273855368121b1caed0d10d4513c7241dfc813f7d3c8933b36622ae9b265 url: "https://pub.dev" source: hosted - version: "3.4.10" + version: "3.5.1" args: dependency: transitive description: @@ -203,18 +203,18 @@ packages: dependency: transitive description: name: cached_network_image_web - sha256: "42a835caa27c220d1294311ac409a43361088625a4f23c820b006dd9bffb3316" + sha256: "205d6a9f1862de34b93184f22b9d2d94586b2f05c581d546695e3d8f6a805cd7" url: "https://pub.dev" source: hosted - version: "1.1.1" + version: "1.2.0" camera: dependency: transitive description: name: camera - sha256: "9499cbc2e51d8eb0beadc158b288380037618ce4e30c9acbc4fae1ac3ecb5797" + sha256: dfa8fc5a1adaeb95e7a54d86a5bd56f4bb0e035515354c8ac6d262e35cec2ec8 url: "https://pub.dev" source: hosted - version: "0.10.5+9" + version: "0.10.6" camera_android: dependency: transitive description: @@ -227,10 +227,10 @@ packages: dependency: transitive description: name: camera_avfoundation - sha256: "9dbbb253aaf201a69c40cf95571f366ca936305d2de012684e21f6f1b1433d31" + sha256: 
"7d021e8cd30d9b71b8b92b4ad669e80af432d722d18d6aac338572754a786c15" url: "https://pub.dev" source: hosted - version: "0.9.15+4" + version: "0.9.16" camera_platform_interface: dependency: transitive description: @@ -456,10 +456,10 @@ packages: dependency: transitive description: name: flutter_cache_manager - sha256: "8207f27539deb83732fdda03e259349046a39a4c767269285f449ade355d54ba" + sha256: "395d6b7831f21f3b989ebedbb785545932adb9afe2622c1ffacf7f4b53a7e544" url: "https://pub.dev" source: hosted - version: "3.3.1" + version: "3.3.2" flutter_chat_types: dependency: "direct main" description: @@ -634,10 +634,10 @@ packages: dependency: "direct main" description: name: go_router - sha256: "771c8feb40ad0ef639973d7ecf1b43d55ffcedb2207fd43fab030f5639e40446" + sha256: b465e99ce64ba75e61c8c0ce3d87b66d8ac07f0b35d0a7e0263fcfc10f99e836 url: "https://pub.dev" source: hosted - version: "13.2.4" + version: "13.2.5" graphs: dependency: transitive description: @@ -898,10 +898,10 @@ packages: dependency: transitive description: name: path_provider_foundation - sha256: "5a7999be66e000916500be4f15a3633ebceb8302719b47b9cc49ce924125350f" + sha256: f234384a3fdd67f989b4d54a5d73ca2a6c422fa55ae694381ae0f4375cd1ea16 url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.0" path_provider_linux: dependency: transitive description: @@ -970,10 +970,10 @@ packages: dependency: transitive description: name: pointycastle - sha256: "79fbafed02cfdbe85ef3fd06c7f4bc2cbcba0177e61b765264853d4253b21744" + sha256: "4be0097fcf3fd3e8449e53730c631200ebc7b88016acecab2b0da2f0149222fe" url: "https://pub.dev" source: hosted - version: "3.9.0" + version: "3.9.1" pool: dependency: transitive description: @@ -1106,10 +1106,10 @@ packages: dependency: "direct main" description: name: searchable_listview - sha256: d8513a968bdd540cb011220a5670b23b346e04a7bcb99690a859ed58092f72a4 + sha256: f9bc1a57dfcba49ce2d190d642567fb82309dd23849b3b0a328266e3f90054db url: "https://pub.dev" source: hosted - version: 
"2.11.2" + version: "2.12.0" share_plus: dependency: "direct main" description: @@ -1146,10 +1146,10 @@ packages: dependency: transitive description: name: shared_preferences_foundation - sha256: "7708d83064f38060c7b39db12aefe449cb8cdc031d6062280087bc4cdb988f5c" + sha256: "0a8a893bf4fd1152f93fec03a415d11c27c74454d96e2318a7ac38dd18683ab7" url: "https://pub.dev" source: hosted - version: "2.3.5" + version: "2.4.0" shared_preferences_linux: dependency: transitive description: @@ -1263,10 +1263,10 @@ packages: dependency: transitive description: name: sqflite - sha256: "5ce2e1a15e822c3b4bfb5400455775e421da7098eed8adc8f26298ada7c9308c" + sha256: a43e5a27235518c03ca238e7b4732cf35eabe863a369ceba6cbefa537a66f16d url: "https://pub.dev" source: hosted - version: "2.3.3" + version: "2.3.3+1" sqflite_common: dependency: transitive description: @@ -1407,10 +1407,10 @@ packages: dependency: transitive description: name: url_launcher_ios - sha256: "9149d493b075ed740901f3ee844a38a00b33116c7c5c10d7fb27df8987fb51d5" + sha256: "7068716403343f6ba4969b4173cbf3b84fc768042124bc2c011e5d782b24fe89" url: "https://pub.dev" source: hosted - version: "6.2.5" + version: "6.3.0" url_launcher_linux: dependency: transitive description: @@ -1423,10 +1423,10 @@ packages: dependency: transitive description: name: url_launcher_macos - sha256: b7244901ea3cf489c5335bdacda07264a6e960b1c1b1a9f91e4bc371d9e68234 + sha256: "9a1a42d5d2d95400c795b2914c36fdcb525870c752569438e4ebb09a2b5d90de" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "3.2.0" url_launcher_platform_interface: dependency: transitive description: @@ -1541,10 +1541,10 @@ packages: dependency: transitive description: name: win32 - sha256: "0a989dc7ca2bb51eac91e8fd00851297cfffd641aa7538b165c62637ca0eaa4a" + sha256: "0eaf06e3446824099858367950a813472af675116bf63f008a4c2a75ae13e9cb" url: "https://pub.dev" source: hosted - version: "5.4.0" + version: "5.5.0" window_manager: dependency: "direct main" description: diff --git 
a/pubspec.yaml b/pubspec.yaml index 821c214..d3e5a50 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -10,7 +10,7 @@ environment: dependencies: animated_theme_switcher: ^2.0.10 ansicolor: ^2.0.2 - archive: ^3.4.10 + archive: ^3.5.1 async_tools: ^0.1.1 awesome_extensions: ^2.0.14 badges: ^3.1.2 @@ -44,11 +44,11 @@ dependencies: flutter_translate: ^4.0.4 form_builder_validators: ^9.1.0 freezed_annotation: ^2.4.1 - go_router: ^13.2.4 + go_router: ^13.2.5 hydrated_bloc: ^9.1.5 image: ^4.1.7 intl: ^0.18.1 - json_annotation: ^4.8.1 + json_annotation: ^4.9.0 loggy: ^2.0.3 meta: ^1.11.0 mobile_scanner: ^4.0.1 @@ -65,7 +65,7 @@ dependencies: quickalert: ^1.1.0 radix_colors: ^1.0.4 reorderable_grid: ^1.0.10 - searchable_listview: ^2.11.2 + searchable_listview: ^2.12.0 share_plus: ^8.0.3 shared_preferences: ^2.2.3 signal_strength_indicator: ^0.4.1 @@ -93,7 +93,7 @@ dev_dependencies: build_runner: ^2.4.9 freezed: ^2.5.2 icons_launcher: ^2.1.7 - json_serializable: ^6.7.1 + json_serializable: ^6.8.0 lint_hard: ^4.0.0 flutter_native_splash: From 3315644ba886bcbd0c1a8544b76b587c76ee199d Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 14 May 2024 10:06:43 -0400 Subject: [PATCH 2/7] dht log implementation --- .../lib/dht_support/src/dht_log/barrel.dart | 4 +- .../lib/dht_support/src/dht_log/dht_log.dart | 29 +- .../src/dht_log/dht_log_cubit.dart | 289 ++++++++++++------ .../src/dht_log/dht_log_spine.dart | 43 ++- .../src/dht_short_array/dht_short_array.dart | 3 +- 5 files changed, 252 insertions(+), 116 deletions(-) diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart b/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart index 18686f2..39d1c41 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/barrel.dart @@ -1,2 +1,2 @@ -export 'dht_array.dart'; -export 'dht_array_cubit.dart'; +export 'dht_log.dart'; +export 'dht_log_cubit.dart'; diff --git 
a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart index a132bdb..7513c8b 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart @@ -4,6 +4,7 @@ import 'dart:typed_data'; import 'package:async_tools/async_tools.dart'; import 'package:collection/collection.dart'; import 'package:equatable/equatable.dart'; +import 'package:meta/meta.dart'; import '../../../veilid_support.dart'; import '../../proto/proto.dart' as proto; @@ -15,6 +16,21 @@ part 'dht_log_append.dart'; /////////////////////////////////////////////////////////////////////// +@immutable +class DHTLogUpdate extends Equatable { + const DHTLogUpdate( + {required this.headDelta, required this.tailDelta, required this.length}) + : assert(headDelta >= 0, 'should never have negative head delta'), + assert(tailDelta >= 0, 'should never have negative tail delta'), + assert(length >= 0, 'should never have negative length'); + final int headDelta; + final int tailDelta; + final int length; + + @override + List get props => [headDelta, tailDelta, length]; +} + /// DHTLog is a ring-buffer queue like data structure with the following /// operations: /// * Add elements to the tail @@ -30,8 +46,8 @@ class DHTLog implements DHTOpenable { // Constructors DHTLog._({required _DHTLogSpine spine}) : _spine = spine { - _spine.onUpdatedSpine = () { - _watchController?.sink.add(null); + _spine.onUpdatedSpine = (update) { + _watchController?.sink.add(update); }; } @@ -225,7 +241,7 @@ class DHTLog implements DHTOpenable { /// Listen to and any all changes to the structure of this log /// regardless of where the changes are coming from Future> listen( - void Function() onChanged, + void Function(DHTLogUpdate) onChanged, ) { if (!isOpen) { throw StateError('log is not open"'); @@ -235,7 +251,8 @@ class DHTLog implements DHTOpenable { // If don't have a 
controller yet, set it up if (_watchController == null) { // Set up watch requirements - _watchController = StreamController.broadcast(onCancel: () { + _watchController = + StreamController.broadcast(onCancel: () { // If there are no more listeners then we can get // rid of the controller and drop our subscriptions unawaited(_listenMutex.protect(() async { @@ -249,7 +266,7 @@ class DHTLog implements DHTOpenable { await _spine.watch(); } // Return subscription - return _watchController!.stream.listen((_) => onChanged()); + return _watchController!.stream.listen((upd) => onChanged(upd)); }); } @@ -269,5 +286,5 @@ class DHTLog implements DHTOpenable { // Watch mutex to ensure we keep the representation valid final Mutex _listenMutex = Mutex(); // Stream of external changes - StreamController? _watchController; + StreamController? _watchController; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart index a7d5333..30bac27 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_cubit.dart @@ -8,112 +8,213 @@ import 'package:fast_immutable_collections/fast_immutable_collections.dart'; import 'package:meta/meta.dart'; import '../../../veilid_support.dart'; +import '../interfaces/dht_append_truncate.dart'; -// xxx paginate and remember to paginate watches (could use short array cubit as a subcubit here?) 
+@immutable +class DHTLogElementState extends Equatable { + const DHTLogElementState({required this.value, required this.isOffline}); + final T value; + final bool isOffline; -// @immutable -// class DHTArrayElementState extends Equatable { -// const DHTArrayElementState( -// {required this.value, required this.isOffline}); -// final T value; -// final bool isOffline; + @override + List get props => [value, isOffline]; +} -// @override -// List get props => [value, isOffline]; -// } +@immutable +class DHTLogStateData extends Equatable { + const DHTLogStateData( + {required this.elements, + required this.tail, + required this.count, + required this.follow}); + // The view of the elements in the dhtlog + // Span is from [tail-length, tail) + final IList> elements; + // One past the end of the last element + final int tail; + // The total number of elements to try to keep in 'elements' + final int count; + // If we should have the tail following the log + final bool follow; -// typedef DHTArrayState = AsyncValue>>; -// typedef DHTArrayBusyState = BlocBusyState>; + @override + List get props => [elements, tail, count, follow]; +} -// class DHTArrayCubit extends Cubit> -// with BlocBusyWrapper> { -// DHTArrayCubit({ -// required Future Function() open, -// required T Function(List data) decodeElement, -// }) : _decodeElement = decodeElement, -// super(const BlocBusyState(AsyncValue.loading())) { -// _initWait.add(() async { -// // Open DHT record -// _array = await open(); -// _wantsCloseRecord = true; +typedef DHTLogState = AsyncValue>; +typedef DHTLogBusyState = BlocBusyState>; -// // Make initial state update -// await _refreshNoWait(); -// _subscription = await _array.listen(_update); -// }); -// } +class DHTLogCubit extends Cubit> + with BlocBusyWrapper> { + DHTLogCubit({ + required Future Function() open, + required T Function(List data) decodeElement, + }) : _decodeElement = decodeElement, + super(const BlocBusyState(AsyncValue.loading())) { + _initWait.add(() 
async { + // Open DHT record + _log = await open(); + _wantsCloseRecord = true; -// Future refresh({bool forceRefresh = false}) async { -// await _initWait(); -// await _refreshNoWait(forceRefresh: forceRefresh); -// } + // Make initial state update + await _refreshNoWait(); + _subscription = await _log.listen(_update); + }); + } -// Future _refreshNoWait({bool forceRefresh = false}) async => -// busy((emit) async => _refreshInner(emit, forceRefresh: forceRefresh)); + // Set the tail position of the log for pagination. + // If tail is 0, the end of the log is used. + // If tail is negative, the position is subtracted from the current log + // length. + // If tail is positive, the position is absolute from the head of the log + // If follow is enabled, the tail offset will update when the log changes + Future setWindow( + {int? tail, int? count, bool? follow, bool forceRefresh = false}) async { + await _initWait(); + if (tail != null) { + _tail = tail; + } + if (count != null) { + _count = count; + } + if (follow != null) { + _follow = follow; + } + await _refreshNoWait(forceRefresh: forceRefresh); + } -// Future _refreshInner(void Function(DHTShortArrayState) emit, -// {bool forceRefresh = false}) async { -// try { -// final newState = await _shortArray.operate((reader) async { -// final offlinePositions = await reader.getOfflinePositions(); -// final allItems = (await reader.getAllItems(forceRefresh: forceRefresh)) -// ?.indexed -// .map((x) => DHTShortArrayElementState( -// value: _decodeElement(x.$2), -// isOffline: offlinePositions.contains(x.$1))) -// .toIList(); -// return allItems; -// }); -// if (newState != null) { -// emit(AsyncValue.data(newState)); -// } -// } on Exception catch (e) { -// emit(AsyncValue.error(e)); -// } -// } + Future refresh({bool forceRefresh = false}) async { + await _initWait(); + await _refreshNoWait(forceRefresh: forceRefresh); + } -// void _update() { -// // Run at most one background update process -// // Because this is async, 
we could get an update while we're -// // still processing the last one. Only called after init future has run -// // so we dont have to wait for that here. -// _sspUpdate.busyUpdate>( -// busy, (emit) async => _refreshInner(emit)); -// } + Future _refreshNoWait({bool forceRefresh = false}) async => + busy((emit) async => _refreshInner(emit, forceRefresh: forceRefresh)); -// @override -// Future close() async { -// await _initWait(); -// await _subscription?.cancel(); -// _subscription = null; -// if (_wantsCloseRecord) { -// await _shortArray.close(); -// } -// await super.close(); -// } + Future _refreshInner(void Function(AsyncValue>) emit, + {bool forceRefresh = false}) async { + final avElements = await _loadElements(_tail, _count); + final err = avElements.asError; + if (err != null) { + emit(AsyncValue.error(err.error, err.stackTrace)); + return; + } + final loading = avElements.asLoading; + if (loading != null) { + emit(const AsyncValue.loading()); + return; + } + final elements = avElements.asData!.value; + emit(AsyncValue.data(DHTLogStateData( + elements: elements, tail: _tail, count: _count, follow: _follow))); + } -// Future operate(Future Function(DHTShortArrayRead) closure) async { -// await _initWait(); -// return _shortArray.operate(closure); -// } + Future>>> _loadElements( + int tail, int count, + {bool forceRefresh = false}) async { + try { + final allItems = await _log.operate((reader) async { + final length = reader.length; + final end = ((tail - 1) % length) + 1; + final start = (count < end) ? 
end - count : 0; -// Future<(R?, bool)> operateWrite( -// Future Function(DHTShortArrayWrite) closure) async { -// await _initWait(); -// return _shortArray.operateWrite(closure); -// } + final offlinePositions = await reader.getOfflinePositions(); + final allItems = (await reader.getItemRange(start, + length: end - start, forceRefresh: forceRefresh)) + ?.indexed + .map((x) => DHTLogElementState( + value: _decodeElement(x.$2), + isOffline: offlinePositions.contains(x.$1))) + .toIList(); + return allItems; + }); + if (allItems == null) { + return const AsyncValue.loading(); + } + return AsyncValue.data(allItems); + } on Exception catch (e, st) { + return AsyncValue.error(e, st); + } + } -// Future operateWriteEventual( -// Future Function(DHTShortArrayWrite) closure, -// {Duration? timeout}) async { -// await _initWait(); -// return _shortArray.operateWriteEventual(closure, timeout: timeout); -// } + void _update(DHTLogUpdate upd) { + // Run at most one background update process + // Because this is async, we could get an update while we're + // still processing the last one. Only called after init future has run + // so we dont have to wait for that here. -// final WaitSet _initWait = WaitSet(); -// late final DHTShortArray _shortArray; -// final T Function(List data) _decodeElement; -// StreamSubscription? 
_subscription; -// bool _wantsCloseRecord = false; -// final _sspUpdate = SingleStatelessProcessor(); -// } + // Accumulate head and tail deltas + _headDelta += upd.headDelta; + _tailDelta += upd.tailDelta; + + _sspUpdate.busyUpdate>(busy, (emit) async { + // apply follow + if (_follow) { + if (_tail <= 0) { + // Negative tail is already following tail changes + } else { + // Positive tail is measured from the head, so apply deltas + _tail = (_tail + _tailDelta - _headDelta) % upd.length; + } + } else { + if (_tail <= 0) { + // Negative tail is following tail changes so apply deltas + var posTail = _tail + upd.length; + posTail = (posTail + _tailDelta - _headDelta) % upd.length; + _tail = posTail - upd.length; + } else { + // Positive tail is measured from head so not following tail + } + } + _headDelta = 0; + _tailDelta = 0; + + await _refreshInner(emit); + }); + } + + @override + Future close() async { + await _initWait(); + await _subscription?.cancel(); + _subscription = null; + if (_wantsCloseRecord) { + await _log.close(); + } + await super.close(); + } + + Future operate(Future Function(DHTRandomRead) closure) async { + await _initWait(); + return _log.operate(closure); + } + + Future operateAppend( + Future Function(DHTAppendTruncateRandomRead) closure) async { + await _initWait(); + return _log.operateAppend(closure); + } + + Future operateAppendEventual( + Future Function(DHTAppendTruncateRandomRead) closure, + {Duration? timeout}) async { + await _initWait(); + return _log.operateAppendEventual(closure, timeout: timeout); + } + + final WaitSet _initWait = WaitSet(); + late final DHTLog _log; + final T Function(List data) _decodeElement; + StreamSubscription? 
_subscription; + bool _wantsCloseRecord = false; + final _sspUpdate = SingleStatelessProcessor(); + + // Accumulated deltas since last update + var _headDelta = 0; + var _tailDelta = 0; + + // Cubit window into the DHTLog + var _tail = 0; + var _count = DHTShortArray.maxElements; + var _follow = true; +} diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart index 76a3f0c..65f7110 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart @@ -105,13 +105,11 @@ class _DHTLogSpine { try { final out = await closure(this); // Write head assuming it has been changed - if (!await writeSpineHead()) { + if (!await writeSpineHead(old: (oldHead, oldTail))) { // Failed to write head means head got overwritten so write should // be considered failed throw DHTExceptionTryAgain(); } - - onUpdatedSpine?.call(); return out; } on Exception { // Exception means state needs to be reverted @@ -134,7 +132,6 @@ class _DHTLogSpine { try { // Iterate until we have a successful element and head write - do { // Save off old values each pass of writeSpineHead because the head // will have changed @@ -158,9 +155,7 @@ class _DHTLogSpine { } // Try to do the head write - } while (!await writeSpineHead()); - - onUpdatedSpine?.call(); + } while (!await writeSpineHead(old: (oldHead, oldTail))); } on Exception { // Exception means state needs to be reverted _head = oldHead; @@ -173,7 +168,7 @@ class _DHTLogSpine { /// Serialize and write out the current spine head subkey, possibly updating /// it if a newer copy is available online. Returns true if the write was /// successful - Future writeSpineHead() async { + Future writeSpineHead({(int, int)? 
old}) async { assert(_spineMutex.isLocked, 'should be in mutex here'); final headBuffer = _toProto().writeToBuffer(); @@ -182,12 +177,28 @@ class _DHTLogSpine { if (existingData != null) { // Head write failed, incorporate update await _updateHead(proto.DHTLog.fromBuffer(existingData)); + if (old != null) { + sendUpdate(old.$1, old.$2); + } return false; } - + if (old != null) { + sendUpdate(old.$1, old.$2); + } return true; } + /// Send a spine update callback + void sendUpdate(int oldHead, int oldTail) { + final oldLength = _ringDistance(oldTail, oldHead); + if (oldHead != _head || oldTail != _tail || oldLength != length) { + onUpdatedSpine?.call(DHTLogUpdate( + headDelta: _ringDistance(_head, oldHead), + tailDelta: _ringDistance(_tail, oldTail), + length: length)); + } + } + /// Validate a new spine head subkey that has come in from the network Future _updateHead(proto.DHTLog spineHead) async { assert(_spineMutex.isLocked, 'should be in mutex here'); @@ -486,8 +497,10 @@ class _DHTLogSpine { // Then update the head record await _spineMutex.protect(() async { + final oldHead = _head; + final oldTail = _tail; await _updateHead(headData); - onUpdatedSpine?.call(); + sendUpdate(oldHead, oldTail); }); } @@ -495,10 +508,14 @@ class _DHTLogSpine { TypedKey get recordKey => _spineRecord.key; OwnedDHTRecordPointer get recordPointer => _spineRecord.ownedDHTRecordPointer; - int get length => - (_tail < _head) ? (_positionLimit - _head) + _tail : _tail - _head; + int get length => _ringDistance(_tail, _head); + bool get isOpen => _spineRecord.isOpen; + // Ring buffer distance from old to new + static int _ringDistance(int n, int o) => + (n < o) ? (_positionLimit - o) + n : n - o; + static const _positionLimit = DHTLog.segmentsPerSubkey * DHTLog.spineSubkeys * DHTShortArray.maxElements; @@ -508,7 +525,7 @@ class _DHTLogSpine { // Subscription to head record internal changes StreamSubscription? 
_subscription; // Notify closure for external spine head changes - void Function()? onUpdatedSpine; + void Function(DHTLogUpdate)? onUpdatedSpine; // Spine DHT record final DHTRecord _spineRecord; diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart index a305e22..082a391 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart @@ -183,7 +183,8 @@ class DHTShortArray implements DHTOpenable { /// Runs a closure allowing read-write access to the shortarray /// Makes only one attempt to consistently write the changes to the DHT /// Returns result of the closure if the write could be performed - /// Throws DHTOperateException if the write could not be performed at this time + /// Throws DHTOperateException if the write could not be performed + /// at this time Future operateWrite( Future Function(DHTRandomReadWrite) closure) async { if (!isOpen) { From 8cd73b2844c3894dc6fde77a1e18890294e29288 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Wed, 15 May 2024 22:45:50 -0400 Subject: [PATCH 3/7] checkpoint --- .../example/integration_test/app_test.dart | 44 ++++-- .../fixtures/dht_record_pool_fixture.dart | 13 +- .../integration_test/test_dht_log.dart | 130 ++++++++++++++++++ .../test_dht_record_pool.dart | 74 +++++----- .../test_dht_short_array.dart | 38 ++++- .../lib/dht_support/src/dht_log/dht_log.dart | 1 + .../src/dht_log/dht_log_append.dart | 60 ++++++-- .../src/dht_log/dht_log_spine.dart | 83 +++++++++-- .../src/dht_record/dht_record_pool.dart | 106 +++++++++++--- .../dht_short_array/dht_short_array_head.dart | 8 +- .../dht_short_array_write.dart | 40 ++++-- .../src/interfaces/dht_append_truncate.dart | 7 + .../src/interfaces/dht_openable.dart | 8 +- .../src/interfaces/dht_random_write.dart | 15 ++ 14 files changed, 
513 insertions(+), 114 deletions(-) create mode 100644 packages/veilid_support/example/integration_test/test_dht_log.dart diff --git a/packages/veilid_support/example/integration_test/app_test.dart b/packages/veilid_support/example/integration_test/app_test.dart index 4a5b1e1..ba7785c 100644 --- a/packages/veilid_support/example/integration_test/app_test.dart +++ b/packages/veilid_support/example/integration_test/app_test.dart @@ -7,6 +7,7 @@ import 'package:integration_test/integration_test.dart'; import 'package:veilid_test/veilid_test.dart'; import 'fixtures/fixtures.dart'; +import 'test_dht_log.dart'; import 'test_dht_record_pool.dart'; import 'test_dht_short_array.dart'; @@ -38,24 +39,37 @@ void main() { test('create pool', testDHTRecordPoolCreate); - group('DHTRecordPool Tests', () { + // group('DHTRecordPool Tests', () { + // setUpAll(dhtRecordPoolFixture.setUp); + // tearDownAll(dhtRecordPoolFixture.tearDown); + + // test('create/delete record', testDHTRecordCreateDelete); + // test('record scopes', testDHTRecordScopes); + // test('create/delete deep record', testDHTRecordDeepCreateDelete); + // }); + + // group('DHTShortArray Tests', () { + // setUpAll(dhtRecordPoolFixture.setUp); + // tearDownAll(dhtRecordPoolFixture.tearDown); + + // for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) { + // test('create shortarray stride=$stride', + // makeTestDHTShortArrayCreateDelete(stride: stride)); + // test('add shortarray stride=$stride', + // makeTestDHTShortArrayAdd(stride: 256)); + // } + // }); + + group('DHTLog Tests', () { setUpAll(dhtRecordPoolFixture.setUp); tearDownAll(dhtRecordPoolFixture.tearDown); - test('create/delete record', testDHTRecordCreateDelete); - test('record scopes', testDHTRecordScopes); - test('create/delete deep record', testDHTRecordDeepCreateDelete); - }); - - group('DHTShortArray Tests', () { - setUpAll(dhtRecordPoolFixture.setUp); - tearDownAll(dhtRecordPoolFixture.tearDown); - - for (final stride in [256, 64, 32, 16, 8, 4, 
2, 1]) { - test('create shortarray stride=$stride', - makeTestDHTShortArrayCreateDelete(stride: stride)); - test('add shortarray stride=$stride', - makeTestDHTShortArrayAdd(stride: 256)); + for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) { + test('create log stride=$stride', + makeTestDHTLogCreateDelete(stride: stride)); + test('add/truncate log stride=$stride', + makeTestDHTLogAddTruncate(stride: 256), + timeout: const Timeout(Duration(seconds: 480))); } }); }); diff --git a/packages/veilid_support/example/integration_test/fixtures/dht_record_pool_fixture.dart b/packages/veilid_support/example/integration_test/fixtures/dht_record_pool_fixture.dart index 216d00f..d38181f 100644 --- a/packages/veilid_support/example/integration_test/fixtures/dht_record_pool_fixture.dart +++ b/packages/veilid_support/example/integration_test/fixtures/dht_record_pool_fixture.dart @@ -1,6 +1,7 @@ import 'dart:async'; import 'package:async_tools/async_tools.dart'; +import 'package:flutter/foundation.dart'; import 'package:veilid_support/veilid_support.dart'; import 'package:veilid_test/veilid_test.dart'; @@ -12,9 +13,13 @@ class DHTRecordPoolFixture implements TickerFixtureTickable { UpdateProcessorFixture updateProcessorFixture; TickerFixture tickerFixture; - Future setUp() async { + Future setUp({bool purge = true}) async { await _fixtureMutex.acquire(); - await DHTRecordPool.init(); + if (purge) { + await Veilid.instance.debug('record purge local'); + await Veilid.instance.debug('record purge remote'); + } + await DHTRecordPool.init(logger: debugPrintSynchronously); tickerFixture.register(this); } @@ -22,6 +27,10 @@ class DHTRecordPoolFixture implements TickerFixtureTickable { assert(_fixtureMutex.isLocked, 'should not tearDown without setUp'); tickerFixture.unregister(this); await DHTRecordPool.close(); + + final recordList = await Veilid.instance.debug('record list local'); + debugPrintSynchronously('DHT Record List:\n$recordList'); + _fixtureMutex.release(); } diff 
--git a/packages/veilid_support/example/integration_test/test_dht_log.dart b/packages/veilid_support/example/integration_test/test_dht_log.dart new file mode 100644 index 0000000..fcdabad --- /dev/null +++ b/packages/veilid_support/example/integration_test/test_dht_log.dart @@ -0,0 +1,130 @@ +import 'dart:convert'; + +import 'package:flutter_test/flutter_test.dart'; +import 'package:veilid_support/veilid_support.dart'; + +Future Function() makeTestDHTLogCreateDelete({required int stride}) => + () async { + // Close before delete + { + final dlog = await DHTLog.create( + debugName: 'log_create_delete 1 stride $stride', stride: stride); + expect(await dlog.operate((r) async => r.length), isZero); + expect(dlog.isOpen, isTrue); + await dlog.close(); + expect(dlog.isOpen, isFalse); + await dlog.delete(); + // Operate should fail + await expectLater(() async => dlog.operate((r) async => r.length), + throwsA(isA())); + } + + // Close after delete + { + final dlog = await DHTLog.create( + debugName: 'log_create_delete 2 stride $stride', stride: stride); + await dlog.delete(); + // Operate should still succeed because things aren't closed + expect(await dlog.operate((r) async => r.length), isZero); + await dlog.close(); + // Operate should fail + await expectLater(() async => dlog.operate((r) async => r.length), + throwsA(isA())); + } + + // Close after delete multiple + // Okay to request delete multiple times before close + { + final dlog = await DHTLog.create( + debugName: 'log_create_delete 3 stride $stride', stride: stride); + await dlog.delete(); + await dlog.delete(); + // Operate should still succeed because things aren't closed + expect(await dlog.operate((r) async => r.length), isZero); + await dlog.close(); + await dlog.close(); + // Operate should fail + await expectLater(() async => dlog.operate((r) async => r.length), + throwsA(isA())); + } + }; + +Future Function() makeTestDHTLogAddTruncate({required int stride}) => + () async { + final startTime = 
DateTime.now(); + + final dlog = await DHTLog.create( + debugName: 'log_add 1 stride $stride', stride: stride); + + final dataset = Iterable.generate(1000) + .map((n) => utf8.encode('elem $n')) + .toList(); + + print('adding\n'); + { + final res = await dlog.operateAppend((w) async { + const chunk = 50; + for (var n = 0; n < dataset.length; n += chunk) { + print('$n-${n + chunk - 1} '); + final success = + await w.tryAppendItems(dataset.sublist(n, n + chunk)); + expect(success, isTrue); + } + }); + expect(res, isNull); + } + + print('get all\n'); + { + final dataset2 = await dlog.operate((r) async => r.getItemRange(0)); + expect(dataset2, equals(dataset)); + } + { + final dataset3 = + await dlog.operate((r) async => r.getItemRange(64, length: 128)); + expect(dataset3, equals(dataset.sublist(64, 64 + 128))); + } + { + final dataset4 = + await dlog.operate((r) async => r.getItemRange(0, length: 1000)); + expect(dataset4, equals(dataset.sublist(0, 1000))); + } + { + final dataset5 = + await dlog.operate((r) async => r.getItemRange(500, length: 499)); + expect(dataset5, equals(dataset.sublist(500, 999))); + } + print('truncate\n'); + { + await dlog.operateAppend((w) async => w.truncate(5)); + } + { + final dataset6 = await dlog + .operate((r) async => r.getItemRange(500 - 5, length: 499)); + expect(dataset6, equals(dataset.sublist(500, 999))); + } + print('truncate 2\n'); + { + await dlog.operateAppend((w) async => w.truncate(251)); + } + { + final dataset7 = await dlog + .operate((r) async => r.getItemRange(500 - 256, length: 499)); + expect(dataset7, equals(dataset.sublist(500, 999))); + } + print('clear\n'); + { + await dlog.operateAppend((w) async => w.clear()); + } + print('get all\n'); + { + final dataset8 = await dlog.operate((r) async => r.getItemRange(0)); + expect(dataset8, isEmpty); + } + + await dlog.delete(); + await dlog.close(); + + final endTime = DateTime.now(); + print('Duration: ${endTime.difference(startTime)}'); + }; diff --git 
a/packages/veilid_support/example/integration_test/test_dht_record_pool.dart b/packages/veilid_support/example/integration_test/test_dht_record_pool.dart index 2f05d0b..45b26a7 100644 --- a/packages/veilid_support/example/integration_test/test_dht_record_pool.dart +++ b/packages/veilid_support/example/integration_test/test_dht_record_pool.dart @@ -151,17 +151,29 @@ Future testDHTRecordDeepCreateDelete() async { // Make root record final recroot = await pool.createRecord(debugName: 'test_deep_create_delete'); - for (var d = 0; d < numIterations; d++) { - // Make child set 1 - var parent = recroot; - final children = []; - for (var n = 0; n < numChildren; n++) { - final child = - await pool.createRecord(debugName: 'deep $n', parent: parent.key); - children.add(child); - parent = child; - } + // Make child set 1 + var parent = recroot; + final children = []; + for (var n = 0; n < numChildren; n++) { + final child = + await pool.createRecord(debugName: 'deep $n', parent: parent.key); + children.add(child); + parent = child; + } + // Should mark for deletion + expect(await pool.deleteRecord(recroot.key), isFalse); + + // Root should still be valid + expect(await pool.isValidRecordKey(recroot.key), isTrue); + + // Close root record + await recroot.close(); + + // Root should still be valid because children still exist + expect(await pool.isValidRecordKey(recroot.key), isTrue); + + for (var d = 0; d < numIterations; d++) { // Make child set 2 final children2 = []; parent = recroot; @@ -171,31 +183,31 @@ Future testDHTRecordDeepCreateDelete() async { children2.add(child); parent = child; } - // Should fail to delete root - await expectLater( - () async => pool.deleteRecord(recroot.key), throwsA(isA())); - - // Close child set 1 - await children.map((c) => c.close()).wait; - - // Delete child set 1 in reverse order - for (var n = numChildren - 1; n >= 0; n--) { - await pool.deleteRecord(children[n].key); - } - - // Should fail to delete root - await expectLater( - () async 
=> pool.deleteRecord(recroot.key), throwsA(isA())); - - // Close child set 1 - await children2.map((c) => c.close()).wait; // Delete child set 2 in reverse order for (var n = numChildren - 1; n >= 0; n--) { - await pool.deleteRecord(children2[n].key); + expect(await pool.deleteRecord(children2[n].key), isFalse); } + + // Root should still be there + expect(await pool.isValidRecordKey(recroot.key), isTrue); + + // Close child set 2 + await children2.map((c) => c.close()).wait; + + // All child set 2 should be invalid + for (final c2 in children2) { + // Children should be invalid and deleted now + expect(await pool.isValidRecordKey(c2.key), isFalse); + } + + // Root should still be valid + expect(await pool.isValidRecordKey(recroot.key), isTrue); } - // Should be able to delete root now - await pool.deleteRecord(recroot.key); + // Close child set 1 + await children.map((c) => c.close()).wait; + + // Root should have gone away + expect(await pool.isValidRecordKey(recroot.key), isFalse); } diff --git a/packages/veilid_support/example/integration_test/test_dht_short_array.dart b/packages/veilid_support/example/integration_test/test_dht_short_array.dart index c2fcc2b..6ba2d23 100644 --- a/packages/veilid_support/example/integration_test/test_dht_short_array.dart +++ b/packages/veilid_support/example/integration_test/test_dht_short_array.dart @@ -61,10 +61,10 @@ Future Function() makeTestDHTShortArrayAdd({required int stride}) => .map((n) => utf8.encode('elem $n')) .toList(); - print('adding\n'); + print('adding singles\n'); { final res = await arr.operateWrite((w) async { - for (var n = 0; n < dataset.length; n++) { + for (var n = 4; n < 8; n++) { print('$n '); final success = await w.tryAddItem(dataset[n]); expect(success, isTrue); @@ -73,6 +73,40 @@ Future Function() makeTestDHTShortArrayAdd({required int stride}) => expect(res, isNull); } + print('adding batch\n'); + { + final res = await arr.operateWrite((w) async { + print('${dataset.length ~/ 
2}-${dataset.length}'); + final success = await w.tryAddItems( + dataset.sublist(dataset.length ~/ 2, dataset.length)); + expect(success, isTrue); + }); + expect(res, isNull); + } + + print('inserting singles\n'); + { + final res = await arr.operateWrite((w) async { + for (var n = 0; n < 4; n++) { + print('$n '); + final success = await w.tryInsertItem(n, dataset[n]); + expect(success, isTrue); + } + }); + expect(res, isNull); + } + + print('inserting batch\n'); + { + final res = await arr.operateWrite((w) async { + print('8-${dataset.length ~/ 2}'); + final success = await w.tryInsertItems( + 8, dataset.sublist(8, dataset.length ~/ 2)); + expect(success, isTrue); + }); + expect(res, isNull); + } + //print('get all\n'); { final dataset2 = await arr.operate((r) async => r.getItemRange(0)); diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart index 7513c8b..f7b606c 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart @@ -1,4 +1,5 @@ import 'dart:async'; +import 'dart:math'; import 'dart:typed_data'; import 'package:async_tools/async_tools.dart'; diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart index 6a172a7..96c3eb4 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart @@ -9,34 +9,74 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { @override Future tryAppendItem(Uint8List value) async { // Allocate empty index at the end of the list - final endPos = _spine.length; + final insertPos = _spine.length; _spine.allocateTail(1); - final lookup = await _spine.lookupPosition(endPos); + final lookup = await _spine.lookupPosition(insertPos); if 
(lookup == null) { throw StateError("can't write to dht log"); } + // Write item to the segment - return lookup.shortArray - .operateWrite((write) async => write.tryWriteItem(lookup.pos, value)); + return lookup.shortArray.operateWrite((write) async { + // If this a new segment, then clear it in case we have wrapped around + if (lookup.pos == 0) { + await write.clear(); + } else if (lookup.pos != write.length) { + // We should always be appending at the length + throw StateError('appending should be at the end'); + } + return write.tryAddItem(value); + }); + } + + @override + Future tryAppendItems(List values) async { + // Allocate empty index at the end of the list + final insertPos = _spine.length; + _spine.allocateTail(values.length); + + // Look up the first position and shortarray + for (var valueIdx = 0; valueIdx < values.length;) { + final remaining = values.length - valueIdx; + + final lookup = await _spine.lookupPosition(insertPos + valueIdx); + if (lookup == null) { + throw StateError("can't write to dht log"); + } + + final sacount = min(remaining, DHTShortArray.maxElements - lookup.pos); + final success = await lookup.shortArray.operateWrite((write) async { + // If this a new segment, then clear it in case we have wrapped around + if (lookup.pos == 0) { + await write.clear(); + } else if (lookup.pos != write.length) { + // We should always be appending at the length + throw StateError('appending should be at the end'); + } + return write.tryAddItems(values.sublist(valueIdx, valueIdx + sacount)); + }); + if (!success) { + return false; + } + valueIdx += sacount; + } + return true; } @override Future truncate(int count) async { - final len = _spine.length; - if (count > len) { - count = len; - } + count = min(count, _spine.length); if (count == 0) { return; } if (count < 0) { throw StateError('can not remove negative items'); } - _spine.releaseHead(count); + await _spine.releaseHead(count); } @override Future clear() async { - 
_spine.releaseHead(_spine.length); + await _spine.releaseHead(_spine.length); } } diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart index 65f7110..7ce3dbf 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart @@ -83,12 +83,8 @@ class _DHTLogSpine { Future delete() async { await _spineMutex.protect(() async { - final pool = DHTRecordPool.instance; - final futures = >[pool.deleteRecord(_spineRecord.key)]; - for (final (_, sc) in _spineCache) { - futures.add(sc.delete()); - } - await Future.wait(futures); + // Will deep delete all segment records as they are children + await _spineRecord.delete(); }); } @@ -218,7 +214,7 @@ class _DHTLogSpine { static TypedKey? _getSegmentKey(Uint8List subkeyData, int segment) { final decodedLength = TypedKey.decodedLength(); final segmentKeyBytes = subkeyData.sublist( - decodedLength * segment, (decodedLength + 1) * segment); + decodedLength * segment, decodedLength * (segment + 1)); if (segmentKeyBytes.equals(_emptySegmentKey)) { return null; } @@ -234,7 +230,7 @@ class _DHTLogSpine { } else { segmentKeyBytes = segmentKey.decode(); } - subkeyData.setRange(decodedLength * segment, (decodedLength + 1) * segment, + subkeyData.setRange(decodedLength * segment, decodedLength * (segment + 1), segmentKeyBytes); } @@ -435,7 +431,7 @@ class _DHTLogSpine { _tail = (_tail + count) % _positionLimit; } - void releaseHead(int count) { + Future releaseHead(int count) async { assert(_spineMutex.isLocked, 'should be locked'); final currentLength = length; @@ -447,6 +443,73 @@ class _DHTLogSpine { } _head = (_head + count) % _positionLimit; + await _purgeUnusedSegments(); + } + + Future _deleteSegmentsContiguous(int start, int end) async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + + final startSegmentNumber = start ~/ 
DHTShortArray.maxElements; + final startSegmentPos = start % DHTShortArray.maxElements; + + final endSegmentNumber = end ~/ DHTShortArray.maxElements; + final endSegmentPos = end % DHTShortArray.maxElements; + + final firstDeleteSegment = + (startSegmentPos == 0) ? startSegmentNumber : startSegmentNumber + 1; + final lastDeleteSegment = + (endSegmentPos == 0) ? endSegmentNumber - 1 : endSegmentNumber - 2; + + int? lastSubkey; + Uint8List? subkeyData; + for (var segmentNumber = firstDeleteSegment; + segmentNumber <= lastDeleteSegment; + segmentNumber++) { + // Lookup what subkey and segment subrange has this position's segment + // shortarray + final l = lookupSegment(segmentNumber); + final subkey = l.subkey; + final segment = l.segment; + + if (lastSubkey != subkey) { + // Flush subkey writes + if (lastSubkey != null) { + await _spineRecord.eventualWriteBytes(subkeyData!, + subkey: lastSubkey); + } + + // FIXME: debug this, it takes forever + + // Get next subkey + subkeyData = await _spineRecord.get(subkey: subkey); + if (subkeyData != null) { + lastSubkey = subkey; + } else { + lastSubkey = null; + } + } + if (subkeyData != null) { + final segmentKey = _getSegmentKey(subkeyData, segment); + if (segmentKey != null) { + await DHTRecordPool.instance.deleteRecord(segmentKey); + _setSegmentKey(subkeyData, segment, null); + } + } + } + // Flush subkey writes + if (lastSubkey != null) { + await _spineRecord.eventualWriteBytes(subkeyData!, subkey: lastSubkey); + } + } + + Future _purgeUnusedSegments() async { + assert(_spineMutex.isLocked, 'should be in mutex here'); + if (_head < _tail) { + await _deleteSegmentsContiguous(0, _head); + await _deleteSegmentsContiguous(_tail, _positionLimit); + } else if (_head > _tail) { + await _deleteSegmentsContiguous(_tail, _head); + } } ///////////////////////////////////////////////////////////////////////////// @@ -532,7 +595,7 @@ class _DHTLogSpine { // Position of the start of the log (oldest items) int _head; - // Position of the end
of the log (newest items) + // Position of the end of the log (newest items) (exclusive) int _tail; // LRU cache of DHT spine elements accessed recently diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart index a64f461..a4748df 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart @@ -91,7 +91,6 @@ class SharedDHTRecordData { Map subkeySeqCache = {}; bool needsWatchStateUpdate = false; WatchState? unionWatchState; - bool deleteOnClose = false; } // Per opened record data @@ -128,6 +127,7 @@ class DHTRecordPool with TableDBBackedJson { : _state = const DHTRecordPoolAllocations(), _mutex = Mutex(), _opened = {}, + _markedForDelete = {}, _routingContext = routingContext, _veilid = veilid; @@ -140,6 +140,8 @@ class DHTRecordPool with TableDBBackedJson { final Mutex _mutex; // Which DHT records are currently open final Map _opened; + // Which DHT records are marked for deletion + final Set _markedForDelete; // Default routing context to use for new keys final VeilidRoutingContext _routingContext; // Convenience accessor @@ -288,6 +290,8 @@ class DHTRecordPool with TableDBBackedJson { return openedRecordInfo; } + // Called when a DHTRecord is closed + // Cleans up the opened record housekeeping and processes any late deletions Future _recordClosed(DHTRecord record) async { await _mutex.protect(() async { final key = record.key; @@ -301,14 +305,37 @@ class DHTRecordPool with TableDBBackedJson { } if (openedRecordInfo.records.isEmpty) { await _routingContext.closeDHTRecord(key); - if (openedRecordInfo.shared.deleteOnClose) { - await _deleteRecordInner(key); - } _opened.remove(key); + + await _checkForLateDeletesInner(key); } }); } + // Check to see if this key can finally be deleted + // If any parents are marked for deletion, try them first + 
Future _checkForLateDeletesInner(TypedKey key) async { + // Get parent list in bottom up order including our own key + final parents = []; + TypedKey? nextParent = key; + while (nextParent != null) { + parents.add(nextParent); + nextParent = getParentRecordKey(nextParent); + } + + // If any parent is ready to delete all its children do it + for (final parent in parents) { + if (_markedForDelete.contains(parent)) { + final deleted = await _deleteRecordInner(parent); + if (!deleted) { + // If we couldn't delete a child then no 'marked for delete' parents + // above us will be ready to delete either + break; + } + } + } + } + // Collect all dependencies (including the record itself) // in reverse (bottom-up/delete order) List _collectChildrenInner(TypedKey recordKey) { @@ -328,7 +355,13 @@ class DHTRecordPool with TableDBBackedJson { return allDeps.reversedView; } - String _debugChildren(TypedKey recordKey, {List? allDeps}) { + /// Collect all dependencies (including the record itself) + /// in reverse (bottom-up/delete order) + Future> collectChildren(TypedKey recordKey) => + _mutex.protect(() async => _collectChildrenInner(recordKey)); + + /// Print children + String debugChildren(TypedKey recordKey, {List? 
allDeps}) { allDeps ??= _collectChildrenInner(recordKey); // ignore: avoid_print var out = @@ -342,32 +375,48 @@ class DHTRecordPool with TableDBBackedJson { return out; } - Future _deleteRecordInner(TypedKey recordKey) async { - log('deleteDHTRecord: key=$recordKey'); + // Actual delete function + Future _finalizeDeleteRecordInner(TypedKey recordKey) async { + log('_finalizeDeleteRecordInner: key=$recordKey'); // Remove this child from parents await _removeDependenciesInner([recordKey]); await _routingContext.deleteDHTRecord(recordKey); + _markedForDelete.remove(recordKey); } - Future deleteRecord(TypedKey recordKey) async { - await _mutex.protect(() async { - final allDeps = _collectChildrenInner(recordKey); - - if (allDeps.singleOrNull != recordKey) { - final dbgstr = _debugChildren(recordKey, allDeps: allDeps); - throw StateError('must delete children first: $dbgstr'); + // Deep delete mechanism inside mutex + Future _deleteRecordInner(TypedKey recordKey) async { + final toDelete = _readyForDeleteInner(recordKey); + if (toDelete.isNotEmpty) { + // delete now + for (final deleteKey in toDelete) { + await _finalizeDeleteRecordInner(deleteKey); } + return true; + } + // mark for deletion + _markedForDelete.add(recordKey); + return false; + } - final ori = _opened[recordKey]; - if (ori != null) { - // delete after close - ori.shared.deleteOnClose = true; - } else { - // delete now - await _deleteRecordInner(recordKey); + /// Delete a record and its children if they are all closed + /// otherwise mark that record for deletion eventually + /// Returns true if the deletion was processed immediately + /// Returns false if the deletion was marked for later + Future deleteRecord(TypedKey recordKey) async => + _mutex.protect(() async => _deleteRecordInner(recordKey)); + + // If everything underneath is closed including itself, return the + // list of children (and itself) to finally actually delete + List _readyForDeleteInner(TypedKey recordKey) { + final allDeps = 
_collectChildrenInner(recordKey); + for (final dep in allDeps) { + if (_opened.containsKey(dep)) { + return []; } - }); + } + return allDeps; } void _validateParentInner(TypedKey? parent, TypedKey child) { @@ -456,6 +505,19 @@ class DHTRecordPool with TableDBBackedJson { } } + bool _isValidRecordKeyInner(TypedKey key) { + if (_state.rootRecords.contains(key)) { + return true; + } + if (_state.childrenByParent.containsKey(key.toJson())) { + return true; + } + return false; + } + + Future isValidRecordKey(TypedKey key) => + _mutex.protect(() async => _isValidRecordKeyInner(key)); + /////////////////////////////////////////////////////////////////////// /// Create a root DHTRecord that has no dependent records diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart index 0a2b7d2..e2bf392 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart @@ -67,12 +67,8 @@ class _DHTShortArrayHead { Future delete() async { await _headMutex.protect(() async { - final pool = DHTRecordPool.instance; - final futures = >[pool.deleteRecord(_headRecord.key)]; - for (final lr in _linkedRecords) { - futures.add(pool.deleteRecord(lr.key)); - } - await Future.wait(futures); + // Will deep delete all linked records as they are children + await _headRecord.delete(); }); } diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart index 0d51663..dbd8984 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart @@ -8,19 +8,12 @@ class _DHTShortArrayWrite extends 
_DHTShortArrayRead _DHTShortArrayWrite._(super.head) : super._(); @override - Future tryAddItem(Uint8List value) async { - // Allocate empty index at the end of the list - final pos = _head.length; - _head.allocateIndex(pos); + Future tryAddItem(Uint8List value) => + tryInsertItem(_head.length, value); - // Write item - final ok = await tryWriteItem(pos, value); - if (!ok) { - _head.freeIndex(pos); - } - - return ok; - } + @override + Future tryAddItems(List values) => + tryInsertItems(_head.length, values); @override Future tryInsertItem(int pos, Uint8List value) async { @@ -35,6 +28,29 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead return true; } + @override + Future tryInsertItems(int pos, List values) async { + // Allocate empty indices at the end of the list + for (var i = 0; i < values.length; i++) { + _head.allocateIndex(pos + i); + } + + // Write items + var success = true; + final dws = DelayedWaitSet(); + for (var i = 0; i < values.length; i++) { + dws.add(() async { + final ok = await tryWriteItem(pos + i, values[i]); + if (!ok) { + _head.freeIndex(pos + i); + success = false; + } + }); + } + await dws(chunkSize: maxDHTConcurrency, onChunkDone: (_) => success); + return success; + } + @override Future swapItem(int aPos, int bPos) async { if (aPos < 0 || aPos >= _head.length) { diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart index babcc7d..d98037c 100644 --- a/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_append_truncate.dart @@ -14,6 +14,13 @@ abstract class DHTAppendTruncate { /// This may throw an exception if the number elements added exceeds limits. Future tryAppendItem(Uint8List value); + /// Try to add a list of items to the end of the DHT data structure. 
+ /// Return true if the elements were successfully added, and false if the + /// state changed before the element could be added or a newer value was found + /// on the network. + /// This may throw an exception if the number elements added exceeds limits. + Future tryAppendItems(List values); + /// Try to remove a number of items from the head of the DHT data structure. /// Throws StateError if count < 0 Future truncate(int count); diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart index 1ee1140..e28f703 100644 --- a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart @@ -29,12 +29,12 @@ extension DHTOpenableExt on D { } try { - final out = await scopeFunction(this); - await close(); - return out; - } on Exception catch (_) { + return await scopeFunction(this); + } on Exception { await delete(); rethrow; + } finally { + await close(); } } diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart index 53f307c..17a450e 100644 --- a/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_random_write.dart @@ -30,6 +30,13 @@ abstract class DHTRandomWrite { /// built-in limit of 'maxElements = 256' entries. Future tryAddItem(Uint8List value); + /// Try to add a list of items to the end of the DHTArray. Return true if the + /// elements were successfully added, and false if the state changed before + /// the elements could be added or a newer value was found on the network. + /// This may throw an exception if the number elements added exceeds the + /// built-in limit of 'maxElements = 256' entries. 
+ Future tryAddItems(List values); + /// Try to insert an item as position 'pos' of the DHTArray. /// Return true if the element was successfully inserted, and false if the /// state changed before the element could be inserted or a newer value was @@ -38,6 +45,14 @@ abstract class DHTRandomWrite { /// built-in limit of 'maxElements = 256' entries. Future tryInsertItem(int pos, Uint8List value); + /// Try to insert items at position 'pos' of the DHTArray. + /// Return true if the elements were successfully inserted, and false if the + /// state changed before the elements could be inserted or a newer value was + /// found on the network. + /// This may throw an exception if the number elements added exceeds the + /// built-in limit of 'maxElements = 256' entries. + Future tryInsertItems(int pos, List values); + /// Swap items at position 'aPos' and 'bPos' in the DHTArray. /// Throws IndexError if either of the positions swapped exceed /// the length of the list From cf837e21764267c21ba3f3367521e0bbd80eb90b Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Thu, 16 May 2024 14:07:25 -0400 Subject: [PATCH 4/7] dht log passes tests --- .../example/integration_test/app_test.dart | 39 +++--- .../integration_test/test_dht_log.dart | 8 +- .../test_dht_record_pool.dart | 6 +- .../test_dht_short_array.dart | 7 +- .../lib/dht_support/src/dht_log/dht_log.dart | 39 ++++-- .../src/dht_log/dht_log_append.dart | 43 ++++--- .../dht_support/src/dht_log/dht_log_read.dart | 34 +++--- .../src/dht_log/dht_log_spine.dart | 98 ++++++++------- .../dht_record/default_dht_record_cubit.dart | 2 +- .../src/dht_record/dht_record.dart | 115 ++++++++++++------ .../src/dht_record/dht_record_cubit.dart | 2 +- .../src/dht_record/dht_record_pool.dart | 1 - .../src/dht_short_array/dht_short_array.dart | 39 ++++-- .../dht_short_array/dht_short_array_head.dart | 2 +- .../dht_short_array/dht_short_array_read.dart | 4 +- .../src/interfaces/dht_openable.dart | 5 +- 
packages/veilid_support/lib/src/identity.dart | 2 +- 17 files changed, 267 insertions(+), 179 deletions(-) diff --git a/packages/veilid_support/example/integration_test/app_test.dart b/packages/veilid_support/example/integration_test/app_test.dart index ba7785c..b3548a2 100644 --- a/packages/veilid_support/example/integration_test/app_test.dart +++ b/packages/veilid_support/example/integration_test/app_test.dart @@ -12,6 +12,8 @@ import 'test_dht_record_pool.dart'; import 'test_dht_short_array.dart'; void main() { + final startTime = DateTime.now(); + IntegrationTestWidgetsFlutterBinding.ensureInitialized(); final veilidFixture = DefaultVeilidFixture(programName: 'veilid_support integration test'); @@ -39,26 +41,26 @@ void main() { test('create pool', testDHTRecordPoolCreate); - // group('DHTRecordPool Tests', () { - // setUpAll(dhtRecordPoolFixture.setUp); - // tearDownAll(dhtRecordPoolFixture.tearDown); + group('DHTRecordPool Tests', () { + setUpAll(dhtRecordPoolFixture.setUp); + tearDownAll(dhtRecordPoolFixture.tearDown); - // test('create/delete record', testDHTRecordCreateDelete); - // test('record scopes', testDHTRecordScopes); - // test('create/delete deep record', testDHTRecordDeepCreateDelete); - // }); + test('create/delete record', testDHTRecordCreateDelete); + test('record scopes', testDHTRecordScopes); + test('create/delete deep record', testDHTRecordDeepCreateDelete); + }); - // group('DHTShortArray Tests', () { - // setUpAll(dhtRecordPoolFixture.setUp); - // tearDownAll(dhtRecordPoolFixture.tearDown); + group('DHTShortArray Tests', () { + setUpAll(dhtRecordPoolFixture.setUp); + tearDownAll(dhtRecordPoolFixture.tearDown); - // for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) { - // test('create shortarray stride=$stride', - // makeTestDHTShortArrayCreateDelete(stride: stride)); - // test('add shortarray stride=$stride', - // makeTestDHTShortArrayAdd(stride: 256)); - // } - // }); + for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) 
{ + test('create shortarray stride=$stride', + makeTestDHTShortArrayCreateDelete(stride: stride)); + test('add shortarray stride=$stride', + makeTestDHTShortArrayAdd(stride: 256)); + } + }); group('DHTLog Tests', () { setUpAll(dhtRecordPoolFixture.setUp); @@ -75,4 +77,7 @@ void main() { }); }); }); + + final endTime = DateTime.now(); + print('Duration: ${endTime.difference(startTime)}'); } diff --git a/packages/veilid_support/example/integration_test/test_dht_log.dart b/packages/veilid_support/example/integration_test/test_dht_log.dart index fcdabad..f8d758e 100644 --- a/packages/veilid_support/example/integration_test/test_dht_log.dart +++ b/packages/veilid_support/example/integration_test/test_dht_log.dart @@ -42,7 +42,7 @@ Future Function() makeTestDHTLogCreateDelete({required int stride}) => // Operate should still succeed because things aren't closed expect(await dlog.operate((r) async => r.length), isZero); await dlog.close(); - await dlog.close(); + await expectLater(() async => dlog.close(), throwsA(isA())); // Operate should fail await expectLater(() async => dlog.operate((r) async => r.length), throwsA(isA())); @@ -51,8 +51,6 @@ Future Function() makeTestDHTLogCreateDelete({required int stride}) => Future Function() makeTestDHTLogAddTruncate({required int stride}) => () async { - final startTime = DateTime.now(); - final dlog = await DHTLog.create( debugName: 'log_add 1 stride $stride', stride: stride); @@ -121,10 +119,8 @@ Future Function() makeTestDHTLogAddTruncate({required int stride}) => final dataset8 = await dlog.operate((r) async => r.getItemRange(0)); expect(dataset8, isEmpty); } + print('delete and close\n'); await dlog.delete(); await dlog.close(); - - final endTime = DateTime.now(); - print('Duration: ${endTime.difference(startTime)}'); }; diff --git a/packages/veilid_support/example/integration_test/test_dht_record_pool.dart b/packages/veilid_support/example/integration_test/test_dht_record_pool.dart index 45b26a7..2f52c00 100644 --- 
a/packages/veilid_support/example/integration_test/test_dht_record_pool.dart +++ b/packages/veilid_support/example/integration_test/test_dht_record_pool.dart @@ -48,7 +48,7 @@ Future testDHTRecordCreateDelete() async { // Set should succeed still await rec3.tryWriteBytes(utf8.encode('test')); await rec3.close(); - await rec3.close(); + await expectLater(() async => rec3.close(), throwsA(isA())); // Set should fail await expectLater(() async => rec3.tryWriteBytes(utf8.encode('test')), throwsA(isA())); @@ -84,7 +84,7 @@ Future testDHTRecordScopes() async { } on Exception { assert(false, 'should not throw'); } - await rec2.close(); + await expectLater(() async => rec2.close(), throwsA(isA())); await pool.deleteRecord(rec2.key); } @@ -115,6 +115,7 @@ Future testDHTRecordGetSet() async { final val = await rec.get(); await pool.deleteRecord(rec.key); expect(val, isNull); + await rec.close(); } // Test set then get @@ -125,6 +126,7 @@ Future testDHTRecordGetSet() async { // Invalid subkey should throw await expectLater( () async => rec2.get(subkey: 1), throwsA(isA())); + await rec2.close(); await pool.deleteRecord(rec2.key); } diff --git a/packages/veilid_support/example/integration_test/test_dht_short_array.dart b/packages/veilid_support/example/integration_test/test_dht_short_array.dart index 6ba2d23..7dead48 100644 --- a/packages/veilid_support/example/integration_test/test_dht_short_array.dart +++ b/packages/veilid_support/example/integration_test/test_dht_short_array.dart @@ -43,7 +43,7 @@ Future Function() makeTestDHTShortArrayCreateDelete( // Operate should still succeed because things aren't closed expect(await arr.operate((r) async => r.length), isZero); await arr.close(); - await arr.close(); + await expectLater(() async => arr.close(), throwsA(isA())); // Operate should fail await expectLater(() async => arr.operate((r) async => r.length), throwsA(isA())); @@ -52,8 +52,6 @@ Future Function() makeTestDHTShortArrayCreateDelete( Future Function() 
makeTestDHTShortArrayAdd({required int stride}) => () async { - final startTime = DateTime.now(); - final arr = await DHTShortArray.create( debugName: 'sa_add 1 stride $stride', stride: stride); @@ -131,7 +129,4 @@ Future Function() makeTestDHTShortArrayAdd({required int stride}) => await arr.delete(); await arr.close(); - - final endTime = DateTime.now(); - print('Duration: ${endTime.difference(startTime)}'); }; diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart index f7b606c..5dd36a0 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart @@ -42,11 +42,13 @@ class DHTLogUpdate extends Equatable { /// * The head and tail position of the log /// - subkeyIdx = pos / recordsPerSubkey /// - recordIdx = pos % recordsPerSubkey -class DHTLog implements DHTOpenable { +class DHTLog implements DHTOpenable { //////////////////////////////////////////////////////////////// // Constructors - DHTLog._({required _DHTLogSpine spine}) : _spine = spine { + DHTLog._({required _DHTLogSpine spine}) + : _spine = spine, + _openCount = 1 { _spine.onUpdatedSpine = (update) { _watchController?.sink.add(update); }; @@ -162,18 +164,29 @@ class DHTLog implements DHTOpenable { /// Check if the DHTLog is open @override - bool get isOpen => _spine.isOpen; + bool get isOpen => _openCount > 0; + + /// Add a reference to this log + @override + Future ref() async => _mutex.protect(() async { + _openCount++; + return this; + }); /// Free all resources for the DHTLog @override - Future close() async { - if (!isOpen) { - return; - } - await _watchController?.close(); - _watchController = null; - await _spine.close(); - } + Future close() async => _mutex.protect(() async { + if (_openCount == 0) { + throw StateError('already closed'); + } + _openCount--; + if (_openCount != 0) { + return; + } + await 
_watchController?.close(); + _watchController = null; + await _spine.close(); + }); /// Free all resources for the DHTLog and delete it from the DHT /// Will wait until the short array is closed to delete it @@ -284,6 +297,10 @@ class DHTLog implements DHTOpenable { // Internal representation refreshed from spine record final _DHTLogSpine _spine; + // Openable + int _openCount; + final _mutex = Mutex(); + // Watch mutex to ensure we keep the representation valid final Mutex _listenMutex = Mutex(); // Stream of external changes diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart index 96c3eb4..26c22da 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart @@ -17,16 +17,16 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { } // Write item to the segment - return lookup.shortArray.operateWrite((write) async { - // If this a new segment, then clear it in case we have wrapped around - if (lookup.pos == 0) { - await write.clear(); - } else if (lookup.pos != write.length) { - // We should always be appending at the length - throw StateError('appending should be at the end'); - } - return write.tryAddItem(value); - }); + return lookup.shortArray.scope((sa) => sa.operateWrite((write) async { + // If this a new segment, then clear it in case we have wrapped around + if (lookup.pos == 0) { + await write.clear(); + } else if (lookup.pos != write.length) { + // We should always be appending at the length + throw StateError('appending should be at the end'); + } + return write.tryAddItem(value); + })); } @override @@ -45,16 +45,19 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { } final sacount = min(remaining, DHTShortArray.maxElements - lookup.pos); - final success = await 
lookup.shortArray.operateWrite((write) async { - // If this a new segment, then clear it in case we have wrapped around - if (lookup.pos == 0) { - await write.clear(); - } else if (lookup.pos != write.length) { - // We should always be appending at the length - throw StateError('appending should be at the end'); - } - return write.tryAddItems(values.sublist(valueIdx, valueIdx + sacount)); - }); + final success = + await lookup.shortArray.scope((sa) => sa.operateWrite((write) async { + // If this a new segment, then clear it in + // case we have wrapped around + if (lookup.pos == 0) { + await write.clear(); + } else if (lookup.pos != write.length) { + // We should always be appending at the length + throw StateError('appending should be at the end'); + } + return write + .tryAddItems(values.sublist(valueIdx, valueIdx + sacount)); + })); if (!success) { return false; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart index 0919412..ea36fc2 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart @@ -19,8 +19,8 @@ class _DHTLogRead implements DHTRandomRead { return null; } - return lookup.shortArray.operate( - (read) => read.getItem(lookup.pos, forceRefresh: forceRefresh)); + return lookup.shortArray.scope((sa) => sa.operate( + (read) => read.getItem(lookup.pos, forceRefresh: forceRefresh))); } (int, int) _clampStartLen(int start, int? 
len) { @@ -71,22 +71,22 @@ class _DHTLogRead implements DHTRandomRead { // Check each segment for offline positions var foundOffline = false; - await lookup.shortArray.operate((read) async { - final segmentOffline = await read.getOfflinePositions(); + await lookup.shortArray.scope((sa) => sa.operate((read) async { + final segmentOffline = await read.getOfflinePositions(); - // For each shortarray segment go through their segment positions - // in reverse order and see if they are offline - for (var segmentPos = lookup.pos; - segmentPos >= 0 && pos >= 0; - segmentPos--, pos--) { - // If the position in the segment is offline, then - // mark the position in the log as offline - if (segmentOffline.contains(segmentPos)) { - positionOffline.add(pos); - foundOffline = true; - } - } - }); + // For each shortarray segment go through their segment positions + // in reverse order and see if they are offline + for (var segmentPos = lookup.pos; + segmentPos >= 0 && pos >= 0; + segmentPos--, pos--) { + // If the position in the segment is offline, then + // mark the position in the log as offline + if (segmentOffline.contains(segmentPos)) { + positionOffline.add(pos); + foundOffline = true; + } + } + })); // If we found nothing offline in this segment then we can stop if (!foundOffline) { diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart index 7ce3dbf..12a80c9 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart @@ -15,6 +15,13 @@ class _DHTLogSegmentLookup extends Equatable { List get props => [subkey, segment]; } +class _SubkeyData { + _SubkeyData({required this.subkey, required this.data}); + int subkey; + Uint8List data; + bool changed = false; +} + class _DHTLogSpine { _DHTLogSpine._( {required DHTRecord spineRecord, @@ -47,7 +54,7 @@ class _DHTLogSpine { static 
Future<_DHTLogSpine> load({required DHTRecord spineRecord}) async { // Get an updated spine head record copy if one exists final spineHead = await spineRecord.getProtobuf(proto.DHTLog.fromBuffer, - subkey: 0, refreshMode: DHTRecordRefreshMode.refresh); + subkey: 0, refreshMode: DHTRecordRefreshMode.network); if (spineHead == null) { throw StateError('spine head missing during refresh'); } @@ -234,7 +241,7 @@ class _DHTLogSpine { segmentKeyBytes); } - Future _getOrCreateSegmentInner(int segmentNumber) async { + Future _openOrCreateSegmentInner(int segmentNumber) async { assert(_spineMutex.isLocked, 'should be in mutex here'); assert(_spineRecord.writer != null, 'should be writable'); @@ -292,7 +299,7 @@ class _DHTLogSpine { } } - Future _getSegmentInner(int segmentNumber) async { + Future _openSegmentInner(int segmentNumber) async { assert(_spineMutex.isLocked, 'should be in mutex here'); // Lookup what subkey and segment subrange has this position's segment @@ -321,7 +328,7 @@ class _DHTLogSpine { return segmentRec; } - Future getOrCreateSegment(int segmentNumber) async { + Future _openOrCreateSegment(int segmentNumber) async { assert(_spineMutex.isLocked, 'should be in mutex here'); // See if we already have this in the cache @@ -331,21 +338,22 @@ class _DHTLogSpine { final x = _spineCache.removeAt(i); _spineCache.add(x); // Return the shortarray for this position - return x.$2; + return x.$2.ref(); } } - // If we don't have it in the cache, get/create it and then cache it - final segment = await _getOrCreateSegmentInner(segmentNumber); - _spineCache.add((segmentNumber, segment)); + // If we don't have it in the cache, get/create it and then cache a ref + final segment = await _openOrCreateSegmentInner(segmentNumber); + _spineCache.add((segmentNumber, await segment.ref())); if (_spineCache.length > _spineCacheLength) { // Trim the LRU cache - _spineCache.removeAt(0); + final (_, sa) = _spineCache.removeAt(0); + await sa.close(); } return segment; } - Future 
getSegment(int segmentNumber) async { + Future _openSegment(int segmentNumber) async { assert(_spineMutex.isLocked, 'should be in mutex here'); // See if we already have this in the cache @@ -355,19 +363,20 @@ class _DHTLogSpine { final x = _spineCache.removeAt(i); _spineCache.add(x); // Return the shortarray for this position - return x.$2; + return x.$2.ref(); } } // If we don't have it in the cache, get it and then cache it - final segment = await _getSegmentInner(segmentNumber); + final segment = await _openSegmentInner(segmentNumber); if (segment == null) { return null; } - _spineCache.add((segmentNumber, segment)); + _spineCache.add((segmentNumber, await segment.ref())); if (_spineCache.length > _spineCacheLength) { // Trim the LRU cache - _spineCache.removeAt(0); + final (_, sa) = _spineCache.removeAt(0); + await sa.close(); } return segment; } @@ -409,8 +418,8 @@ class _DHTLogSpine { // Get the segment shortArray final shortArray = (_spineRecord.writer == null) - ? await getSegment(segmentNumber) - : await getOrCreateSegment(segmentNumber); + ? await _openSegment(segmentNumber) + : await _openOrCreateSegment(segmentNumber); if (shortArray == null) { return null; } @@ -442,12 +451,16 @@ class _DHTLogSpine { throw StateError('ring buffer underflow'); } + final oldHead = _head; _head = (_head + count) % _positionLimit; - await _purgeUnusedSegments(); + final newHead = _head; + await _purgeSegments(oldHead, newHead); } Future _deleteSegmentsContiguous(int start, int end) async { assert(_spineMutex.isLocked, 'should be in mutex here'); + DHTRecordPool.instance + .log('_deleteSegmentsContiguous: start=$start, end=$end'); final startSegmentNumber = start ~/ DHTShortArray.maxElements; final startSegmentPos = start % DHTShortArray.maxElements; @@ -460,8 +473,7 @@ class _DHTLogSpine { final lastDeleteSegment = (endSegmentPos == 0) ? endSegmentNumber - 1 : endSegmentNumber - 2; - int? lastSubkey; - Uint8List? subkeyData; + _SubkeyData? 
lastSubkeyData; for (var segmentNumber = firstDeleteSegment; segmentNumber <= lastDeleteSegment; segmentNumber++) { @@ -471,44 +483,48 @@ class _DHTLogSpine { final subkey = l.subkey; final segment = l.segment; - if (lastSubkey != subkey) { + if (subkey != lastSubkeyData?.subkey) { // Flush subkey writes - if (lastSubkey != null) { - await _spineRecord.eventualWriteBytes(subkeyData!, - subkey: lastSubkey); + if (lastSubkeyData != null && lastSubkeyData.changed) { + await _spineRecord.eventualWriteBytes(lastSubkeyData.data, + subkey: lastSubkeyData.subkey); } - xxx debug this, it takes forever - - // Get next subkey - subkeyData = await _spineRecord.get(subkey: subkey); - if (subkeyData != null) { - lastSubkey = subkey; + // Get next subkey if available locally + final data = await _spineRecord.get( + subkey: subkey, refreshMode: DHTRecordRefreshMode.local); + if (data != null) { + lastSubkeyData = _SubkeyData(subkey: subkey, data: data); } else { - lastSubkey = null; + lastSubkeyData = null; + // If the subkey was not available locally we can go to the + // last segment number at the end of this subkey + segmentNumber = ((subkey + 1) * DHTLog.segmentsPerSubkey) - 1; } } - if (subkeyData != null) { - final segmentKey = _getSegmentKey(subkeyData, segment); + if (lastSubkeyData != null) { + final segmentKey = _getSegmentKey(lastSubkeyData.data, segment); if (segmentKey != null) { await DHTRecordPool.instance.deleteRecord(segmentKey); - _setSegmentKey(subkeyData, segment, null); + _setSegmentKey(lastSubkeyData.data, segment, null); + lastSubkeyData.changed = true; } } } // Flush subkey writes - if (lastSubkey != null) { - await _spineRecord.eventualWriteBytes(subkeyData!, subkey: lastSubkey); + if (lastSubkeyData != null) { + await _spineRecord.eventualWriteBytes(lastSubkeyData.data, + subkey: lastSubkeyData.subkey); } } - Future _purgeUnusedSegments() async { + Future _purgeSegments(int from, int to) async { assert(_spineMutex.isLocked, 'should be in mutex here'); - 
if (_head < _tail) { - await _deleteSegmentsContiguous(0, _head); - await _deleteSegmentsContiguous(_tail, _positionLimit); - } else if (_head > _tail) { - await _deleteSegmentsContiguous(_tail, _head); + if (from < to) { + await _deleteSegmentsContiguous(from, to); + } else if (from > to) { + await _deleteSegmentsContiguous(from, _positionLimit); + await _deleteSegmentsContiguous(0, to); } } diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart index 0b4e0b6..a333160 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/default_dht_record_cubit.dart @@ -39,7 +39,7 @@ class DefaultDHTRecordCubit extends DHTRecordCubit { final firstSubkey = subkeys.firstOrNull!.low; if (firstSubkey != defaultSubkey || updatedata == null) { final maybeData = - await record.get(refreshMode: DHTRecordRefreshMode.refresh); + await record.get(refreshMode: DHTRecordRefreshMode.network); if (maybeData == null) { return null; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart index 3d625f8..7bf5129 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart @@ -16,19 +16,27 @@ class DHTRecordWatchChange extends Equatable { /// Refresh mode for DHT record 'get' enum DHTRecordRefreshMode { /// Return existing subkey values if they exist locally already - existing, + /// And then check the network for a newer value + /// This is the default refresh mode + cached, + + /// Return existing subkey values only if they exist locally already + local, /// Always check the network for a newer subkey value - refresh, + network, /// Always check the network for a 
newer subkey value but only /// return that value if its sequence number is newer than the local value - refreshOnlyUpdates, + update; + + bool get _forceRefresh => this == network || this == update; + bool get _inspectLocal => this == local || this == update; } ///////////////////////////////////////////////// -class DHTRecord implements DHTOpenable { +class DHTRecord implements DHTOpenable { DHTRecord._( {required VeilidRoutingContext routingContext, required SharedDHTRecordData sharedDHTRecordData, @@ -40,7 +48,7 @@ class DHTRecord implements DHTOpenable { _routingContext = routingContext, _defaultSubkey = defaultSubkey, _writer = writer, - _open = true, + _openCount = 1, _sharedDHTRecordData = sharedDHTRecordData; //////////////////////////////////////////////////////////////////////////// @@ -48,25 +56,37 @@ class DHTRecord implements DHTOpenable { /// Check if the DHTRecord is open @override - bool get isOpen => _open; + bool get isOpen => _openCount > 0; + + /// Add a reference to this DHTRecord + @override + Future ref() async => _mutex.protect(() async { + _openCount++; + return this; + }); /// Free all resources for the DHTRecord @override - Future close() async { - if (!_open) { - return; - } - await watchController?.close(); - await DHTRecordPool.instance._recordClosed(this); - _open = false; - } + Future close() async => _mutex.protect(() async { + if (_openCount == 0) { + throw StateError('already closed'); + } + _openCount--; + if (_openCount != 0) { + return; + } + + await _watchController?.close(); + _watchController = null; + await DHTRecordPool.instance._recordClosed(this); + }); /// Free all resources for the DHTRecord and delete it from the DHT /// Will wait until the record is closed to delete it @override - Future delete() async { - await DHTRecordPool.instance.deleteRecord(key); - } + Future delete() async => _mutex.protect(() async { + await DHTRecordPool.instance.deleteRecord(key); + }); 
//////////////////////////////////////////////////////////////////////////// // Public API @@ -95,25 +115,37 @@ class DHTRecord implements DHTOpenable { Future get( {int subkey = -1, DHTRecordCrypto? crypto, - DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.existing, + DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.cached, Output? outSeqNum}) async { subkey = subkeyOrDefault(subkey); + + // Get the last sequence number if we need it + final lastSeq = + refreshMode._inspectLocal ? await _localSubkeySeq(subkey) : null; + + // See if we only ever want the locally stored value + if (refreshMode == DHTRecordRefreshMode.local && lastSeq == null) { + // If it's not available locally already just return null now + return null; + } + final valueData = await _routingContext.getDHTValue(key, subkey, - forceRefresh: refreshMode != DHTRecordRefreshMode.existing); + forceRefresh: refreshMode._forceRefresh); if (valueData == null) { return null; } - final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey]; - if (refreshMode == DHTRecordRefreshMode.refreshOnlyUpdates && + // See if this get resulted in a newer sequence number + if (refreshMode == DHTRecordRefreshMode.update && lastSeq != null && valueData.seq <= lastSeq) { + // If we're only returning updates then punt now return null; } + // If we're returning a value, decrypt it final out = (crypto ?? _crypto).decrypt(valueData.data, subkey); if (outSeqNum != null) { outSeqNum.save(valueData.seq); } - _sharedDHTRecordData.subkeySeqCache[subkey] = valueData.seq; return out; } @@ -128,7 +160,7 @@ class DHTRecord implements DHTOpenable { Future getJson(T Function(dynamic) fromJson, {int subkey = -1, DHTRecordCrypto? crypto, - DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.existing, + DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.cached, Output? 
outSeqNum}) async { final data = await get( subkey: subkey, @@ -154,7 +186,7 @@ class DHTRecord implements DHTOpenable { T Function(List i) fromBuffer, {int subkey = -1, DHTRecordCrypto? crypto, - DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.existing, + DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.cached, Output? outSeqNum}) async { final data = await get( subkey: subkey, @@ -176,7 +208,7 @@ class DHTRecord implements DHTOpenable { KeyPair? writer, Output? outSeqNum}) async { subkey = subkeyOrDefault(subkey); - final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey]; + final lastSeq = await _localSubkeySeq(subkey); final encryptedNewValue = await (crypto ?? _crypto).encrypt(newValue, subkey); @@ -198,7 +230,6 @@ class DHTRecord implements DHTOpenable { if (isUpdated && outSeqNum != null) { outSeqNum.save(newValueData.seq); } - _sharedDHTRecordData.subkeySeqCache[subkey] = newValueData.seq; // See if the encrypted data returned is exactly the same // if so, shortcut and don't bother decrypting it @@ -228,7 +259,7 @@ class DHTRecord implements DHTOpenable { KeyPair? writer, Output? outSeqNum}) async { subkey = subkeyOrDefault(subkey); - final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey]; + final lastSeq = await _localSubkeySeq(subkey); final encryptedNewValue = await (crypto ?? _crypto).encrypt(newValue, subkey); @@ -254,7 +285,6 @@ class DHTRecord implements DHTOpenable { if (outSeqNum != null) { outSeqNum.save(newValueData.seq); } - _sharedDHTRecordData.subkeySeqCache[subkey] = newValueData.seq; // The encrypted data returned should be exactly the same // as what we are trying to set, @@ -402,13 +432,13 @@ class DHTRecord implements DHTOpenable { DHTRecordCrypto? 
crypto, }) async { // Set up watch requirements - watchController ??= + _watchController ??= StreamController.broadcast(onCancel: () { // If there are no more listeners then we can get rid of the controller - watchController = null; + _watchController = null; }); - return watchController!.stream.listen( + return _watchController!.stream.listen( (change) { if (change.local && !localChanges) { return; @@ -431,8 +461,8 @@ class DHTRecord implements DHTOpenable { }, cancelOnError: true, onError: (e) async { - await watchController!.close(); - watchController = null; + await _watchController!.close(); + _watchController = null; }); } @@ -455,6 +485,14 @@ class DHTRecord implements DHTOpenable { ////////////////////////////////////////////////////////////////////////// + Future _localSubkeySeq(int subkey) async { + final rr = await _routingContext.inspectDHTRecord( + key, + subkeys: [ValueSubkeyRange.single(subkey)], + ); + return rr.localSeqs.firstOrNull ?? 0xFFFFFFFF; + } + void _addValueChange( {required bool local, required Uint8List? data, @@ -464,7 +502,7 @@ class DHTRecord implements DHTOpenable { final watchedSubkeys = ws.subkeys; if (watchedSubkeys == null) { // Report all subkeys - watchController?.add( + _watchController?.add( DHTRecordWatchChange(local: local, data: data, subkeys: subkeys)); } else { // Only some subkeys are being watched, see if the reported update @@ -479,7 +517,7 @@ class DHTRecord implements DHTOpenable { overlappedFirstSubkey == updateFirstSubkey ? data : null; // Report only watched subkeys - watchController?.add(DHTRecordWatchChange( + _watchController?.add(DHTRecordWatchChange( local: local, data: updatedData, subkeys: overlappedSubkeys)); } } @@ -504,10 +542,9 @@ class DHTRecord implements DHTOpenable { final KeyPair? _writer; final DHTRecordCrypto _crypto; final String debugName; - - bool _open; - @internal - StreamController? watchController; + final _mutex = Mutex(); + int _openCount; + StreamController? 
_watchController; @internal WatchState? watchState; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart index 8616658..1cfcfcd 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_cubit.dart @@ -93,7 +93,7 @@ class DHTRecordCubit extends Cubit> { for (final skr in subkeys) { for (var sk = skr.low; sk <= skr.high; sk++) { final data = await _record.get( - subkey: sk, refreshMode: DHTRecordRefreshMode.refreshOnlyUpdates); + subkey: sk, refreshMode: DHTRecordRefreshMode.update); if (data != null) { final newState = await _stateFunction(_record, updateSubkeys, data); if (newState != null) { diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart index a4748df..a8e86a1 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record_pool.dart @@ -88,7 +88,6 @@ class SharedDHTRecordData { DHTRecordDescriptor recordDescriptor; KeyPair? defaultWriter; VeilidRoutingContext defaultRoutingContext; - Map subkeySeqCache = {}; bool needsWatchStateUpdate = false; WatchState? 
unionWatchState; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart index 082a391..cd62fa6 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart @@ -13,12 +13,13 @@ part 'dht_short_array_write.dart'; /////////////////////////////////////////////////////////////////////// -class DHTShortArray implements DHTOpenable { +class DHTShortArray implements DHTOpenable { //////////////////////////////////////////////////////////////// // Constructors DHTShortArray._({required DHTRecord headRecord}) - : _head = _DHTShortArrayHead(headRecord: headRecord) { + : _head = _DHTShortArrayHead(headRecord: headRecord), + _openCount = 1 { _head.onUpdatedHead = () { _watchController?.sink.add(null); }; @@ -139,18 +140,30 @@ class DHTShortArray implements DHTOpenable { /// Check if the shortarray is open @override - bool get isOpen => _head.isOpen; + bool get isOpen => _openCount > 0; + + /// Add a reference to this shortarray + @override + Future ref() async => _mutex.protect(() async { + _openCount++; + return this; + }); /// Free all resources for the DHTShortArray @override - Future close() async { - if (!isOpen) { - return; - } - await _watchController?.close(); - _watchController = null; - await _head.close(); - } + Future close() async => _mutex.protect(() async { + if (_openCount == 0) { + throw StateError('already closed'); + } + _openCount--; + if (_openCount != 0) { + return; + } + + await _watchController?.close(); + _watchController = null; + await _head.close(); + }); /// Free all resources for the DHTShortArray and delete it from the DHT /// Will wait until the short array is closed to delete it @@ -255,6 +268,10 @@ class DHTShortArray implements DHTOpenable { // Internal representation refreshed from head record final 
_DHTShortArrayHead _head; + // Openable + int _openCount; + final _mutex = Mutex(); + // Watch mutex to ensure we keep the representation valid final Mutex _listenMutex = Mutex(); // Stream of external changes diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart index e2bf392..1403e87 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart @@ -248,7 +248,7 @@ class _DHTShortArrayHead { Future _loadHead() async { // Get an updated head record copy if one exists final head = await _headRecord.getProtobuf(proto.DHTShortArray.fromBuffer, - subkey: 0, refreshMode: DHTRecordRefreshMode.refresh); + subkey: 0, refreshMode: DHTRecordRefreshMode.network); if (head == null) { throw StateError('shortarray head missing during refresh'); } diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart index 88cefde..919564c 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart @@ -22,8 +22,8 @@ class _DHTShortArrayRead implements DHTRandomRead { final out = lookup.record.get( subkey: lookup.recordSubkey, refreshMode: refresh - ? DHTRecordRefreshMode.refresh - : DHTRecordRefreshMode.existing, + ? 
DHTRecordRefreshMode.network + : DHTRecordRefreshMode.cached, outSeqNum: outSeqNum); if (outSeqNum.value != null) { _head.updatePositionSeq(pos, false, outSeqNum.value!); diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart index e28f703..ffd58f9 100644 --- a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart @@ -1,12 +1,13 @@ import 'dart:async'; -abstract class DHTOpenable { +abstract class DHTOpenable { bool get isOpen; + Future ref(); Future close(); Future delete(); } -extension DHTOpenableExt on D { +extension DHTOpenableExt> on D { /// Runs a closure that guarantees the DHTOpenable /// will be closed upon exit, even if an uncaught exception is thrown Future scope(Future Function(D) scopeFunction) async { diff --git a/packages/veilid_support/lib/src/identity.dart b/packages/veilid_support/lib/src/identity.dart index 2645894..5721461 100644 --- a/packages/veilid_support/lib/src/identity.dart +++ b/packages/veilid_support/lib/src/identity.dart @@ -301,7 +301,7 @@ Future openIdentityMaster( 'IdentityMaster::openIdentityMaster::IdentityMasterRecord')) .deleteScope((masterRec) async { final identityMaster = (await masterRec.getJson(IdentityMaster.fromJson, - refreshMode: DHTRecordRefreshMode.refresh))!; + refreshMode: DHTRecordRefreshMode.network))!; // Validate IdentityMaster final masterRecordKey = masterRec.key; From ed893852a2308e0e8f5fe3f4d2ed30803e476655 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Mon, 20 May 2024 20:48:17 -0400 Subject: [PATCH 5/7] flutter updates --- .../chat_single_contact_list_widget.dart | 2 +- .../views/scan_invitation_dialog.dart | 117 +++---- lib/contacts/views/contact_list_widget.dart | 2 +- .../example/integration_test/app_test.dart | 18 +- packages/veilid_support/example/pubspec.lock | 290 +++++++++++++++++- 
packages/veilid_support/example/pubspec.yaml | 1 + .../src/dht_log/dht_log_append.dart | 44 ++- .../dht_short_array/dht_short_array_head.dart | 31 +- .../dht_short_array/dht_short_array_read.dart | 2 +- .../dht_short_array_write.dart | 94 ++++-- packages/veilid_support/pubspec.lock | 22 +- packages/veilid_support/pubspec.yaml | 6 +- pubspec.lock | 90 +++--- pubspec.yaml | 9 +- 14 files changed, 528 insertions(+), 200 deletions(-) diff --git a/lib/chat_list/views/chat_single_contact_list_widget.dart b/lib/chat_list/views/chat_single_contact_list_widget.dart index a671011..785dbcb 100644 --- a/lib/chat_list/views/chat_single_contact_list_widget.dart +++ b/lib/chat_list/views/chat_single_contact_list_widget.dart @@ -34,7 +34,7 @@ class ChatSingleContactListWidget extends StatelessWidget { ? const EmptyChatListWidget() : SearchableList( initialList: chatList.map((x) => x.value).toList(), - builder: (l, i, c) { + itemBuilder: (c) { final contact = contactMap[c.remoteConversationRecordKey]; if (contact == null) { diff --git a/lib/contact_invitation/views/scan_invitation_dialog.dart b/lib/contact_invitation/views/scan_invitation_dialog.dart index 44bb32e..ab47df0 100644 --- a/lib/contact_invitation/views/scan_invitation_dialog.dart +++ b/lib/contact_invitation/views/scan_invitation_dialog.dart @@ -1,5 +1,4 @@ import 'dart:async'; -import 'dart:io'; import 'dart:typed_data'; import 'package:awesome_extensions/awesome_extensions.dart'; @@ -16,65 +15,64 @@ import '../../theme/theme.dart'; import '../../tools/tools.dart'; import 'invitation_dialog.dart'; -class BarcodeOverlay extends CustomPainter { - BarcodeOverlay({ - required this.barcode, - required this.arguments, - required this.boxFit, - required this.capture, - }); +// class BarcodeOverlay extends CustomPainter { +// BarcodeOverlay({ +// required this.barcode, +// required this.boxFit, +// required this.capture, +// required this.size, +// }); - final BarcodeCapture capture; - final Barcode barcode; - final 
MobileScannerArguments arguments; - final BoxFit boxFit; +// final BarcodeCapture capture; +// final Barcode barcode; +// final BoxFit boxFit; +// final Size size; - @override - void paint(Canvas canvas, Size size) { - final adjustedSize = applyBoxFit(boxFit, arguments.size, size); +// @override +// void paint(Canvas canvas, Size size) { +// final adjustedSize = applyBoxFit(boxFit, size, size); - var verticalPadding = size.height - adjustedSize.destination.height; - var horizontalPadding = size.width - adjustedSize.destination.width; - if (verticalPadding > 0) { - verticalPadding = verticalPadding / 2; - } else { - verticalPadding = 0; - } +// var verticalPadding = size.height - adjustedSize.destination.height; +// var horizontalPadding = size.width - adjustedSize.destination.width; +// if (verticalPadding > 0) { +// verticalPadding = verticalPadding / 2; +// } else { +// verticalPadding = 0; +// } - if (horizontalPadding > 0) { - horizontalPadding = horizontalPadding / 2; - } else { - horizontalPadding = 0; - } +// if (horizontalPadding > 0) { +// horizontalPadding = horizontalPadding / 2; +// } else { +// horizontalPadding = 0; +// } - final ratioWidth = (Platform.isIOS ? capture.width : arguments.size.width) / - adjustedSize.destination.width; - final ratioHeight = - (Platform.isIOS ? capture.height : arguments.size.height) / - adjustedSize.destination.height; +// final ratioWidth = (Platform.isIOS ? capture.size.width : size.width) / +// adjustedSize.destination.width; +// final ratioHeight = (Platform.isIOS ? 
capture.size.height : size.height) / +// adjustedSize.destination.height; - final adjustedOffset = []; - for (final offset in barcode.corners) { - adjustedOffset.add( - Offset( - offset.dx / ratioWidth + horizontalPadding, - offset.dy / ratioHeight + verticalPadding, - ), - ); - } - final cutoutPath = Path()..addPolygon(adjustedOffset, true); +// final adjustedOffset = []; +// for (final offset in barcode.corners) { +// adjustedOffset.add( +// Offset( +// offset.dx / ratioWidth + horizontalPadding, +// offset.dy / ratioHeight + verticalPadding, +// ), +// ); +// } +// final cutoutPath = Path()..addPolygon(adjustedOffset, true); - final backgroundPaint = Paint() - ..color = Colors.red.withOpacity(0.3) - ..style = PaintingStyle.fill - ..blendMode = BlendMode.dstOut; +// final backgroundPaint = Paint() +// ..color = Colors.red.withOpacity(0.3) +// ..style = PaintingStyle.fill +// ..blendMode = BlendMode.dstOut; - canvas.drawPath(cutoutPath, backgroundPaint); - } +// canvas.drawPath(cutoutPath, backgroundPaint); +// } - @override - bool shouldRepaint(covariant CustomPainter oldDelegate) => false; -} +// @override +// bool shouldRepaint(covariant CustomPainter oldDelegate) => false; +// } class ScannerOverlay extends CustomPainter { ScannerOverlay(this.scanWindow); @@ -202,9 +200,9 @@ class ScanInvitationDialogState extends State { IconButton( color: Colors.white, icon: ValueListenableBuilder( - valueListenable: cameraController.torchState, + valueListenable: cameraController, builder: (context, state, child) { - switch (state) { + switch (state.torchState) { case TorchState.off: return Icon(Icons.flash_off, color: @@ -212,6 +210,12 @@ class ScanInvitationDialogState extends State { case TorchState.on: return Icon(Icons.flash_on, color: scale.primaryScale.primary); + case TorchState.auto: + return Icon(Icons.flash_auto, + color: scale.primaryScale.primary); + case TorchState.unavailable: + return Icon(Icons.no_flash, + color: scale.primaryScale.primary); } }, ), @@ 
-236,10 +240,9 @@ class ScanInvitationDialogState extends State { IconButton( color: Colors.white, icon: ValueListenableBuilder( - valueListenable: - cameraController.cameraFacingState, + valueListenable: cameraController, builder: (context, state, child) { - switch (state) { + switch (state.cameraDirection) { case CameraFacing.front: return const Icon(Icons.camera_front); case CameraFacing.back: @@ -265,7 +268,7 @@ class ScanInvitationDialogState extends State { SchedulerBinding.instance .addPostFrameCallback((_) { cameraController.dispose(); - Navigator.pop(context, null); + Navigator.pop(context); }) })), ], diff --git a/lib/contacts/views/contact_list_widget.dart b/lib/contacts/views/contact_list_widget.dart index 4c83a92..6ef3ca0 100644 --- a/lib/contacts/views/contact_list_widget.dart +++ b/lib/contacts/views/contact_list_widget.dart @@ -38,7 +38,7 @@ class ContactListWidget extends StatelessWidget { ? const EmptyContactListWidget() : SearchableList( initialList: contactList.toList(), - builder: (l, i, c) => + itemBuilder: (c) => ContactItemWidget(contact: c, disabled: disabled) .paddingLTRB(0, 4, 0, 0), filter: (value) { diff --git a/packages/veilid_support/example/integration_test/app_test.dart b/packages/veilid_support/example/integration_test/app_test.dart index b3548a2..9c85998 100644 --- a/packages/veilid_support/example/integration_test/app_test.dart +++ b/packages/veilid_support/example/integration_test/app_test.dart @@ -1,8 +1,9 @@ -@Timeout(Duration(seconds: 240)) +//@Timeout(Duration(seconds: 240)) -library veilid_support_integration_test; +//library veilid_support_integration_test; -import 'package:flutter_test/flutter_test.dart'; +import 'package:flutter/foundation.dart'; +import 'package:test/test.dart'; import 'package:integration_test/integration_test.dart'; import 'package:veilid_test/veilid_test.dart'; @@ -28,6 +29,10 @@ void main() { group('Started Tests', () { setUpAll(veilidFixture.setUp); tearDownAll(veilidFixture.tearDown); + 
tearDownAll(() { + final endTime = DateTime.now(); + debugPrintSynchronously('Duration: ${endTime.difference(startTime)}'); + }); group('Attached Tests', () { setUpAll(veilidFixture.attach); @@ -58,7 +63,7 @@ void main() { test('create shortarray stride=$stride', makeTestDHTShortArrayCreateDelete(stride: stride)); test('add shortarray stride=$stride', - makeTestDHTShortArrayAdd(stride: 256)); + makeTestDHTShortArrayAdd(stride: stride)); } }); @@ -70,14 +75,11 @@ void main() { test('create log stride=$stride', makeTestDHTLogCreateDelete(stride: stride)); test('add/truncate log stride=$stride', - makeTestDHTLogAddTruncate(stride: 256), + makeTestDHTLogAddTruncate(stride: stride), timeout: const Timeout(Duration(seconds: 480))); } }); }); }); }); - - final endTime = DateTime.now(); - print('Duration: ${endTime.difference(startTime)}'); } diff --git a/packages/veilid_support/example/pubspec.lock b/packages/veilid_support/example/pubspec.lock index a3fee79..3defe80 100644 --- a/packages/veilid_support/example/pubspec.lock +++ b/packages/veilid_support/example/pubspec.lock @@ -1,6 +1,30 @@ # Generated by pub # See https://dart.dev/tools/pub/glossary#lockfile packages: + _fe_analyzer_shared: + dependency: transitive + description: + name: _fe_analyzer_shared + sha256: "0b2f2bd91ba804e53a61d757b986f89f1f9eaed5b11e4b2f5a2468d86d6c9fc7" + url: "https://pub.dev" + source: hosted + version: "67.0.0" + analyzer: + dependency: transitive + description: + name: analyzer + sha256: "37577842a27e4338429a1cbc32679d508836510b056f1eedf0c8d20e39c1383d" + url: "https://pub.dev" + source: hosted + version: "6.4.1" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" async: dependency: transitive description: @@ -81,6 +105,30 @@ packages: url: "https://pub.dev" source: hosted version: "1.18.0" + convert: + dependency: transitive + description: + 
name: convert + sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592" + url: "https://pub.dev" + source: hosted + version: "3.1.1" + coverage: + dependency: transitive + description: + name: coverage + sha256: "3945034e86ea203af7a056d98e98e42a5518fff200d6e8e6647e1886b07e936e" + url: "https://pub.dev" + source: hosted + version: "1.8.0" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" cupertino_icons: dependency: "direct main" description: @@ -109,10 +157,10 @@ packages: dependency: transitive description: name: fast_immutable_collections - sha256: "38fbc50df5b219dcfb83ebbc3275ec09872530ca1153858fc56fceadb310d037" + sha256: "533806a7f0c624c2e479d05d3fdce4c87109a7cd0db39b8cc3830d3a2e8dedc7" url: "https://pub.dev" source: hosted - version: "10.2.2" + version: "10.2.3" ffi: dependency: transitive description: @@ -165,11 +213,27 @@ packages: url: "https://pub.dev" source: hosted version: "2.4.1" + frontend_server_client: + dependency: transitive + description: + name: frontend_server_client + sha256: f64a0333a82f30b0cca061bc3d143813a486dc086b574bfb233b7c1372427694 + url: "https://pub.dev" + source: hosted + version: "4.0.0" fuchsia_remote_debug_protocol: dependency: transitive description: flutter source: sdk version: "0.0.0" + glob: + dependency: transitive + description: + name: glob + sha256: "0e7014b3b7d4dac1ca4d6114f82bf1782ee86745b9b42a92c9289c23d8a0ab63" + url: "https://pub.dev" + source: hosted + version: "2.1.2" globbing: dependency: transitive description: @@ -178,11 +242,43 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + http_multi_server: + dependency: transitive + description: + name: http_multi_server + sha256: "97486f20f9c2f7be8f514851703d0119c3596d14ea63227af6f7a481ef2b2f8b" + url: "https://pub.dev" + source: hosted + version: "3.2.1" + http_parser: + dependency: 
transitive + description: + name: http_parser + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" integration_test: dependency: "direct dev" description: flutter source: sdk version: "0.0.0" + io: + dependency: transitive + description: + name: io + sha256: "2ec25704aba361659e10e3e5f5d672068d332fc8ac516421d483a11e5cbd061e" + url: "https://pub.dev" + source: hosted + version: "1.0.4" + js: + dependency: transitive + description: + name: js + sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf + url: "https://pub.dev" + source: hosted + version: "0.7.1" json_annotation: dependency: transitive description: @@ -195,26 +291,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" lint_hard: dependency: "direct dev" description: @@ -223,6 +319,14 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.0" + logging: + dependency: transitive + description: + name: logging + sha256: "623a88c9594aa774443aa3eb2d41807a48486b5613e67599fb4c41c0ad47c340" + url: "https://pub.dev" + source: hosted 
+ version: "1.2.0" loggy: dependency: transitive description: @@ -251,10 +355,34 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" + mime: + dependency: transitive + description: + name: mime + sha256: "2e123074287cc9fd6c09de8336dae606d1ddb88d9ac47358826db698c176a1f2" + url: "https://pub.dev" + source: hosted + version: "1.0.5" + node_preamble: + dependency: transitive + description: + name: node_preamble + sha256: "6e7eac89047ab8a8d26cf16127b5ed26de65209847630400f9aefd7cd5c730db" + url: "https://pub.dev" + source: hosted + version: "2.0.2" + package_config: + dependency: transitive + description: + name: package_config + sha256: "1c5b77ccc91e4823a5af61ee74e6b972db1ef98c2ff5a18d3161c982a55448bd" + url: "https://pub.dev" + source: hosted + version: "2.1.0" path: dependency: transitive description: @@ -327,6 +455,14 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.8" + pool: + dependency: transitive + description: + name: pool + sha256: "20fe868b6314b322ea036ba325e6fc0711a22948856475e2c2b6306e8ab39c2a" + url: "https://pub.dev" + source: hosted + version: "1.5.1" process: dependency: transitive description: @@ -343,11 +479,67 @@ packages: url: "https://pub.dev" source: hosted version: "3.1.0" + pub_semver: + dependency: transitive + description: + name: pub_semver + sha256: "40d3ab1bbd474c4c2328c91e3a7df8c6dd629b79ece4c4bd04bee496a224fb0c" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + shelf: + dependency: transitive + description: + name: shelf + sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + url: "https://pub.dev" + source: hosted + version: "1.4.1" + shelf_packages_handler: + dependency: transitive + description: + name: shelf_packages_handler + sha256: 
"89f967eca29607c933ba9571d838be31d67f53f6e4ee15147d5dc2934fee1b1e" + url: "https://pub.dev" + source: hosted + version: "3.0.2" + shelf_static: + dependency: transitive + description: + name: shelf_static + sha256: a41d3f53c4adf0f57480578c1d61d90342cd617de7fc8077b1304643c2d85c1e + url: "https://pub.dev" + source: hosted + version: "1.1.2" + shelf_web_socket: + dependency: transitive + description: + name: shelf_web_socket + sha256: "9ca081be41c60190ebcb4766b2486a7d50261db7bd0f5d9615f2d653637a84c1" + url: "https://pub.dev" + source: hosted + version: "1.0.4" sky_engine: dependency: transitive description: flutter source: sdk version: "0.0.99" + source_map_stack_trace: + dependency: transitive + description: + name: source_map_stack_trace + sha256: "84cf769ad83aa6bb61e0aa5a18e53aea683395f196a6f39c4c881fb90ed4f7ae" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + source_maps: + dependency: transitive + description: + name: source_maps + sha256: "708b3f6b97248e5781f493b765c3337db11c5d2c81c3094f10904bfa8004c703" + url: "https://pub.dev" + source: hosted + version: "0.10.12" source_span: dependency: transitive description: @@ -412,14 +604,38 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.1" + test: + dependency: "direct dev" + description: + name: test + sha256: "7ee446762c2c50b3bd4ea96fe13ffac69919352bd3b4b17bac3f3465edc58073" + url: "https://pub.dev" + source: hosted + version: "1.25.2" test_api: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" + test_core: + dependency: transitive + description: + name: test_core + sha256: "2bc4b4ecddd75309300d8096f781c0e3280ca1ef85beda558d33fcbedc2eead4" + url: "https://pub.dev" + source: hosted + version: "0.6.0" + typed_data: + dependency: transitive + description: + name: 
typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" vector_math: dependency: transitive description: @@ -453,10 +669,34 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" + watcher: + dependency: transitive + description: + name: watcher + sha256: "3d2ad6751b3c16cf07c7fca317a1413b3f26530319181b37e3b9039b84fc01d8" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + web: + dependency: transitive + description: + name: web + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + url: "https://pub.dev" + source: hosted + version: "0.5.1" + web_socket_channel: + dependency: transitive + description: + name: web_socket_channel + sha256: "58c6666b342a38816b2e7e50ed0f1e261959630becd4c879c4f26bfa14aa5a42" + url: "https://pub.dev" + source: hosted + version: "2.4.5" webdriver: dependency: transitive description: @@ -465,14 +705,22 @@ packages: url: "https://pub.dev" source: hosted version: "3.0.3" + webkit_inspection_protocol: + dependency: transitive + description: + name: webkit_inspection_protocol + sha256: "87d3f2333bb240704cd3f1c6b5b7acd8a10e7f0bc28c28dcf14e782014f4a572" + url: "https://pub.dev" + source: hosted + version: "1.2.1" win32: dependency: transitive description: name: win32 - sha256: "0eaf06e3446824099858367950a813472af675116bf63f008a4c2a75ae13e9cb" + sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4 url: "https://pub.dev" source: hosted - version: "5.5.0" + version: "5.5.1" xdg_directories: dependency: transitive description: @@ -481,6 +729,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.4" + yaml: + dependency: transitive + description: + name: yaml 
+ sha256: "75769501ea3489fca56601ff33454fe45507ea3bfb014161abc3b43ae25989d5" + url: "https://pub.dev" + source: hosted + version: "3.1.2" sdks: - dart: ">=3.3.4 <4.0.0" + dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.1" diff --git a/packages/veilid_support/example/pubspec.yaml b/packages/veilid_support/example/pubspec.yaml index 1c73078..60f06e7 100644 --- a/packages/veilid_support/example/pubspec.yaml +++ b/packages/veilid_support/example/pubspec.yaml @@ -20,6 +20,7 @@ dev_dependencies: integration_test: sdk: flutter lint_hard: ^4.0.0 + test: ^1.25.2 veilid_test: path: ../../../../veilid/veilid-flutter/packages/veilid_test diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart index 26c22da..877df89 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart @@ -36,6 +36,9 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { _spine.allocateTail(values.length); // Look up the first position and shortarray + final dws = DelayedWaitSet(); + + var success = true; for (var valueIdx = 0; valueIdx < values.length;) { final remaining = values.length - valueIdx; @@ -45,25 +48,32 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { } final sacount = min(remaining, DHTShortArray.maxElements - lookup.pos); - final success = - await lookup.shortArray.scope((sa) => sa.operateWrite((write) async { - // If this a new segment, then clear it in - // case we have wrapped around - if (lookup.pos == 0) { - await write.clear(); - } else if (lookup.pos != write.length) { - // We should always be appending at the length - throw StateError('appending should be at the end'); - } - return write - .tryAddItems(values.sublist(valueIdx, valueIdx + sacount)); - })); - if (!success) { - return false; - } + final sublistValues = 
values.sublist(valueIdx, valueIdx + sacount); + + dws.add(() async { + final ok = await lookup.shortArray + .scope((sa) => sa.operateWrite((write) async { + // If this a new segment, then clear it in + // case we have wrapped around + if (lookup.pos == 0) { + await write.clear(); + } else if (lookup.pos != write.length) { + // We should always be appending at the length + throw StateError('appending should be at the end'); + } + return write.tryAddItems(sublistValues); + })); + if (!ok) { + success = false; + } + }); + valueIdx += sacount; } - return true; + + await dws(chunkSize: maxDHTConcurrency); + + return success; } @override diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart index 1403e87..501892d 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_head.dart @@ -215,7 +215,7 @@ class _DHTShortArrayHead { } } on Exception catch (_) { // On any exception close the records we have opened - await Future.wait(newRecords.entries.map((e) => e.value.close())); + await newRecords.entries.map((e) => e.value.close()).wait; rethrow; } @@ -259,13 +259,22 @@ class _DHTShortArrayHead { ///////////////////////////////////////////////////////////////////////////// // Linked record management - Future _getOrCreateLinkedRecord(int recordNumber) async { + Future _getOrCreateLinkedRecord( + int recordNumber, bool allowCreate) async { if (recordNumber == 0) { return _headRecord; } - final pool = DHTRecordPool.instance; recordNumber--; - while (recordNumber >= _linkedRecords.length) { + if (recordNumber < _linkedRecords.length) { + return _linkedRecords[recordNumber]; + } + + if (!allowCreate) { + throw StateError("asked for non-existent record and can't create"); + } + + final pool = DHTRecordPool.instance; + for (var 
rn = _linkedRecords.length; rn <= recordNumber; rn++) { // Linked records must use SMPL schema so writer can be specified // Use the same writer as the head record final smplWriter = _headRecord.writer!; @@ -287,9 +296,6 @@ class _DHTShortArrayHead { // Add to linked records _linkedRecords.add(dhtRecord); } - if (!await _writeHead()) { - throw StateError('failed to add linked record'); - } return _linkedRecords[recordNumber]; } @@ -313,15 +319,16 @@ class _DHTShortArrayHead { ); } - Future lookupPosition(int pos) async { + Future lookupPosition( + int pos, bool allowCreate) async { final idx = _index[pos]; - return lookupIndex(idx); + return lookupIndex(idx, allowCreate); } - Future lookupIndex(int idx) async { + Future lookupIndex(int idx, bool allowCreate) async { final seq = idx < _seqs.length ? _seqs[idx] : 0xFFFFFFFF; final recordNumber = idx ~/ _stride; - final record = await _getOrCreateLinkedRecord(recordNumber); + final record = await _getOrCreateLinkedRecord(recordNumber, allowCreate); final recordSubkey = (idx % _stride) + ((recordNumber == 0) ? 
1 : 0); return DHTShortArrayHeadLookup( record: record, recordSubkey: recordSubkey, seq: seq); @@ -378,7 +385,7 @@ class _DHTShortArrayHead { assert( newKeys.length <= (DHTShortArray.maxElements + (_stride - 1)) ~/ _stride, - 'too many keys'); + 'too many keys: $newKeys.length'); assert(newKeys.length == linkedKeys.length, 'duplicated linked keys'); final newIndex = index.toSet(); assert(newIndex.length <= DHTShortArray.maxElements, 'too many indexes'); diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart index 919564c..6485c02 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_read.dart @@ -15,7 +15,7 @@ class _DHTShortArrayRead implements DHTRandomRead { throw IndexError.withLength(pos, length); } - final lookup = await _head.lookupPosition(pos); + final lookup = await _head.lookupPosition(pos, false); final refresh = forceRefresh || _head.positionNeedsRefresh(pos); final outSeqNum = Output(); diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart index dbd8984..df93b59 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array_write.dart @@ -17,37 +17,77 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead @override Future tryInsertItem(int pos, Uint8List value) async { + if (pos < 0 || pos > _head.length) { + throw IndexError.withLength(pos, _head.length); + } + // Allocate empty index at position _head.allocateIndex(pos); - - // Write item - final ok = await tryWriteItem(pos, value); - if (!ok) { - _head.freeIndex(pos); + var success = false; + 
try { + // Write item + success = await tryWriteItem(pos, value); + } finally { + if (!success) { + _head.freeIndex(pos); + } } return true; } @override Future tryInsertItems(int pos, List values) async { - // Allocate empty indices at the end of the list + if (pos < 0 || pos > _head.length) { + throw IndexError.withLength(pos, _head.length); + } + + // Allocate empty indices for (var i = 0; i < values.length; i++) { _head.allocateIndex(pos + i); } - // Write items var success = true; - final dws = DelayedWaitSet(); - for (var i = 0; i < values.length; i++) { - dws.add(() async { - final ok = await tryWriteItem(pos + i, values[i]); - if (!ok) { - _head.freeIndex(pos + i); - success = false; + final outSeqNums = List.generate(values.length, (_) => Output()); + final lookups = []; + try { + // do all lookups + for (var i = 0; i < values.length; i++) { + final lookup = await _head.lookupPosition(pos + i, true); + lookups.add(lookup); + } + + // Write items in parallel + final dws = DelayedWaitSet(); + for (var i = 0; i < values.length; i++) { + final lookup = lookups[i]; + final value = values[i]; + final outSeqNum = outSeqNums[i]; + dws.add(() async { + final outValue = await lookup.record.tryWriteBytes(value, + subkey: lookup.recordSubkey, outSeqNum: outSeqNum); + if (outValue != null) { + success = false; + } + }); + } + + await dws(chunkSize: maxDHTConcurrency, onChunkDone: (_) => success); + } finally { + // Update sequence numbers + for (var i = 0; i < values.length; i++) { + if (outSeqNums[i].value != null) { + _head.updatePositionSeq(pos + i, true, outSeqNums[i].value!); } - }); + } + + // Free indices if this was a failure + if (!success) { + for (var i = 0; i < values.length; i++) { + _head.freeIndex(pos); + } + } } - await dws(chunkSize: maxDHTConcurrency, onChunkDone: (_) => success); + return success; } @@ -68,7 +108,7 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead if (pos < 0 || pos >= _head.length) { throw IndexError.withLength(pos, 
_head.length); } - final lookup = await _head.lookupPosition(pos); + final lookup = await _head.lookupPosition(pos, true); final outSeqNum = Output(); @@ -98,24 +138,22 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead if (pos < 0 || pos >= _head.length) { throw IndexError.withLength(pos, _head.length); } - final lookup = await _head.lookupPosition(pos); - - final outSeqNum = Output(); + final lookup = await _head.lookupPosition(pos, true); + final outSeqNumRead = Output(); final oldValue = lookup.seq == 0xFFFFFFFF ? null : await lookup.record - .get(subkey: lookup.recordSubkey, outSeqNum: outSeqNum); - - if (outSeqNum.value != null) { - _head.updatePositionSeq(pos, false, outSeqNum.value!); + .get(subkey: lookup.recordSubkey, outSeqNum: outSeqNumRead); + if (outSeqNumRead.value != null) { + _head.updatePositionSeq(pos, false, outSeqNumRead.value!); } + final outSeqNumWrite = Output(); final result = await lookup.record.tryWriteBytes(newValue, - subkey: lookup.recordSubkey, outSeqNum: outSeqNum); - - if (outSeqNum.value != null) { - _head.updatePositionSeq(pos, true, outSeqNum.value!); + subkey: lookup.recordSubkey, outSeqNum: outSeqNumWrite); + if (outSeqNumWrite.value != null) { + _head.updatePositionSeq(pos, true, outSeqNumWrite.value!); } if (result != null) { diff --git a/packages/veilid_support/pubspec.lock b/packages/veilid_support/pubspec.lock index cf16648..d58ee4d 100644 --- a/packages/veilid_support/pubspec.lock +++ b/packages/veilid_support/pubspec.lock @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: build_daemon - sha256: "0343061a33da9c5810b2d6cee51945127d8f4c060b7fbdd9d54917f0a3feaaa1" + sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9" url: "https://pub.dev" source: hosted - version: "4.0.1" + version: "4.0.2" build_resolvers: dependency: transitive description: @@ -101,10 +101,10 @@ packages: dependency: "direct dev" description: name: build_runner - sha256: 
"3ac61a79bfb6f6cc11f693591063a7f19a7af628dc52f141743edac5c16e8c22" + sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa" url: "https://pub.dev" source: hosted - version: "2.4.9" + version: "2.4.10" build_runner_core: dependency: transitive description: @@ -221,10 +221,10 @@ packages: dependency: "direct main" description: name: fast_immutable_collections - sha256: "38fbc50df5b219dcfb83ebbc3275ec09872530ca1153858fc56fceadb310d037" + sha256: "533806a7f0c624c2e479d05d3fdce4c87109a7cd0db39b8cc3830d3a2e8dedc7" url: "https://pub.dev" source: hosted - version: "10.2.2" + version: "10.2.3" ffi: dependency: transitive description: @@ -399,10 +399,10 @@ packages: dependency: "direct main" description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" mime: dependency: transitive description: @@ -763,10 +763,10 @@ packages: dependency: transitive description: name: win32 - sha256: "0eaf06e3446824099858367950a813472af675116bf63f008a4c2a75ae13e9cb" + sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4 url: "https://pub.dev" source: hosted - version: "5.5.0" + version: "5.5.1" xdg_directories: dependency: transitive description: @@ -784,5 +784,5 @@ packages: source: hosted version: "3.1.2" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.1" diff --git a/packages/veilid_support/pubspec.yaml b/packages/veilid_support/pubspec.yaml index a7baeed..06403ca 100644 --- a/packages/veilid_support/pubspec.yaml +++ b/packages/veilid_support/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: bloc_advanced_tools: ^0.1.1 collection: ^1.18.0 equatable: ^2.0.5 - fast_immutable_collections: ^10.2.2 + fast_immutable_collections: ^10.2.3 freezed_annotation: ^2.4.1 json_annotation: ^4.9.0 loggy: ^2.0.3 - meta: ^1.11.0 + meta: ^1.12.0 protobuf: 
^3.1.0 veilid: @@ -24,7 +24,7 @@ dependencies: path: ../../../veilid/veilid-flutter dev_dependencies: - build_runner: ^2.4.9 + build_runner: ^2.4.10 freezed: ^2.5.2 json_serializable: ^6.8.0 lint_hard: ^4.0.0 diff --git a/pubspec.lock b/pubspec.lock index 07b05e5..c6e754b 100644 --- a/pubspec.lock +++ b/pubspec.lock @@ -68,10 +68,10 @@ packages: dependency: "direct main" description: name: awesome_extensions - sha256: c3bf11d07a69fe10ff5541717b920661c7a87a791ee182851f1c92a2d15b95a2 + sha256: "07e52221467e651cab9219a26286245760831c3852ea2c54883a48a54f120d7c" url: "https://pub.dev" source: hosted - version: "2.0.14" + version: "2.0.16" badges: dependency: "direct main" description: @@ -139,10 +139,10 @@ packages: dependency: transitive description: name: build_daemon - sha256: "0343061a33da9c5810b2d6cee51945127d8f4c060b7fbdd9d54917f0a3feaaa1" + sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9" url: "https://pub.dev" source: hosted - version: "4.0.1" + version: "4.0.2" build_resolvers: dependency: transitive description: @@ -155,10 +155,10 @@ packages: dependency: "direct dev" description: name: build_runner - sha256: "3ac61a79bfb6f6cc11f693591063a7f19a7af628dc52f141743edac5c16e8c22" + sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa" url: "https://pub.dev" source: hosted - version: "2.4.9" + version: "2.4.10" build_runner_core: dependency: transitive description: @@ -219,10 +219,10 @@ packages: dependency: transitive description: name: camera_android - sha256: "7b0aba6398afa8475e2bc9115d976efb49cf8db781e922572d443795c04a4f4f" + sha256: b350ac087f111467e705b2b76cc1322f7f5bdc122aa83b4b243b0872f390d229 url: "https://pub.dev" source: hosted - version: "0.10.9+1" + version: "0.10.9+2" camera_avfoundation: dependency: transitive description: @@ -387,10 +387,10 @@ packages: dependency: transitive description: name: diffutil_dart - sha256: e0297e4600b9797edff228ed60f4169a778ea357691ec98408fa3b72994c7d06 + sha256: 
"5e74883aedf87f3b703cb85e815bdc1ed9208b33501556e4a8a5572af9845c81" url: "https://pub.dev" source: hosted - version: "3.0.0" + version: "4.0.1" equatable: dependency: "direct main" description: @@ -403,10 +403,10 @@ packages: dependency: "direct main" description: name: fast_immutable_collections - sha256: "38fbc50df5b219dcfb83ebbc3275ec09872530ca1153858fc56fceadb310d037" + sha256: "533806a7f0c624c2e479d05d3fdce4c87109a7cd0db39b8cc3830d3a2e8dedc7" url: "https://pub.dev" source: hosted - version: "10.2.2" + version: "10.2.3" ffi: dependency: transitive description: @@ -472,10 +472,10 @@ packages: dependency: "direct main" description: name: flutter_chat_ui - sha256: c8580c85e2d29359ffc84147e643d08d883eb6e757208652377f0105ef58807f + sha256: "40fb37acc328dd179eadc3d67bf8bd2d950dc0da34464aa8d48e8707e0234c09" url: "https://pub.dev" source: hosted - version: "1.6.12" + version: "1.6.13" flutter_form_builder: dependency: "direct main" description: @@ -634,10 +634,10 @@ packages: dependency: "direct main" description: name: go_router - sha256: b465e99ce64ba75e61c8c0ce3d87b66d8ac07f0b35d0a7e0263fcfc10f99e836 + sha256: aa073287b8f43553678e6fa9e8bb9c83212ff76e09542129a8099bbc8db4df65 url: "https://pub.dev" source: hosted - version: "13.2.5" + version: "14.1.2" graphs: dependency: transitive description: @@ -714,10 +714,10 @@ packages: dependency: "direct main" description: name: intl - sha256: "3bc132a9dbce73a7e4a21a17d06e1878839ffbf975568bc875c60537824b0c4d" + sha256: d6f56758b7d3014a48af9701c085700aac781a92a87a62b1333b46d8879661cf url: "https://pub.dev" source: hosted - version: "0.18.1" + version: "0.19.0" io: dependency: transitive description: @@ -802,10 +802,10 @@ packages: dependency: "direct main" description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" mime: dependency: 
transitive description: @@ -818,10 +818,10 @@ packages: dependency: "direct main" description: name: mobile_scanner - sha256: "827765afbd4792ff3fd105ad593821ac0f6d8a7d352689013b07ee85be336312" + sha256: b8c0e9afcfd52534f85ec666f3d52156f560b5e6c25b1e3d4fe2087763607926 url: "https://pub.dev" source: hosted - version: "4.0.1" + version: "5.1.1" motion_toast: dependency: "direct main" description: @@ -938,10 +938,10 @@ packages: dependency: transitive description: name: photo_view - sha256: "8036802a00bae2a78fc197af8a158e3e2f7b500561ed23b4c458107685e645bb" + sha256: "1fc3d970a91295fbd1364296575f854c9863f225505c28c46e0a03e48960c75e" url: "https://pub.dev" source: hosted - version: "0.14.0" + version: "0.15.0" pinput: dependency: "direct main" description: @@ -1106,26 +1106,26 @@ packages: dependency: "direct main" description: name: searchable_listview - sha256: f9bc1a57dfcba49ce2d190d642567fb82309dd23849b3b0a328266e3f90054db + sha256: dfa6358f5e097f45b5b51a160cb6189e112e3abe0f728f4740349cd3b6575617 url: "https://pub.dev" source: hosted - version: "2.12.0" + version: "2.13.0" share_plus: dependency: "direct main" description: name: share_plus - sha256: fb5319f3aab4c5dda5ebb92dca978179ba21f8c783ee4380910ef4c1c6824f51 + sha256: ef3489a969683c4f3d0239010cc8b7a2a46543a8d139e111c06c558875083544 url: "https://pub.dev" source: hosted - version: "8.0.3" + version: "9.0.0" share_plus_platform_interface: dependency: transitive description: name: share_plus_platform_interface - sha256: "251eb156a8b5fa9ce033747d73535bf53911071f8d3b6f4f0b578505ce0d4496" + sha256: "0f9e4418835d1b2c3ae78fdb918251959106cefdbc4dd43526e182f80e82f6d4" url: "https://pub.dev" source: hosted - version: "3.4.0" + version: "4.0.0" shared_preferences: dependency: "direct main" description: @@ -1194,10 +1194,10 @@ packages: dependency: transitive description: name: shelf_web_socket - sha256: "9ca081be41c60190ebcb4766b2486a7d50261db7bd0f5d9615f2d653637a84c1" + sha256: 
"073c147238594ecd0d193f3456a5fe91c4b0abbcc68bf5cd95b36c4e194ac611" url: "https://pub.dev" source: hosted - version: "1.0.4" + version: "2.0.0" signal_strength_indicator: dependency: "direct main" description: @@ -1399,10 +1399,10 @@ packages: dependency: transitive description: name: url_launcher_android - sha256: "360a6ed2027f18b73c8d98e159dda67a61b7f2e0f6ec26e86c3ada33b0621775" + sha256: "17cd5e205ea615e2c6ea7a77323a11712dffa0720a8a90540db57a01347f9ad9" url: "https://pub.dev" source: hosted - version: "6.3.1" + version: "6.3.2" url_launcher_ios: dependency: transitive description: @@ -1529,30 +1529,38 @@ packages: url: "https://pub.dev" source: hosted version: "0.5.1" + web_socket: + dependency: transitive + description: + name: web_socket + sha256: "217f49b5213796cb508d6a942a5dc604ce1cb6a0a6b3d8cb3f0c314f0ecea712" + url: "https://pub.dev" + source: hosted + version: "0.1.4" web_socket_channel: dependency: transitive description: name: web_socket_channel - sha256: "58c6666b342a38816b2e7e50ed0f1e261959630becd4c879c4f26bfa14aa5a42" + sha256: a2d56211ee4d35d9b344d9d4ce60f362e4f5d1aafb988302906bd732bc731276 url: "https://pub.dev" source: hosted - version: "2.4.5" + version: "3.0.0" win32: dependency: transitive description: name: win32 - sha256: "0eaf06e3446824099858367950a813472af675116bf63f008a4c2a75ae13e9cb" + sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4 url: "https://pub.dev" source: hosted - version: "5.5.0" + version: "5.5.1" window_manager: dependency: "direct main" description: name: window_manager - sha256: b3c895bdf936c77b83c5254bec2e6b3f066710c1f89c38b20b8acc382b525494 + sha256: "8699323b30da4cdbe2aa2e7c9de567a6abd8a97d9a5c850a3c86dcd0b34bbfbf" url: "https://pub.dev" source: hosted - version: "0.3.8" + version: "0.3.9" xdg_directories: dependency: transitive description: @@ -1610,5 +1618,5 @@ packages: source: hosted version: "1.1.2" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.1" diff --git 
a/pubspec.yaml b/pubspec.yaml index d3e5a50..f3bc02b 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -44,14 +44,14 @@ dependencies: flutter_translate: ^4.0.4 form_builder_validators: ^9.1.0 freezed_annotation: ^2.4.1 - go_router: ^13.2.5 + go_router: ^14.1.2 hydrated_bloc: ^9.1.5 image: ^4.1.7 intl: ^0.18.1 json_annotation: ^4.9.0 loggy: ^2.0.3 meta: ^1.11.0 - mobile_scanner: ^4.0.1 + mobile_scanner: ^5.1.1 motion_toast: ^2.9.1 pasteboard: ^0.2.0 path: ^1.9.0 @@ -66,7 +66,7 @@ dependencies: radix_colors: ^1.0.4 reorderable_grid: ^1.0.10 searchable_listview: ^2.12.0 - share_plus: ^8.0.3 + share_plus: ^9.0.0 shared_preferences: ^2.2.3 signal_strength_indicator: ^0.4.1 split_view: ^3.2.1 @@ -88,6 +88,9 @@ dependency_overrides: path: ../dart_async_tools bloc_advanced_tools: path: ../bloc_advanced_tools + # REMOVE ONCE form_builder_validators HAS A FIX UPSTREAM + intl: 0.19.0 + dev_dependencies: build_runner: ^2.4.9 From ff1ea709a8412e6a6e03a46fdb1e60e0e1c591d8 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 21 May 2024 15:19:27 -0400 Subject: [PATCH 6/7] tests pass --- .../example/integration_test/app_test.dart | 16 +- .../integration_test/test_dht_log.dart | 4 +- .../test_dht_record_pool.dart | 2 +- .../test_dht_short_array.dart | 7 +- packages/veilid_support/example/pubspec.lock | 2 +- packages/veilid_support/example/pubspec.yaml | 2 - .../lib/dht_support/src/dht_log/dht_log.dart | 8 +- .../src/dht_log/dht_log_append.dart | 27 ++-- .../dht_support/src/dht_log/dht_log_read.dart | 4 +- .../src/dht_log/dht_log_spine.dart | 140 ++++++++++++++---- .../src/dht_record/dht_record.dart | 8 +- .../src/dht_short_array/dht_short_array.dart | 8 +- .../{dht_openable.dart => dht_closeable.dart} | 23 ++- .../src/interfaces/interfaces.dart | 2 +- 14 files changed, 178 insertions(+), 75 deletions(-) rename packages/veilid_support/lib/dht_support/src/interfaces/{dht_openable.dart => dht_closeable.dart} (61%) diff --git 
a/packages/veilid_support/example/integration_test/app_test.dart b/packages/veilid_support/example/integration_test/app_test.dart index 9c85998..83e3dc8 100644 --- a/packages/veilid_support/example/integration_test/app_test.dart +++ b/packages/veilid_support/example/integration_test/app_test.dart @@ -1,10 +1,6 @@ -//@Timeout(Duration(seconds: 240)) - -//library veilid_support_integration_test; - import 'package:flutter/foundation.dart'; -import 'package:test/test.dart'; import 'package:integration_test/integration_test.dart'; +import 'package:test/test.dart'; import 'package:veilid_test/veilid_test.dart'; import 'fixtures/fixtures.dart'; @@ -26,7 +22,7 @@ void main() { tickerFixture: tickerFixture, updateProcessorFixture: updateProcessorFixture); - group('Started Tests', () { + group(timeout: const Timeout(Duration(seconds: 240)), 'Started Tests', () { setUpAll(veilidFixture.setUp); tearDownAll(veilidFixture.tearDown); tearDownAll(() { @@ -74,9 +70,11 @@ void main() { for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) { test('create log stride=$stride', makeTestDHTLogCreateDelete(stride: stride)); - test('add/truncate log stride=$stride', - makeTestDHTLogAddTruncate(stride: stride), - timeout: const Timeout(Duration(seconds: 480))); + test( + timeout: const Timeout(Duration(seconds: 480)), + 'add/truncate log stride=$stride', + makeTestDHTLogAddTruncate(stride: stride), + ); } }); }); diff --git a/packages/veilid_support/example/integration_test/test_dht_log.dart b/packages/veilid_support/example/integration_test/test_dht_log.dart index f8d758e..0e5829c 100644 --- a/packages/veilid_support/example/integration_test/test_dht_log.dart +++ b/packages/veilid_support/example/integration_test/test_dht_log.dart @@ -1,6 +1,6 @@ import 'dart:convert'; -import 'package:flutter_test/flutter_test.dart'; +import 'package:test/test.dart'; import 'package:veilid_support/veilid_support.dart'; Future Function() makeTestDHTLogCreateDelete({required int stride}) => @@ -61,7 
+61,7 @@ Future Function() makeTestDHTLogAddTruncate({required int stride}) => print('adding\n'); { final res = await dlog.operateAppend((w) async { - const chunk = 50; + const chunk = 25; for (var n = 0; n < dataset.length; n += chunk) { print('$n-${n + chunk - 1} '); final success = diff --git a/packages/veilid_support/example/integration_test/test_dht_record_pool.dart b/packages/veilid_support/example/integration_test/test_dht_record_pool.dart index 2f52c00..1b300a9 100644 --- a/packages/veilid_support/example/integration_test/test_dht_record_pool.dart +++ b/packages/veilid_support/example/integration_test/test_dht_record_pool.dart @@ -1,7 +1,7 @@ import 'dart:convert'; import 'package:flutter/foundation.dart'; -import 'package:flutter_test/flutter_test.dart'; +import 'package:test/test.dart'; import 'package:veilid_support/veilid_support.dart'; Future testDHTRecordPoolCreate() async { diff --git a/packages/veilid_support/example/integration_test/test_dht_short_array.dart b/packages/veilid_support/example/integration_test/test_dht_short_array.dart index 7dead48..637afe0 100644 --- a/packages/veilid_support/example/integration_test/test_dht_short_array.dart +++ b/packages/veilid_support/example/integration_test/test_dht_short_array.dart @@ -1,6 +1,6 @@ import 'dart:convert'; -import 'package:flutter_test/flutter_test.dart'; +import 'package:test/test.dart'; import 'package:veilid_support/veilid_support.dart'; Future Function() makeTestDHTShortArrayCreateDelete( @@ -118,7 +118,10 @@ Future Function() makeTestDHTShortArrayAdd({required int stride}) => //print('clear\n'); { - await arr.operateWrite((w) async => w.clear()); + await arr.operateWriteEventual((w) async { + await w.clear(); + return true; + }); } //print('get all\n'); diff --git a/packages/veilid_support/example/pubspec.lock b/packages/veilid_support/example/pubspec.lock index 3defe80..b7cbcd7 100644 --- a/packages/veilid_support/example/pubspec.lock +++ b/packages/veilid_support/example/pubspec.lock @@ 
-196,7 +196,7 @@ packages: source: sdk version: "0.0.0" flutter_test: - dependency: "direct dev" + dependency: transitive description: flutter source: sdk version: "0.0.0" diff --git a/packages/veilid_support/example/pubspec.yaml b/packages/veilid_support/example/pubspec.yaml index 60f06e7..f353fc9 100644 --- a/packages/veilid_support/example/pubspec.yaml +++ b/packages/veilid_support/example/pubspec.yaml @@ -15,8 +15,6 @@ dependencies: dev_dependencies: async_tools: ^0.1.1 - flutter_test: - sdk: flutter integration_test: sdk: flutter lint_hard: ^4.0.0 diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart index 5dd36a0..3f561ff 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart @@ -42,7 +42,7 @@ class DHTLogUpdate extends Equatable { /// * The head and tail position of the log /// - subkeyIdx = pos / recordsPerSubkey /// - recordIdx = pos % recordsPerSubkey -class DHTLog implements DHTOpenable { +class DHTLog implements DHTDeleteable { //////////////////////////////////////////////////////////////// // Constructors @@ -160,12 +160,16 @@ class DHTLog implements DHTOpenable { ); //////////////////////////////////////////////////////////////////////////// - // DHTOpenable + // DHTCloseable /// Check if the DHTLog is open @override bool get isOpen => _openCount > 0; + /// The type of the openable scope + @override + FutureOr scoped() => this; + /// Add a reference to this log @override Future ref() async => _mutex.protect(() async { diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart index 877df89..c184032 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_append.dart @@ -17,7 
+17,7 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { } // Write item to the segment - return lookup.shortArray.scope((sa) => sa.operateWrite((write) async { + return lookup.scope((sa) => sa.operateWrite((write) async { // If this a new segment, then clear it in case we have wrapped around if (lookup.pos == 0) { await write.clear(); @@ -51,18 +51,17 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { final sublistValues = values.sublist(valueIdx, valueIdx + sacount); dws.add(() async { - final ok = await lookup.shortArray - .scope((sa) => sa.operateWrite((write) async { - // If this a new segment, then clear it in - // case we have wrapped around - if (lookup.pos == 0) { - await write.clear(); - } else if (lookup.pos != write.length) { - // We should always be appending at the length - throw StateError('appending should be at the end'); - } - return write.tryAddItems(sublistValues); - })); + final ok = await lookup.scope((sa) => sa.operateWrite((write) async { + // If this a new segment, then clear it in + // case we have wrapped around + if (lookup.pos == 0) { + await write.clear(); + } else if (lookup.pos != write.length) { + // We should always be appending at the length + throw StateError('appending should be at the end'); + } + return write.tryAddItems(sublistValues); + })); if (!ok) { success = false; } @@ -71,7 +70,7 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead { valueIdx += sacount; } - await dws(chunkSize: maxDHTConcurrency); + await dws(); return success; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart index ea36fc2..3618abd 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_read.dart @@ -19,7 +19,7 @@ class _DHTLogRead implements DHTRandomRead { 
return null; } - return lookup.shortArray.scope((sa) => sa.operate( + return lookup.scope((sa) => sa.operate( (read) => read.getItem(lookup.pos, forceRefresh: forceRefresh))); } @@ -71,7 +71,7 @@ class _DHTLogRead implements DHTRandomRead { // Check each segment for offline positions var foundOffline = false; - await lookup.shortArray.scope((sa) => sa.operate((read) async { + await lookup.scope((sa) => sa.operate((read) async { final segmentOffline = await read.getOfflinePositions(); // For each shortarray segment go through their segment positions diff --git a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart index 12a80c9..9a8c64e 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_log/dht_log_spine.dart @@ -1,9 +1,58 @@ part of 'dht_log.dart'; -class DHTLogPositionLookup { - const DHTLogPositionLookup({required this.shortArray, required this.pos}); - final DHTShortArray shortArray; +class _DHTLogPosition extends DHTCloseable<_DHTLogPosition, DHTShortArray> { + _DHTLogPosition._({ + required _DHTLogSpine dhtLogSpine, + required DHTShortArray shortArray, + required this.pos, + required int segmentNumber, + }) : _segmentShortArray = shortArray, + _dhtLogSpine = dhtLogSpine, + _segmentNumber = segmentNumber; final int pos; + + final _DHTLogSpine _dhtLogSpine; + final DHTShortArray _segmentShortArray; + var _openCount = 1; + final int _segmentNumber; + final Mutex _mutex = Mutex(); + + /// Check if the DHTLogPosition is open + @override + bool get isOpen => _openCount > 0; + + /// The type of the openable scope + @override + FutureOr scoped() => _segmentShortArray; + + /// Add a reference to this log + @override + Future<_DHTLogPosition> ref() async => _mutex.protect(() async { + _openCount++; + return this; + }); + + /// Free all resources for the DHTLogPosition + @override + Future 
close() async => _mutex.protect(() async { + if (_openCount == 0) { + throw StateError('already closed'); + } + _openCount--; + if (_openCount != 0) { + return; + } + await _dhtLogSpine._segmentClosed(_segmentNumber); + }); +} + +class _OpenedSegment { + _OpenedSegment._({ + required this.shortArray, + }); + + final DHTShortArray shortArray; + int openCount = 1; } class _DHTLogSegmentLookup extends Equatable { @@ -32,6 +81,7 @@ class _DHTLogSpine { _head = head, _tail = tail, _segmentStride = stride, + _openedSegments = {}, _spineCache = []; // Create a new spine record and push it to the network @@ -85,6 +135,8 @@ class _DHTLogSpine { futures.add(sc.close()); } await Future.wait(futures); + + assert(_openedSegments.isEmpty, 'should have closed all segments by now'); }); } @@ -247,7 +299,7 @@ class _DHTLogSpine { // Lookup what subkey and segment subrange has this position's segment // shortarray - final l = lookupSegment(segmentNumber); + final l = _lookupSegment(segmentNumber); final subkey = l.subkey; final segment = l.segment; @@ -304,7 +356,7 @@ class _DHTLogSpine { // Lookup what subkey and segment subrange has this position's segment // shortarray - final l = lookupSegment(segmentNumber); + final l = _lookupSegment(segmentNumber); final subkey = l.subkey; final segment = l.segment; @@ -381,7 +433,7 @@ class _DHTLogSpine { return segment; } - _DHTLogSegmentLookup lookupSegment(int segmentNumber) { + _DHTLogSegmentLookup _lookupSegment(int segmentNumber) { assert(_spineMutex.isLocked, 'should be in mutex here'); if (segmentNumber < 0) { @@ -400,30 +452,60 @@ class _DHTLogSpine { /////////////////////////////////////////// // API for public interfaces - Future lookupPosition(int pos) async { + Future<_DHTLogPosition?> lookupPosition(int pos) async { assert(_spineMutex.isLocked, 'should be locked'); + return _spineCacheMutex.protect(() async { + // Check if our position is in bounds + final endPos = length; + if (pos < 0 || pos >= endPos) { + throw 
IndexError.withLength(pos, endPos); + } - // Check if our position is in bounds - final endPos = length; - if (pos < 0 || pos >= endPos) { - throw IndexError.withLength(pos, endPos); - } + // Calculate absolute position, ring-buffer style + final absolutePosition = (_head + pos) % _positionLimit; - // Calculate absolute position, ring-buffer style - final absolutePosition = (_head + pos) % _positionLimit; + // Determine the segment number and position within the segment + final segmentNumber = absolutePosition ~/ DHTShortArray.maxElements; + final segmentPos = absolutePosition % DHTShortArray.maxElements; - // Determine the segment number and position within the segment - final segmentNumber = absolutePosition ~/ DHTShortArray.maxElements; - final segmentPos = absolutePosition % DHTShortArray.maxElements; + // Get the segment shortArray + final openedSegment = _openedSegments[segmentNumber]; + late final DHTShortArray shortArray; + if (openedSegment != null) { + openedSegment.openCount++; + shortArray = openedSegment.shortArray; + } else { + final newShortArray = (_spineRecord.writer == null) + ? await _openSegment(segmentNumber) + : await _openOrCreateSegment(segmentNumber); + if (newShortArray == null) { + return null; + } - // Get the segment shortArray - final shortArray = (_spineRecord.writer == null) - ? 
await _openSegment(segmentNumber) - : await _openOrCreateSegment(segmentNumber); - if (shortArray == null) { - return null; - } - return DHTLogPositionLookup(shortArray: shortArray, pos: segmentPos); + _openedSegments[segmentNumber] = + _OpenedSegment._(shortArray: newShortArray); + + shortArray = newShortArray; + } + + return _DHTLogPosition._( + dhtLogSpine: this, + shortArray: shortArray, + pos: segmentPos, + segmentNumber: segmentNumber); + }); + } + + Future _segmentClosed(int segmentNumber) async { + assert(_spineMutex.isLocked, 'should be locked'); + await _spineCacheMutex.protect(() async { + final os = _openedSegments[segmentNumber]!; + os.openCount--; + if (os.openCount == 0) { + _openedSegments.remove(segmentNumber); + await os.shortArray.close(); + } + }); } void allocateTail(int count) { @@ -479,7 +561,7 @@ class _DHTLogSpine { segmentNumber++) { // Lookup what subkey and segment subrange has this position's segment // shortarray - final l = lookupSegment(segmentNumber); + final l = _lookupSegment(segmentNumber); final subkey = l.subkey; final segment = l.segment; @@ -608,6 +690,8 @@ class _DHTLogSpine { // Spine DHT record final DHTRecord _spineRecord; + // Segment stride to use for spine elements + final int _segmentStride; // Position of the start of the log (oldest items) int _head; @@ -616,8 +700,8 @@ class _DHTLogSpine { // LRU cache of DHT spine elements accessed recently // Pair of position and associated shortarray segment + final Mutex _spineCacheMutex = Mutex(); final List<(int, DHTShortArray)> _spineCache; + final Map _openedSegments; static const int _spineCacheLength = 3; - // Segment stride to use for spine elements - final int _segmentStride; } diff --git a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart index 7bf5129..80b68ad 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart +++ 
b/packages/veilid_support/lib/dht_support/src/dht_record/dht_record.dart @@ -36,7 +36,7 @@ enum DHTRecordRefreshMode { ///////////////////////////////////////////////// -class DHTRecord implements DHTOpenable { +class DHTRecord implements DHTDeleteable { DHTRecord._( {required VeilidRoutingContext routingContext, required SharedDHTRecordData sharedDHTRecordData, @@ -52,12 +52,16 @@ class DHTRecord implements DHTOpenable { _sharedDHTRecordData = sharedDHTRecordData; //////////////////////////////////////////////////////////////////////////// - // DHTOpenable + // DHTCloseable /// Check if the DHTRecord is open @override bool get isOpen => _openCount > 0; + /// The type of the openable scope + @override + FutureOr scoped() => this; + /// Add a reference to this DHTRecord @override Future ref() async => _mutex.protect(() async { diff --git a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart index cd62fa6..daf3061 100644 --- a/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart +++ b/packages/veilid_support/lib/dht_support/src/dht_short_array/dht_short_array.dart @@ -13,7 +13,7 @@ part 'dht_short_array_write.dart'; /////////////////////////////////////////////////////////////////////// -class DHTShortArray implements DHTOpenable { +class DHTShortArray implements DHTDeleteable { //////////////////////////////////////////////////////////////// // Constructors @@ -136,12 +136,16 @@ class DHTShortArray implements DHTOpenable { ); //////////////////////////////////////////////////////////////////////////// - // DHTOpenable + // DHTCloseable /// Check if the shortarray is open @override bool get isOpen => _openCount > 0; + /// The type of the openable scope + @override + FutureOr scoped() => this; + /// Add a reference to this shortarray @override Future ref() async => _mutex.protect(() async { diff --git 
a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart b/packages/veilid_support/lib/dht_support/src/interfaces/dht_closeable.dart similarity index 61% rename from packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart rename to packages/veilid_support/lib/dht_support/src/interfaces/dht_closeable.dart index ffd58f9..65e9db1 100644 --- a/packages/veilid_support/lib/dht_support/src/interfaces/dht_openable.dart +++ b/packages/veilid_support/lib/dht_support/src/interfaces/dht_closeable.dart @@ -1,27 +1,36 @@ import 'dart:async'; -abstract class DHTOpenable { +import 'package:meta/meta.dart'; + +abstract class DHTCloseable { bool get isOpen; + @protected + FutureOr scoped(); Future ref(); Future close(); +} + +abstract class DHTDeleteable extends DHTCloseable { Future delete(); } -extension DHTOpenableExt> on D { - /// Runs a closure that guarantees the DHTOpenable +extension DHTCloseableExt on DHTCloseable { + /// Runs a closure that guarantees the DHTCloseable /// will be closed upon exit, even if an uncaught exception is thrown Future scope(Future Function(D) scopeFunction) async { if (!isOpen) { throw StateError('not open in scope'); } try { - return await scopeFunction(this); + return await scopeFunction(await scoped()); } finally { await close(); } } +} - /// Runs a closure that guarantees the DHTOpenable +extension DHTDeletableExt on DHTDeleteable { + /// Runs a closure that guarantees the DHTCloseable /// will be closed upon exit, and deleted if an an /// uncaught exception is thrown Future deleteScope(Future Function(D) scopeFunction) async { @@ -30,7 +39,7 @@ extension DHTOpenableExt> on D { } try { - return await scopeFunction(this); + return await scopeFunction(await scoped()); } on Exception { await delete(); rethrow; @@ -39,7 +48,7 @@ extension DHTOpenableExt> on D { } } - /// Scopes a closure that conditionally deletes the DHTOpenable on exit + /// Scopes a closure that conditionally deletes the DHTCloseable on exit 
Future maybeDeleteScope( bool delete, Future Function(D) scopeFunction) async { if (delete) { diff --git a/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart b/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart index 6c61075..16f9970 100644 --- a/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart +++ b/packages/veilid_support/lib/dht_support/src/interfaces/interfaces.dart @@ -1,4 +1,4 @@ -export 'dht_openable.dart'; +export 'dht_closeable.dart'; export 'dht_random_read.dart'; export 'dht_random_write.dart'; export 'exceptions.dart'; From 11be8bb70532f4266dc3f1790f2038c6f258218b Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 21 May 2024 19:48:36 -0400 Subject: [PATCH 7/7] lock updates --- macos/Podfile.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/macos/Podfile.lock b/macos/Podfile.lock index 7ca005d..faa2836 100644 --- a/macos/Podfile.lock +++ b/macos/Podfile.lock @@ -1,6 +1,6 @@ PODS: - FlutterMacOS (1.0.0) - - mobile_scanner (3.5.6): + - mobile_scanner (5.1.1): - FlutterMacOS - pasteboard (0.0.1): - FlutterMacOS @@ -68,15 +68,15 @@ EXTERNAL SOURCES: SPEC CHECKSUMS: FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24 - mobile_scanner: 54ceceae0c8da2457e26a362a6be5c61154b1829 + mobile_scanner: 1efac1e53c294b24e3bb55bcc7f4deee0233a86b pasteboard: 9b69dba6fedbb04866be632205d532fe2f6b1d99 - path_provider_foundation: 3784922295ac71e43754bd15e0653ccfd36a147c + path_provider_foundation: 2b6b4c569c0fb62ec74538f866245ac84301af46 screen_retriever: 59634572a57080243dd1bf715e55b6c54f241a38 share_plus: 36537c04ce0c3e3f5bd297ce4318b6d5ee5fd6cf - shared_preferences_foundation: b4c3b4cddf1c21f02770737f147a3f5da9d39695 + shared_preferences_foundation: fcdcbc04712aee1108ac7fda236f363274528f78 smart_auth: b38e3ab4bfe089eacb1e233aca1a2340f96c28e9 sqflite: 673a0e54cc04b7d6dba8d24fb8095b31c3a99eec - url_launcher_macos: d2691c7dd33ed713bf3544850a623080ec693d95 + 
url_launcher_macos: 5f437abeda8c85500ceb03f5c1938a8c5a705399 veilid: a54f57b7bcf0e4e072fe99272d76ca126b2026d0 window_manager: 3a1844359a6295ab1e47659b1a777e36773cd6e8