more locking work

Christien Rioux 2024-03-28 21:46:26 -05:00
parent 41bb198d92
commit 5a8b1caf93
12 changed files with 481 additions and 484 deletions

View File

@ -54,12 +54,14 @@ class NewAccountPageState extends State<NewAccountPage> {
validator: FormBuilderValidators.compose([
FormBuilderValidators.required(),
]),
textInputAction: TextInputAction.next,
),
FormBuilderTextField(
name: formFieldPronouns,
maxLength: 64,
decoration: InputDecoration(
labelText: translate('account.form_pronouns')),
textInputAction: TextInputAction.next,
),
Row(children: [
const Spacer(),

View File

@ -83,12 +83,10 @@ class ChatListCubit extends DHTShortArrayCubit<proto.Chat> {
Future<void> deleteChat(
{required TypedKey remoteConversationRecordKey}) async {
final remoteConversationKey = remoteConversationRecordKey.toProto();
final accountRecordKey =
_activeAccountInfo.userLogin.accountRecordInfo.accountRecord.recordKey;
// Remove Chat from account's list
// if this fails, don't keep retrying, user can try again later
await operate((shortArray) async {
final deletedItem = await operate((shortArray) async {
if (activeChatCubit.state == remoteConversationRecordKey) {
activeChatCubit.setActiveChat(null);
}
@ -101,19 +99,21 @@ class ChatListCubit extends DHTShortArrayCubit<proto.Chat> {
if (c.remoteConversationRecordKey == remoteConversationKey) {
// Found the right chat
if (await shortArray.tryRemoveItem(i) != null) {
try {
await (await DHTShortArray.openOwned(
c.reconciledChatRecord.toVeilid(),
parent: accountRecordKey))
.delete();
} on Exception catch (e) {
log.debug('error removing reconciled chat record: $e', e);
}
return c;
}
return;
return null;
}
}
return null;
});
if (deletedItem != null) {
try {
await DHTRecordPool.instance
.delete(deletedItem.reconciledChatRecord.toVeilid().recordKey);
} on Exception catch (e) {
log.debug('error removing reconciled chat record: $e', e);
}
}
}
final ActiveChatCubit activeChatCubit;
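
The recurring refactor in this commit: the list mutation happens inside operate() while the short-array lock is held, the removed element is returned out of the closure, and the slow DHT cleanup runs after the lock is released. A minimal standalone sketch of that shape, with hypothetical names (not the app's real API) and a tiny inline FIFO lock so it runs without dependencies:

import 'dart:async';

/// Tiny FIFO lock so the sketch is self-contained
/// (the real code uses a Mutex with the same protect() shape).
class Lock {
  Future<void> _tail = Future.value();
  Future<T> protect<T>(Future<T> Function() action) {
    final prev = _tail;
    final gate = Completer<void>();
    _tail = gate.future;
    return prev.then((_) => action()).whenComplete(gate.complete);
  }
}

class ChatList {
  final Lock _lock = Lock();
  final List<String> _chats = ['chat-0', 'chat-1'];

  /// Mutate under the lock and return the removed element, so the
  /// caller can do slow cleanup without holding the lock.
  Future<String?> removeChat(String key) => _lock.protect(() async {
        final i = _chats.indexOf(key);
        return i == -1 ? null : _chats.removeAt(i);
      });
}

Future<void> main() async {
  final list = ChatList();
  final deleted = await list.removeChat('chat-1');
  if (deleted != null) {
    // Cleanup of dependent records happens here, outside the lock;
    // a failure is only logged and the user can retry later.
    print('cleaning up after $deleted');
  }
}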

View File

@ -6,6 +6,7 @@ import 'package:veilid_support/veilid_support.dart';
import '../../account_manager/account_manager.dart';
import '../../proto/proto.dart' as proto;
import '../../tools/tools.dart';
import '../models/models.dart';
//////////////////////////////////////////////////
@ -157,7 +158,7 @@ class ContactInvitationListCubit
_activeAccountInfo.userLogin.accountRecordInfo.accountRecord.recordKey;
// Remove ContactInvitationRecord from account's list
await operate((shortArray) async {
final deletedItem = await operate((shortArray) async {
for (var i = 0; i < shortArray.length; i++) {
final item = await shortArray.getItemProtobuf(
proto.ContactInvitationRecord.fromBuffer, i);
@ -166,25 +167,37 @@ class ContactInvitationListCubit
}
if (item.contactRequestInbox.recordKey.toVeilid() ==
contactRequestInboxRecordKey) {
await shortArray.tryRemoveItem(i);
await (await pool.openOwned(item.contactRequestInbox.toVeilid(),
parent: accountRecordKey))
.scope((contactRequestInbox) async {
// Wipe out old invitation so it shows up as invalid
await contactRequestInbox.tryWriteBytes(Uint8List(0));
await contactRequestInbox.delete();
});
if (!accepted) {
await (await pool.openRead(
item.localConversationRecordKey.toVeilid(),
parent: accountRecordKey))
.delete();
if (await shortArray.tryRemoveItem(i) != null) {
return item;
}
return;
return null;
}
}
return null;
});
if (deletedItem != null) {
// Delete the contact request inbox
final contactRequestInbox = deletedItem.contactRequestInbox.toVeilid();
await (await pool.openOwned(contactRequestInbox,
parent: accountRecordKey))
.scope((contactRequestInbox) async {
// Wipe out old invitation so it shows up as invalid
await contactRequestInbox.tryWriteBytes(Uint8List(0));
});
try {
await pool.delete(contactRequestInbox.recordKey);
} on Exception catch (e) {
log.debug('error removing contact request inbox: $e', e);
}
if (!accepted) {
try {
await pool.delete(deletedItem.localConversationRecordKey.toVeilid());
} on Exception catch (e) {
log.debug('error removing local conversation record: $e', e);
}
}
}
}
Future<ValidContactInvitation?> validateInvitation(
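
Deleting the invitation is now two steps outside the lock: the contact-request inbox is first overwritten with zero-length bytes so any cached copy of the invitation reads as invalid, and only then is the record deleted from the pool. A sketch of that ordering against a hypothetical minimal record interface (illustration only, not the real DHTRecord API):

import 'dart:typed_data';

// Hypothetical minimal record interface for illustration only.
abstract class Record {
  Future<Uint8List?> tryWriteBytes(Uint8List data);
  Future<void> delete();
}

Future<void> invalidateAndDelete(Record contactRequestInbox) async {
  // 1) Wipe: old copies of the invitation now deserialize as invalid.
  await contactRequestInbox.tryWriteBytes(Uint8List(0));
  // 2) Delete: reclaim the record itself; failures are only logged
  //    because the wipe already made the invitation unusable.
  try {
    await contactRequestInbox.delete();
  } on Exception catch (e) {
    print('error removing contact request inbox: $e');
  }
}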

View File

@ -14,8 +14,7 @@ class ContactListCubit extends DHTShortArrayCubit<proto.Contact> {
ContactListCubit({
required ActiveAccountInfo activeAccountInfo,
required proto.Account account,
}) : _activeAccountInfo = activeAccountInfo,
super(
}) : super(
open: () => _open(activeAccountInfo, account),
decodeElement: proto.Contact.fromBuffer);
@ -62,14 +61,12 @@ class ContactListCubit extends DHTShortArrayCubit<proto.Contact> {
Future<void> deleteContact({required proto.Contact contact}) async {
final pool = DHTRecordPool.instance;
final accountRecordKey =
_activeAccountInfo.userLogin.accountRecordInfo.accountRecord.recordKey;
final localConversationKey = contact.localConversationRecordKey.toVeilid();
final remoteConversationKey =
contact.remoteConversationRecordKey.toVeilid();
// Remove Contact from account's list
await operate((shortArray) async {
final deletedItem = await operate((shortArray) async {
for (var i = 0; i < shortArray.length; i++) {
final item =
await shortArray.getItemProtobuf(proto.Contact.fromBuffer, i);
@ -78,29 +75,28 @@ class ContactListCubit extends DHTShortArrayCubit<proto.Contact> {
}
if (item.remoteConversationRecordKey ==
contact.remoteConversationRecordKey) {
await shortArray.tryRemoveItem(i);
break;
if (await shortArray.tryRemoveItem(i) != null) {
return item;
}
return null;
}
}
return null;
});
if (deletedItem != null) {
try {
await (await pool.openRead(localConversationKey,
parent: accountRecordKey))
.delete();
await pool.delete(localConversationKey);
} on Exception catch (e) {
log.debug('error removing local conversation record key: $e', e);
}
try {
if (localConversationKey != remoteConversationKey) {
await (await pool.openRead(remoteConversationKey,
parent: accountRecordKey))
.delete();
await pool.delete(remoteConversationKey);
}
} on Exception catch (e) {
log.debug('error removing remote conversation record key: $e', e);
}
});
}
}
//
final ActiveAccountInfo _activeAccountInfo;
}

View File

@ -27,8 +27,7 @@ void main() async {
// Ansi colors
ansiColorDisabled = false;
// Catch errors
await runZonedGuarded(() async {
Future<void> mainFunc() async {
// Logs
initLoggy();
@ -53,7 +52,15 @@ void main() async {
// Hot reloads will only restart this part, not Veilid
runApp(LocalizedApp(localizationDelegate,
VeilidChatApp(initialThemeData: initialThemeData)));
}, (error, stackTrace) {
log.error('Dart Runtime: {$error}\n{$stackTrace}');
});
}
if (kDebugMode) {
// In debug mode, run the app without catching exceptions for debugging
await mainFunc();
} else {
// Catch errors in production without killing the app
await runZonedGuarded(mainFunc, (error, stackTrace) {
log.error('Dart Runtime: {$error}\n{$stackTrace}');
});
}
}
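
The startup change factors the app body into mainFunc() and only wraps it in runZonedGuarded for release builds, so debuggers see uncaught exceptions directly. A reduced, pure-Dart sketch; kDebugMode is approximated here with a compile-time environment check (flutter/foundation also accounts for profile mode):

import 'dart:async';

// Approximation of flutter/foundation's kDebugMode.
const bool kDebugMode = !bool.fromEnvironment('dart.vm.product');

Future<void> mainFunc() async {
  // init logging, settings, theme ... then run the app
}

Future<void> main() async {
  if (kDebugMode) {
    // In debug mode, let exceptions propagate uncaught for easier debugging.
    await mainFunc();
  } else {
    // In release, catch zone errors without killing the app.
    await runZonedGuarded(mainFunc, (error, stackTrace) {
      print('Dart Runtime: {$error}\n{$stackTrace}');
    });
  }
}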

View File

@ -27,7 +27,6 @@ class DHTRecord {
_defaultSubkey = defaultSubkey,
_writer = writer,
_open = true,
_valid = true,
_sharedDHTRecordData = sharedDHTRecordData;
final SharedDHTRecordData _sharedDHTRecordData;
@ -37,7 +36,6 @@ class DHTRecord {
final DHTRecordCrypto _crypto;
bool _open;
bool _valid;
@internal
StreamController<DHTRecordWatchChange>? watchController;
@internal
@ -59,9 +57,6 @@ class DHTRecord {
OwnedDHTRecordPointer(recordKey: key, owner: ownerKeyPair!);
Future<void> close() async {
if (!_valid) {
throw StateError('already deleted');
}
if (!_open) {
return;
}
@ -70,33 +65,26 @@ class DHTRecord {
_open = false;
}
void _markDeleted() {
_valid = false;
}
Future<void> delete() => DHTRecordPool.instance.delete(key);
Future<T> scope<T>(Future<T> Function(DHTRecord) scopeFunction) async {
try {
return await scopeFunction(this);
} finally {
if (_valid) {
await close();
}
await close();
}
}
Future<T> deleteScope<T>(Future<T> Function(DHTRecord) scopeFunction) async {
try {
final out = await scopeFunction(this);
if (_valid && _open) {
if (_open) {
await close();
}
return out;
} on Exception catch (_) {
if (_valid) {
await delete();
if (_open) {
await close();
}
await DHTRecordPool.instance.delete(key);
rethrow;
}
}
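
With the _valid flag gone, deleteScope's failure path becomes: close the record first, then ask the pool to delete it, since the pool now owns lifetime bookkeeping. A shape-only sketch with hypothetical stand-in types (the real code also guards close() behind an _open check):

// Hypothetical stand-ins for DHTRecord/DHTRecordPool, for illustration.
abstract class PoolRecord {
  String get key;
  Future<void> close();
}

abstract class Pool {
  Future<void> delete(String key);
}

Future<T> deleteScope<T>(
    Pool pool, PoolRecord record, Future<T> Function(PoolRecord) body) async {
  try {
    final out = await body(record);
    await record.close(); // success: just close
    return out;
  } on Exception {
    await record.close(); // failure: close first...
    await pool.delete(record.key); // ...then let the pool delete it
    rethrow;
  }
}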

View File

@ -73,6 +73,7 @@ class SharedDHTRecordData {
VeilidRoutingContext defaultRoutingContext;
Map<int, int> subkeySeqCache = {};
bool needsWatchStateUpdate = false;
bool deleteOnClose = false;
}
// Per opened record data
@ -182,7 +183,7 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
// If we are opening a key that already exists
// make sure we are using the same parent if one was specified
_validateParent(parent, recordKey);
_validateParentInner(parent, recordKey);
// See if this has been opened yet
final openedRecordInfo = _opened[recordKey];
@ -232,54 +233,58 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
}
if (openedRecordInfo.records.isEmpty) {
await _routingContext.closeDHTRecord(key);
if (openedRecordInfo.shared.deleteOnClose) {
await _deleteInner(key);
}
_opened.remove(key);
}
});
}
Future<void> delete(TypedKey recordKey) async {
final allDeletedRecords = <DHTRecord>{};
final allDeletedRecordKeys = <TypedKey>[];
// Collect all dependencies (including the record itself)
// in reverse (bottom-up/delete order)
List<TypedKey> _collectChildrenInner(TypedKey recordKey) {
assert(_mutex.isLocked, 'should be locked here');
await _mutex.protect(() async {
// Collect all dependencies (including the record itself)
final allDeps = <TypedKey>[];
final currentDeps = [recordKey];
while (currentDeps.isNotEmpty) {
final nextDep = currentDeps.removeLast();
final allDeps = <TypedKey>[];
final currentDeps = [recordKey];
while (currentDeps.isNotEmpty) {
final nextDep = currentDeps.removeLast();
// Remove this child from its parent
await _removeDependencyInner(nextDep);
allDeps.add(nextDep);
final childDeps =
_state.childrenByParent[nextDep.toJson()]?.toList() ?? [];
currentDeps.addAll(childDeps);
}
// Delete all dependent records in parallel (including the record itself)
for (final dep in allDeps) {
// If record is opened, close it first
final openinfo = _opened[dep];
if (openinfo != null) {
for (final rec in openinfo.records) {
allDeletedRecords.add(rec);
}
}
// Then delete
allDeletedRecordKeys.add(dep);
}
});
await Future.wait(allDeletedRecords.map((r) => r.close()));
for (final deletedRecord in allDeletedRecords) {
deletedRecord._markDeleted();
allDeps.add(nextDep);
final childDeps =
_state.childrenByParent[nextDep.toJson()]?.toList() ?? [];
currentDeps.addAll(childDeps);
}
await Future.wait(
allDeletedRecordKeys.map(_routingContext.deleteDHTRecord));
return allDeps.reversedView;
}
void _validateParent(TypedKey? parent, TypedKey child) {
Future<void> _deleteInner(TypedKey recordKey) async {
// Remove this child from parents
await _removeDependenciesInner([recordKey]);
await _routingContext.deleteDHTRecord(recordKey);
}
Future<void> delete(TypedKey recordKey) async {
await _mutex.protect(() async {
final allDeps = _collectChildrenInner(recordKey);
assert(allDeps.singleOrNull == recordKey, 'must delete children first');
final ori = _opened[recordKey];
if (ori != null) {
// delete after close
ori.shared.deleteOnClose = true;
} else {
// delete now
await _deleteInner(recordKey);
}
});
}
void _validateParentInner(TypedKey? parent, TypedKey child) {
assert(_mutex.isLocked, 'should be locked here');
final childJson = child.toJson();
final existingParent = _state.parentByChild[childJson];
if (parent == null) {
@ -319,29 +324,35 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
}
}
Future<void> _removeDependencyInner(TypedKey child) async {
Future<void> _removeDependenciesInner(List<TypedKey> childList) async {
assert(_mutex.isLocked, 'should be locked here');
if (_state.rootRecords.contains(child)) {
_state = await store(
_state.copyWith(rootRecords: _state.rootRecords.remove(child)));
} else {
final parent = _state.parentByChild[child.toJson()];
if (parent == null) {
return;
}
final children = _state.childrenByParent[parent.toJson()]!.remove(child);
late final DHTRecordPoolAllocations newState;
if (children.isEmpty) {
newState = _state.copyWith(
childrenByParent: _state.childrenByParent.remove(parent.toJson()),
parentByChild: _state.parentByChild.remove(child.toJson()));
var state = _state;
for (final child in childList) {
if (_state.rootRecords.contains(child)) {
state = state.copyWith(rootRecords: state.rootRecords.remove(child));
} else {
newState = _state.copyWith(
childrenByParent:
_state.childrenByParent.add(parent.toJson(), children),
parentByChild: _state.parentByChild.remove(child.toJson()));
final parent = state.parentByChild[child.toJson()];
if (parent == null) {
continue;
}
final children = state.childrenByParent[parent.toJson()]!.remove(child);
if (children.isEmpty) {
state = state.copyWith(
childrenByParent: state.childrenByParent.remove(parent.toJson()),
parentByChild: state.parentByChild.remove(child.toJson()));
} else {
state = state.copyWith(
childrenByParent:
state.childrenByParent.add(parent.toJson(), children),
parentByChild: state.parentByChild.remove(child.toJson()));
}
}
_state = await store(newState);
}
if (state != _state) {
_state = await store(state);
}
}
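
_removeDependenciesInner now folds every child removal into a local copy of the immutable state and persists it with a single store() call, instead of writing once per child. The batching shape, reduced to plain Dart maps in place of the real immutable collections (names hypothetical):

// Immutable-state batching: fold removals into a local copy, persist once.
class PoolState {
  const PoolState(this.parentByChild);
  final Map<String, String> parentByChild; // child -> parent

  PoolState without(String child) {
    final next = Map<String, String>.of(parentByChild)..remove(child);
    return PoolState(next);
  }
}

class PoolStore {
  PoolState _state = const PoolState(<String, String>{});

  Future<PoolState> store(PoolState s) async => s; // persist, then adopt

  Future<void> removeDependencies(List<String> children) async {
    var state = _state;
    for (final child in children) {
      if (state.parentByChild.containsKey(child)) {
        state = state.without(child); // accumulate on the local copy
      }
    }
    if (!identical(state, _state)) {
      _state = await store(state); // a single write covers the whole batch
    }
  }
}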
@ -595,10 +606,10 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
_tickCount = 0;
try {
// See if any opened records need watch state changes
final unord = <Future<bool> Function()>[];
final allSuccess = await _mutex.protect(() async {
// See if any opened records need watch state changes
final unord = <Future<bool> Function()>[];
await _mutex.protect(() async {
for (final kv in _opened.entries) {
final openedRecordKey = kv.key;
final openedRecordInfo = kv.value;
@ -647,13 +658,15 @@ class DHTRecordPool with TableDBBacked<DHTRecordPoolAllocations> {
}
}
}
// Process all watch changes
return unord.isEmpty ||
(await unord.map((f) => f()).wait).reduce((a, b) => a && b);
});
// Process all watch changes
// If any watch did not succeed, back off the attempts to

// update the watches for a bit
final allSuccess = unord.isEmpty ||
(await unord.map((f) => f()).wait).reduce((a, b) => a && b);
if (!allSuccess) {
_watchBackoffTimer *= watchBackoffMultiplier;
_watchBackoffTimer = min(_watchBackoffTimer, watchBackoffMax);
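
The central locking change in the pool: delete() no longer closes open records itself. Under the mutex it either flags an open record with deleteOnClose, so the actual delete fires when the last handle closes, or deletes immediately if nothing is open. A reduced sketch of that state machine with hypothetical names (the real code runs these paths under _mutex.protect and asserts that children are deleted first):

class _OpenedInfo {
  int handles = 0;
  bool deleteOnClose = false;
}

class RecordPool {
  final Map<String, _OpenedInfo> _opened = {};

  Future<void> _deleteInner(String key) async {
    // remove dependency bookkeeping, then delete the record from the DHT
  }

  void open(String key) =>
      _opened.putIfAbsent(key, _OpenedInfo.new).handles++;

  Future<void> close(String key) async {
    final info = _opened[key]!;
    if (--info.handles == 0) {
      if (info.deleteOnClose) {
        await _deleteInner(key); // deferred delete fires on last close
      }
      _opened.remove(key);
    }
  }

  Future<void> delete(String key) async {
    final info = _opened[key];
    if (info != null) {
      info.deleteOnClose = true; // record still open: defer
    } else {
      await _deleteInner(key); // nothing open: delete now
    }
  }
}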

View File

@ -16,7 +16,11 @@ class DHTShortArray {
// Constructors
DHTShortArray._({required DHTRecord headRecord})
: _head = _DHTShortArrayHead(headRecord: headRecord) {}
: _head = _DHTShortArrayHead(headRecord: headRecord) {
_head.onUpdatedHead = () {
_watchController?.sink.add(null);
};
}
// Create a DHTShortArray
// if smplWriter is specified, uses a SMPL schema with a single writer
@ -52,12 +56,15 @@ class DHTShortArray {
try {
final dhtShortArray = DHTShortArray._(headRecord: dhtRecord);
if (!await dhtShortArray._head._tryWriteHead()) {
throw StateError('Failed to write head at this time');
}
await dhtShortArray._head.operate((head) async {
if (!await head._writeHead()) {
throw StateError('Failed to write head at this time');
}
});
return dhtShortArray;
} on Exception catch (_) {
await dhtRecord.delete();
await dhtRecord.close();
await pool.delete(dhtRecord.key);
rethrow;
}
}
@ -70,7 +77,7 @@ class DHTShortArray {
parent: parent, routingContext: routingContext, crypto: crypto);
try {
final dhtShortArray = DHTShortArray._(headRecord: dhtRecord);
await dhtShortArray._head._refreshInner();
await dhtShortArray._head.operate((head) => head._loadHead());
return dhtShortArray;
} on Exception catch (_) {
await dhtRecord.close();
@ -90,7 +97,7 @@ class DHTShortArray {
parent: parent, routingContext: routingContext, crypto: crypto);
try {
final dhtShortArray = DHTShortArray._(headRecord: dhtRecord);
await dhtShortArray._head._refreshInner();
await dhtShortArray._head.operate((head) => head._loadHead());
return dhtShortArray;
} on Exception catch (_) {
await dhtRecord.close();
@ -115,13 +122,14 @@ class DHTShortArray {
////////////////////////////////////////////////////////////////////////////
// Public API
// External references for the shortarray
TypedKey get recordKey => _head.headRecord.key;
OwnedDHTRecordPointer get recordPointer =>
_head.headRecord.ownedDHTRecordPointer;
/// Get the record key for this shortarray
TypedKey get recordKey => _head.recordKey;
/// Get the record pointer for this shortarray
OwnedDHTRecordPointer get recordPointer => _head.recordPointer;
/// Returns the number of elements in the DHTShortArray
int get length => _head.index.length;
int get length => _head.length;
/// Free all resources for the DHTShortArray
Future<void> close() async {
@ -131,8 +139,8 @@ class DHTShortArray {
/// Free all resources for the DHTShortArray and delete it from the DHT
Future<void> delete() async {
await _watchController?.close();
await _head.delete();
await close();
await DHTRecordPool.instance.delete(recordKey);
}
/// Runs a closure that guarantees the DHTShortArray
@ -169,23 +177,15 @@ class DHTShortArray {
Future<Uint8List?> _getItemInner(_DHTShortArrayHead head, int pos,
{bool forceRefresh = false}) async {
if (pos < 0 || pos >= head.index.length) {
throw IndexError.withLength(pos, head.index.length);
if (pos < 0 || pos >= length) {
throw IndexError.withLength(pos, length);
}
final index = head.index[pos];
final recordNumber = index ~/ head.stride;
final record = head.getLinkedRecord(recordNumber);
if (record == null) {
throw StateError('Record does not exist');
}
final recordSubkey = (index % head.stride) + ((recordNumber == 0) ? 1 : 0);
final refresh = forceRefresh || head.indexNeedsRefresh(index);
final (record, recordSubkey) = await head.lookupPosition(pos);
final refresh = forceRefresh || head.positionNeedsRefresh(pos);
final out = record.get(subkey: recordSubkey, forceRefresh: refresh);
await head.updateIndexSeq(index, false);
await head.updatePositionSeq(pos, false);
return out;
}
@ -197,7 +197,7 @@ class DHTShortArray {
_head.operate((head) async {
final out = <Uint8List>[];
for (var pos = 0; pos < head.index.length; pos++) {
for (var pos = 0; pos < head.length; pos++) {
final elem =
await _getItemInner(head, pos, forceRefresh: forceRefresh);
if (elem == null) {
@ -248,21 +248,14 @@ class DHTShortArray {
final out = await _head
.operateWrite((head) async => _tryAddItemInner(head, value)) ??
false;
// Send update
_watchController?.sink.add(null);
return out;
}
Future<bool> _tryAddItemInner(
_DHTShortArrayHead head, Uint8List value) async {
// Allocate empty index
final index = head.emptyIndex();
// Add new index
final pos = head.index.length;
head.index.add(index);
// Allocate empty index at the end of the list
final pos = head.length;
head.allocateIndex(pos);
// Write item
final (_, wasSet) = await _tryWriteItemInner(head, pos, value);
@ -271,7 +264,7 @@ class DHTShortArray {
}
// Get sequence number written
await head.updateIndexSeq(index, true);
await head.updatePositionSeq(pos, true);
return true;
}
@ -287,19 +280,13 @@ class DHTShortArray {
(head) async => _tryInsertItemInner(head, pos, value)) ??
false;
// Send update
_watchController?.sink.add(null);
return out;
}
Future<bool> _tryInsertItemInner(
_DHTShortArrayHead head, int pos, Uint8List value) async {
// Allocate empty index
final index = head.emptyIndex();
// Add new index
_head.index.insert(pos, index);
// Allocate empty index at position
head.allocateIndex(pos);
// Write item
final (_, wasSet) = await _tryWriteItemInner(head, pos, value);
@ -308,7 +295,7 @@ class DHTShortArray {
}
// Get sequence number written
await head.updateIndexSeq(index, true);
await head.updatePositionSeq(pos, true);
return true;
}
@ -325,24 +312,13 @@ class DHTShortArray {
(head) async => _trySwapItemInner(head, aPos, bPos)) ??
false;
// Send update
_watchController?.sink.add(null);
return out;
}
Future<bool> _trySwapItemInner(
_DHTShortArrayHead head, int aPos, int bPos) async {
// No-op case
if (aPos == bPos) {
return true;
}
// Swap indices
final aIdx = _head.index[aPos];
final bIdx = _head.index[bPos];
_head.index[aPos] = bIdx;
_head.index[bPos] = aIdx;
head.swapIndex(aPos, bPos);
return true;
}
@ -358,28 +334,17 @@ class DHTShortArray {
final out =
_head.operateWrite((head) async => _tryRemoveItemInner(head, pos));
// Send update
_watchController?.sink.add(null);
return out;
}
Future<Uint8List> _tryRemoveItemInner(
_DHTShortArrayHead head, int pos) async {
final index = _head.index.removeAt(pos);
final recordNumber = index ~/ head.stride;
final record = head.getLinkedRecord(recordNumber);
if (record == null) {
throw StateError('Record does not exist');
}
final recordSubkey = (index % head.stride) + ((recordNumber == 0) ? 1 : 0);
final (record, recordSubkey) = await head.lookupPosition(pos);
final result = await record.get(subkey: recordSubkey);
if (result == null) {
throw StateError('Element does not exist');
}
head.freeIndex(index);
head.freeIndex(pos);
return result;
}
@ -405,15 +370,11 @@ class DHTShortArray {
final out =
await _head.operateWrite((head) async => _tryClearInner(head)) ?? false;
// Send update
_watchController?.sink.add(null);
return out;
}
Future<bool> _tryClearInner(_DHTShortArrayHead head) async {
head.index.clear();
head.free.clear();
head.clearIndex();
return true;
}
@ -434,23 +395,15 @@ class DHTShortArray {
return (null, false);
}
// Send update
_watchController?.sink.add(null);
return out;
}
Future<(Uint8List?, bool)> _tryWriteItemInner(
_DHTShortArrayHead head, int pos, Uint8List newValue) async {
if (pos < 0 || pos >= head.index.length) {
throw IndexError.withLength(pos, _head.index.length);
if (pos < 0 || pos >= head.length) {
throw IndexError.withLength(pos, head.length);
}
final index = head.index[pos];
final recordNumber = index ~/ head.stride;
final record = await head.getOrCreateLinkedRecord(recordNumber);
final recordSubkey = (index % head.stride) + ((recordNumber == 0) ? 1 : 0);
final (record, recordSubkey) = await head.lookupPosition(pos);
final oldValue = await record.get(subkey: recordSubkey);
final result = await record.tryWriteBytes(newValue, subkey: recordSubkey);
if (result != null) {
@ -471,9 +424,6 @@ class DHTShortArray {
(_, wasSet) = await _tryWriteItemInner(head, pos, newValue);
return wasSet;
}, timeout: timeout);
// Send update
_watchController?.sink.add(null);
}
/// Change an item at position 'pos' of the DHTShortArray.
@ -497,9 +447,6 @@ class DHTShortArray {
(_, wasSet) = await _tryWriteItemInner(head, pos, updatedData);
return wasSet;
}, timeout: timeout);
// Send update
_watchController?.sink.add(null);
}
/// Convenience function:
@ -569,13 +516,13 @@ class DHTShortArray {
// rid of the controller and drop our subscriptions
unawaited(_listenMutex.protect(() async {
// Cancel watches of head record
await _head._cancelWatch();
await _head.cancelWatch();
_watchController = null;
}));
});
// Start watching head record
await _head._watch();
await _head.watch();
}
// Return subscription
return _watchController!.stream.listen((_) => onChanged());
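
All the scattered _watchController?.sink.add(null) calls are gone from the mutators; instead the constructor installs a single onUpdatedHead callback on the head, and successful head writes invoke it. A minimal sketch of that one-notification-point pattern (names hypothetical):

import 'dart:async';

class Head {
  void Function()? onUpdatedHead;

  Future<bool> writeHead() async {
    // ... attempt to write the head record ...
    onUpdatedHead?.call(); // one notification point for every mutation
    return true;
  }
}

class ShortArray {
  ShortArray() {
    // Wire change notification once, in the constructor.
    _head.onUpdatedHead = () => _changes.sink.add(null);
  }

  final Head _head = Head();
  final StreamController<void> _changes = StreamController.broadcast();

  Stream<void> get changes => _changes.stream;
}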

View File

@ -1,40 +1,52 @@
part of 'dht_short_array.dart';
////////////////////////////////////////////////////////////////
// Internal Operations
class _DHTShortArrayHead {
_DHTShortArrayHead({required this.headRecord})
: linkedRecords = [],
index = [],
free = [],
seqs = [],
localSeqs = [] {
_DHTShortArrayHead({required DHTRecord headRecord})
: _headRecord = headRecord,
_linkedRecords = [],
_index = [],
_free = [],
_seqs = [],
_localSeqs = [] {
_calculateStride();
}
proto.DHTShortArray toProto() {
void _calculateStride() {
switch (_headRecord.schema) {
case DHTSchemaDFLT(oCnt: final oCnt):
if (oCnt <= 1) {
throw StateError('Invalid DFLT schema in DHTShortArray');
}
_stride = oCnt - 1;
case DHTSchemaSMPL(oCnt: final oCnt, members: final members):
if (oCnt != 0 || members.length != 1 || members[0].mCnt <= 1) {
throw StateError('Invalid SMPL schema in DHTShortArray');
}
_stride = members[0].mCnt - 1;
}
assert(_stride <= DHTShortArray.maxElements, 'stride too long');
}
proto.DHTShortArray _toProto() {
assert(_headMutex.isLocked, 'should be in mutex here');
final head = proto.DHTShortArray();
head.keys.addAll(linkedRecords.map((lr) => lr.key.toProto()));
head.index.addAll(index);
head.seqs.addAll(seqs);
head.keys.addAll(_linkedRecords.map((lr) => lr.key.toProto()));
head.index.addAll(_index);
head.seqs.addAll(_seqs);
// Do not serialize free list, it gets recreated
// Do not serialize local seqs, they are only locally relevant
return head;
}
Future<void> close() async {
final futures = <Future<void>>[headRecord.close()];
for (final lr in linkedRecords) {
futures.add(lr.close());
}
await Future.wait(futures);
}
TypedKey get recordKey => _headRecord.key;
OwnedDHTRecordPointer get recordPointer => _headRecord.ownedDHTRecordPointer;
int get length => _index.length;
Future<void> delete() async {
final futures = <Future<void>>[headRecord.delete()];
for (final lr in linkedRecords) {
futures.add(lr.delete());
Future<void> close() async {
final futures = <Future<void>>[_headRecord.close()];
for (final lr in _linkedRecords) {
futures.add(lr.close());
}
await Future.wait(futures);
}
@ -46,55 +58,57 @@ class _DHTShortArrayHead {
});
Future<T?> operateWrite<T>(
Future<T> Function(_DHTShortArrayHead) closure) async {
final oldLinkedRecords = List.of(linkedRecords);
final oldIndex = List.of(index);
final oldFree = List.of(free);
final oldSeqs = List.of(seqs);
try {
final out = await _headMutex.protect(() async {
final out = await closure(this);
// Write head assuming it has been changed
if (!await _tryWriteHead()) {
// Failed to write head means head got overwritten so write should
// be considered failed
return null;
}
return out;
});
return out;
} on Exception {
// Exception means state needs to be reverted
linkedRecords = oldLinkedRecords;
index = oldIndex;
free = oldFree;
seqs = oldSeqs;
Future<T> Function(_DHTShortArrayHead) closure) async =>
_headMutex.protect(() async {
final oldLinkedRecords = List.of(_linkedRecords);
final oldIndex = List.of(_index);
final oldFree = List.of(_free);
final oldSeqs = List.of(_seqs);
try {
final out = await closure(this);
// Write head assuming it has been changed
if (!await _writeHead()) {
// Failed to write head means head got overwritten so write should
// be considered failed
return null;
}
rethrow;
}
}
onUpdatedHead?.call();
return out;
} on Exception {
// Exception means state needs to be reverted
_linkedRecords = oldLinkedRecords;
_index = oldIndex;
_free = oldFree;
_seqs = oldSeqs;
rethrow;
}
});
Future<void> operateWriteEventual(
Future<bool> Function(_DHTShortArrayHead) closure,
{Duration? timeout}) async {
late List<DHTRecord> oldLinkedRecords;
late List<int> oldIndex;
late List<int> oldFree;
late List<int> oldSeqs;
final timeoutTs = timeout == null
? null
: Veilid.instance.now().offset(TimestampDuration.fromDuration(timeout));
try {
await _headMutex.protect(() async {
await _headMutex.protect(() async {
late List<DHTRecord> oldLinkedRecords;
late List<int> oldIndex;
late List<int> oldFree;
late List<int> oldSeqs;
try {
// Iterate until we have a successful element and head write
do {
// Save off old values each pass of tryWriteHead because the head
// will have changed
oldLinkedRecords = List.of(linkedRecords);
oldIndex = List.of(index);
oldFree = List.of(free);
oldSeqs = List.of(seqs);
oldLinkedRecords = List.of(_linkedRecords);
oldIndex = List.of(_index);
oldFree = List.of(_free);
oldSeqs = List.of(_seqs);
// Try to do the element write
do {
@ -107,26 +121,30 @@ class _DHTShortArrayHead {
} while (!await closure(this));
// Try to do the head write
} while (!await _tryWriteHead());
});
} on Exception {
// Exception means state needs to be reverted
linkedRecords = oldLinkedRecords;
index = oldIndex;
free = oldFree;
seqs = oldSeqs;
} while (!await _writeHead());
rethrow;
}
onUpdatedHead?.call();
} on Exception {
// Exception means state needs to be reverted
_linkedRecords = oldLinkedRecords;
_index = oldIndex;
_free = oldFree;
_seqs = oldSeqs;
rethrow;
}
});
}
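
operateWrite snapshots the four mutable head lists before running the closure and restores them if the closure throws, a manual rollback that is now performed entirely inside the head mutex. The rollback shape in isolation (a lost head write, where commit returns false, needs no rollback because the newer head has already been incorporated):

class HeadState {
  List<int> index = [];
  List<int> free = [];

  /// Run [mutate]; keep its changes only if [commit] succeeds, and
  /// restore the snapshots if [mutate] or [commit] throws.
  Future<T?> transact<T>(
      Future<T> Function() mutate, Future<bool> Function() commit) async {
    final oldIndex = List.of(index);
    final oldFree = List.of(free);
    try {
      final out = await mutate();
      if (!await commit()) {
        return null; // write lost the race; state was refreshed, not rolled back
      }
      return out;
    } on Exception {
      index = oldIndex; // exception: restore the snapshots
      free = oldFree;
      rethrow;
    }
  }
}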
/// Serialize and write out the current head record, possibly updating it
/// if a newer copy is available online. Returns true if the write was
/// successful
Future<bool> _tryWriteHead() async {
final headBuffer = toProto().writeToBuffer();
Future<bool> _writeHead() async {
assert(_headMutex.isLocked, 'should be in mutex here');
final existingData = await headRecord.tryWriteBytes(headBuffer);
final headBuffer = _toProto().writeToBuffer();
final existingData = await _headRecord.tryWriteBytes(headBuffer);
if (existingData != null) {
// Head write failed, incorporate update
await _updateHead(proto.DHTShortArray.fromBuffer(existingData));
@ -138,6 +156,8 @@ class _DHTShortArrayHead {
/// Validate a new head record that has come in from the network
Future<void> _updateHead(proto.DHTShortArray head) async {
assert(_headMutex.isLocked, 'should be in mutex here');
// Get the set of new linked keys and validate it
final updatedLinkedKeys = head.keys.map((p) => p.toVeilid()).toList();
final updatedIndex = List.of(head.index);
@ -146,7 +166,7 @@ class _DHTShortArrayHead {
// See which records are actually new
final oldRecords = Map<TypedKey, DHTRecord>.fromEntries(
linkedRecords.map((lr) => MapEntry(lr.key, lr)));
_linkedRecords.map((lr) => MapEntry(lr.key, lr)));
final newRecords = <TypedKey, DHTRecord>{};
final sameRecords = <TypedKey, DHTRecord>{};
final updatedLinkedRecords = <DHTRecord>[];
@ -173,32 +193,33 @@ class _DHTShortArrayHead {
// From this point forward we should not throw an exception or everything
// is possibly invalid. Just pass the exception up if it happens and the caller
// will have to delete this short array and reopen it if it can
await Future.wait(oldRecords.entries
await oldRecords.entries
.where((e) => !sameRecords.containsKey(e.key))
.map((e) => e.value.close()));
.map((e) => e.value.close())
.wait;
// Get the localseqs list from inspect results
final localReports = await [headRecord, ...updatedLinkedRecords].map((r) {
final start = (r.key == headRecord.key) ? 1 : 0;
return r
.inspect(subkeys: [ValueSubkeyRange.make(start, start + stride - 1)]);
final localReports = await [_headRecord, ...updatedLinkedRecords].map((r) {
final start = (r.key == _headRecord.key) ? 1 : 0;
return r.inspect(
subkeys: [ValueSubkeyRange.make(start, start + _stride - 1)]);
}).wait;
final updatedLocalSeqs =
localReports.map((l) => l.localSeqs).expand((e) => e).toList();
// Make the new head cache
linkedRecords = updatedLinkedRecords;
index = updatedIndex;
free = updatedFree;
seqs = updatedSeqs;
localSeqs = updatedLocalSeqs;
_linkedRecords = updatedLinkedRecords;
_index = updatedIndex;
_free = updatedFree;
_seqs = updatedSeqs;
_localSeqs = updatedLocalSeqs;
}
/// Pull the latest or updated copy of the head record from the network
Future<bool> _refreshInner(
// Pull the latest or updated copy of the head record from the network
Future<bool> _loadHead(
{bool forceRefresh = true, bool onlyUpdates = false}) async {
// Get an updated head record copy if one exists
final head = await headRecord.getProtobuf(proto.DHTShortArray.fromBuffer,
final head = await _headRecord.getProtobuf(proto.DHTShortArray.fromBuffer,
subkey: 0, forceRefresh: forceRefresh, onlyUpdates: onlyUpdates);
if (head == null) {
if (onlyUpdates) {
@ -213,82 +234,110 @@ class _DHTShortArrayHead {
return true;
}
void _calculateStride() {
switch (headRecord.schema) {
case DHTSchemaDFLT(oCnt: final oCnt):
if (oCnt <= 1) {
throw StateError('Invalid DFLT schema in DHTShortArray');
}
stride = oCnt - 1;
case DHTSchemaSMPL(oCnt: final oCnt, members: final members):
if (oCnt != 0 || members.length != 1 || members[0].mCnt <= 1) {
throw StateError('Invalid SMPL schema in DHTShortArray');
}
stride = members[0].mCnt - 1;
}
assert(stride <= DHTShortArray.maxElements, 'stride too long');
}
/////////////////////////////////////////////////////////////////////////////
// Linked record management
DHTRecord? getLinkedRecord(int recordNumber) {
Future<DHTRecord> _getOrCreateLinkedRecord(int recordNumber) async {
if (recordNumber == 0) {
return headRecord;
}
recordNumber--;
if (recordNumber >= linkedRecords.length) {
return null;
}
return linkedRecords[recordNumber];
}
Future<DHTRecord> getOrCreateLinkedRecord(int recordNumber) async {
if (recordNumber == 0) {
return headRecord;
return _headRecord;
}
final pool = DHTRecordPool.instance;
recordNumber--;
while (recordNumber >= linkedRecords.length) {
while (recordNumber >= _linkedRecords.length) {
// Linked records must use SMPL schema so writer can be specified
// Use the same writer as the head record
final smplWriter = headRecord.writer!;
final parent = pool.getParentRecordKey(headRecord.key);
final routingContext = headRecord.routingContext;
final crypto = headRecord.crypto;
final smplWriter = _headRecord.writer!;
final parent = _headRecord.key;
final routingContext = _headRecord.routingContext;
final crypto = _headRecord.crypto;
final schema = DHTSchema.smpl(
oCnt: 0,
members: [DHTSchemaMember(mKey: smplWriter.key, mCnt: stride)]);
final dhtCreateRecord = await pool.create(
members: [DHTSchemaMember(mKey: smplWriter.key, mCnt: _stride)]);
final dhtRecord = await pool.create(
parent: parent,
routingContext: routingContext,
schema: schema,
crypto: crypto,
writer: smplWriter);
// Reopen with SMPL writer
await dhtCreateRecord.close();
final dhtRecord = await pool.openWrite(dhtCreateRecord.key, smplWriter,
parent: parent, routingContext: routingContext, crypto: crypto);
// Add to linked records
linkedRecords.add(dhtRecord);
if (!await _tryWriteHead()) {
await _refreshInner();
}
_linkedRecords.add(dhtRecord);
}
return linkedRecords[recordNumber];
if (!await _writeHead()) {
throw StateError('failed to add linked record');
}
return _linkedRecords[recordNumber];
}
int emptyIndex() {
if (free.isNotEmpty) {
return free.removeLast();
/// Open a linked record for reading or writing, same as the head record
Future<DHTRecord> _openLinkedRecord(TypedKey recordKey) async {
final writer = _headRecord.writer;
return (writer != null)
? await DHTRecordPool.instance.openWrite(
recordKey,
writer,
parent: _headRecord.key,
routingContext: _headRecord.routingContext,
)
: await DHTRecordPool.instance.openRead(
recordKey,
parent: _headRecord.key,
routingContext: _headRecord.routingContext,
);
}
Future<(DHTRecord, int)> lookupPosition(int pos) async {
final idx = _index[pos];
return lookupIndex(idx);
}
Future<(DHTRecord, int)> lookupIndex(int idx) async {
final recordNumber = idx ~/ _stride;
final record = await _getOrCreateLinkedRecord(recordNumber);
final recordSubkey = (idx % _stride) + ((recordNumber == 0) ? 1 : 0);
return (record, recordSubkey);
}
/////////////////////////////////////////////////////////////////////////////
// Index management
/// Allocate an empty index slot at a specific position
void allocateIndex(int pos) {
// Allocate empty index
final idx = _emptyIndex();
_index.insert(pos, idx);
}
int _emptyIndex() {
if (_free.isNotEmpty) {
return _free.removeLast();
}
if (index.length == DHTShortArray.maxElements) {
if (_index.length == DHTShortArray.maxElements) {
throw StateError('too many elements');
}
return index.length;
return _index.length;
}
void freeIndex(int idx) {
free.add(idx);
void swapIndex(int aPos, int bPos) {
if (aPos == bPos) {
return;
}
final aIdx = _index[aPos];
final bIdx = _index[bPos];
_index[aPos] = bIdx;
_index[bPos] = aIdx;
}
void clearIndex() {
_index.clear();
_free.clear();
}
/// Release an index at a particular position
void freeIndex(int pos) {
final idx = _index.removeAt(pos);
_free.add(idx);
// xxx: free list optimization here?
}
@ -299,7 +348,8 @@ class _DHTShortArrayHead {
// Ensure nothing is duplicated in the linked keys set
final newKeys = linkedKeys.toSet();
assert(
newKeys.length <= (DHTShortArray.maxElements + (stride - 1)) ~/ stride,
newKeys.length <=
(DHTShortArray.maxElements + (_stride - 1)) ~/ _stride,
'too many keys');
assert(newKeys.length == linkedKeys.length, 'duplicated linked keys');
final newIndex = index.toSet();
@ -307,7 +357,7 @@ class _DHTShortArrayHead {
assert(newIndex.length == index.length, 'duplicated index locations');
// Ensure all the index keys fit into the existing records
final indexCapacity = (linkedKeys.length + 1) * stride;
final indexCapacity = (linkedKeys.length + 1) * _stride;
int? maxIndex;
for (final idx in newIndex) {
assert(idx >= 0 && idx < indexCapacity, 'index out of range');
@ -328,117 +378,97 @@ class _DHTShortArrayHead {
return free;
}
/// Open a linked record for reading or writing, same as the head record
Future<DHTRecord> _openLinkedRecord(TypedKey recordKey) async {
final writer = headRecord.writer;
return (writer != null)
? await DHTRecordPool.instance.openWrite(
recordKey,
writer,
parent: headRecord.key,
routingContext: headRecord.routingContext,
)
: await DHTRecordPool.instance.openRead(
recordKey,
parent: headRecord.key,
routingContext: headRecord.routingContext,
);
}
/// Check if we know that the network has a copy of an index that is newer
/// than our local copy from looking at the seqs list in the head
bool indexNeedsRefresh(int index) {
bool positionNeedsRefresh(int pos) {
final idx = _index[pos];
// If our local sequence number is unknown or hasn't been written yet
// then a normal DHT operation is going to pull from the network anyway
if (localSeqs.length < index || localSeqs[index] == 0xFFFFFFFF) {
if (_localSeqs.length < idx || _localSeqs[idx] == 0xFFFFFFFF) {
return false;
}
// If the remote sequence number record is unknown or hasn't been written
// at this index yet, then we also do not refresh at this time as it
// is the first time the index is being written to
if (seqs.length < index || seqs[index] == 0xFFFFFFFF) {
if (_seqs.length < idx || _seqs[idx] == 0xFFFFFFFF) {
return false;
}
return localSeqs[index] < seqs[index];
return _localSeqs[idx] < _seqs[idx];
}
/// Update the sequence number for a particular index in
/// our local sequence number list.
/// If a write is happening, update the network copy as well.
Future<void> updateIndexSeq(int index, bool write) async {
final recordNumber = index ~/ stride;
final record = await getOrCreateLinkedRecord(recordNumber);
final recordSubkey = (index % stride) + ((recordNumber == 0) ? 1 : 0);
Future<void> updatePositionSeq(int pos, bool write) async {
final idx = _index[pos];
final (record, recordSubkey) = await lookupIndex(idx);
final report =
await record.inspect(subkeys: [ValueSubkeyRange.single(recordSubkey)]);
while (localSeqs.length <= index) {
localSeqs.add(0xFFFFFFFF);
while (_localSeqs.length <= idx) {
_localSeqs.add(0xFFFFFFFF);
}
localSeqs[index] = report.localSeqs[0];
_localSeqs[idx] = report.localSeqs[0];
if (write) {
while (seqs.length <= index) {
seqs.add(0xFFFFFFFF);
while (_seqs.length <= idx) {
_seqs.add(0xFFFFFFFF);
}
seqs[index] = report.localSeqs[0];
_seqs[idx] = report.localSeqs[0];
}
}
/////////////////////////////////////////////////////////////////////////////
// Watch For Updates
// Watch head for changes
Future<void> _watch() async {
Future<void> watch() async {
// This will update any existing watches if necessary
try {
await headRecord.watch(subkeys: [ValueSubkeyRange.single(0)]);
await _headRecord.watch(subkeys: [ValueSubkeyRange.single(0)]);
// Update changes to the head record
// Don't watch for local changes because this class already handles
// notifying listeners and knows when it makes local changes
_subscription ??=
await headRecord.listen(localChanges: false, _onUpdateHead);
await _headRecord.listen(localChanges: false, _onHeadValueChanged);
} on Exception {
// If anything fails, try to cancel the watches
await _cancelWatch();
await cancelWatch();
rethrow;
}
}
// Stop watching for changes to head and linked records
Future<void> _cancelWatch() async {
await headRecord.cancelWatch();
Future<void> cancelWatch() async {
await _headRecord.cancelWatch();
await _subscription?.cancel();
_subscription = null;
}
// Called when a head or linked record changes
Future<void> _onUpdateHead(
Future<void> _onHeadValueChanged(
DHTRecord record, Uint8List? data, List<ValueSubkeyRange> subkeys) async {
// If head record subkey zero changes, then the layout
// of the dhtshortarray has changed
var updateHead = false;
if (record == headRecord && subkeys.containsSubkey(0)) {
updateHead = true;
if (data == null) {
throw StateError('head value changed without data');
}
if (record.key != _headRecord.key ||
subkeys.length != 1 ||
subkeys[0] != ValueSubkeyRange.single(0)) {
throw StateError('watch returning wrong subkey range');
}
// If we have any other subkeys to update, do them first
final unord = List<Future<Uint8List?>>.empty(growable: true);
for (final skr in subkeys) {
for (var subkey = skr.low; subkey <= skr.high; subkey++) {
// Skip head subkey
if (updateHead && subkey == 0) {
continue;
}
// Get the subkey, which caches the result in the local record store
unord.add(record.get(subkey: subkey, forceRefresh: true));
}
}
await unord.wait;
// Decode updated head
final headData = proto.DHTShortArray.fromBuffer(data);
// Then update the head record
if (updateHead) {
await _refreshInner(forceRefresh: false);
}
await _headMutex.protect(() async {
await _updateHead(headData);
onUpdatedHead?.call();
});
}
////////////////////////////////////////////////////////////////////////////
@ -447,25 +477,26 @@ class _DHTShortArrayHead {
final Mutex _headMutex = Mutex();
// Subscription to head record internal changes
StreamSubscription<DHTRecordWatchChange>? _subscription;
// Notify closure for external head changes
void Function()? onUpdatedHead;
// Head DHT record
final DHTRecord headRecord;
final DHTRecord _headRecord;
// How many elements per linked record
late final int stride;
// List of additional records after the head record used for element data
List<DHTRecord> linkedRecords;
late final int _stride;
// List of additional records after the head record used for element data
List<DHTRecord> _linkedRecords;
// Ordering of the subkey indices.
// Elements are subkey numbers. Represents the element order.
List<int> index;
List<int> _index;
// List of free subkeys for elements that have been removed.
// Used to optimize allocations.
List<int> free;
List<int> _free;
// The sequence numbers of each subkey.
// Index is by subkey number not by element index.
// (n-1 for head record and then the next n for linked records)
List<int> seqs;
List<int> _seqs;
// The local sequence numbers for each subkey.
List<int> localSeqs;
List<int> _localSeqs;
}
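
The position/index/subkey mapping is now centralized in lookupPosition/lookupIndex. The arithmetic: element index idx lives in record idx ~/ stride, and because subkey 0 of the head record stores the head structure itself, record 0 offsets its data subkeys by one. Worked standalone:

// Map an element index onto (record number, subkey) given the stride.
// Record 0 is the head record, whose subkey 0 stores the head structure,
// so its data subkeys start at 1; linked records use subkeys 0..stride-1.
(int, int) lookupIndex(int idx, int stride) {
  final recordNumber = idx ~/ stride;
  final recordSubkey = (idx % stride) + (recordNumber == 0 ? 1 : 0);
  return (recordNumber, recordSubkey);
}

void main() {
  const stride = 4;
  // idx 0..3 land in the head record at subkeys 1..4;
  // idx 4..7 land in linked record 0 (recordNumber 1) at subkeys 0..3.
  for (var idx = 0; idx < 8; idx++) {
    print('$idx -> ${lookupIndex(idx, stride)}');
  }
}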

View File

@ -93,7 +93,7 @@ extension IdentityMasterExtension on IdentityMaster {
/// Deletes a master identity and the identity record under it
Future<void> delete() async {
final pool = DHTRecordPool.instance;
await (await pool.openRead(masterRecordKey)).delete();
await pool.delete(masterRecordKey);
}
Future<VeilidCryptoSystem> get identityCrypto =>

View File

@ -203,10 +203,10 @@ packages:
dependency: transitive
description:
name: dart_style
sha256: "40ae61a5d43feea6d24bd22c0537a6629db858963b99b4bc1c3db80676f32368"
sha256: "99e066ce75c89d6b29903d788a7bb9369cf754f7b24bf70bf4b6d6d6b26853b9"
url: "https://pub.dev"
source: hosted
version: "2.3.4"
version: "2.3.6"
equatable:
dependency: "direct main"
description:
@ -219,10 +219,10 @@ packages:
dependency: "direct main"
description:
name: fast_immutable_collections
sha256: "6df5b5bb29f52644c4c653ef0ae7d26c8463f8d6551b0ac94561103ff6c5ca17"
sha256: "49154d1da38a34519b907b0e94a06705a59b7127728131dc4a54fe62fd95a83e"
url: "https://pub.dev"
source: hosted
version: "10.1.1"
version: "10.2.1"
ffi:
dependency: transitive
description:
@ -728,10 +728,10 @@ packages:
dependency: transitive
description:
name: vm_service
sha256: e7d5ecd604e499358c5fe35ee828c0298a320d54455e791e9dcf73486bc8d9f0
sha256: a75f83f14ad81d5fe4b3319710b90dec37da0e22612326b696c9e1b8f34bbf48
url: "https://pub.dev"
source: hosted
version: "14.1.0"
version: "14.2.0"
watcher:
dependency: transitive
description:
@ -744,10 +744,10 @@ packages:
dependency: transitive
description:
name: web
sha256: "1d9158c616048c38f712a6646e317a3426da10e884447626167240d45209cbad"
sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27"
url: "https://pub.dev"
source: hosted
version: "0.5.0"
version: "0.5.1"
web_socket_channel:
dependency: transitive
description:
@ -768,10 +768,10 @@ packages:
dependency: transitive
description:
name: win32
sha256: "464f5674532865248444b4c3daca12bd9bf2d7c47f759ce2617986e7229494a8"
sha256: "8cb58b45c47dcb42ab3651533626161d6b67a2921917d8d429791f76972b3480"
url: "https://pub.dev"
source: hosted
version: "5.2.0"
version: "5.3.0"
xdg_directories:
dependency: transitive
description:
@ -790,4 +790,4 @@ packages:
version: "3.1.2"
sdks:
dart: ">=3.3.0 <4.0.0"
flutter: ">=3.10.6"
flutter: ">=3.19.1"

View File

@ -68,10 +68,10 @@ packages:
dependency: "direct main"
description:
name: awesome_extensions
sha256: cde9c8c155c1a1cafc5286807e16124e97f0cff739a47ec17aa9d26c3c37abcf
sha256: "7d235d64a81543a7e200a91b1149bef7d32241290fa483bae25b31be41449a7c"
url: "https://pub.dev"
source: hosted
version: "2.0.12"
version: "2.0.13"
badges:
dependency: "direct main"
description:
@ -219,18 +219,18 @@ packages:
dependency: transitive
description:
name: camera_android
sha256: "351429510121d179b9aac5a2e8cb525c3cd6c39f4d709c5f72dfb21726e52371"
sha256: "15a6543878a41c141807ffab496f66b7fef6da0f23372f5513fc6349e60f437e"
url: "https://pub.dev"
source: hosted
version: "0.10.8+16"
version: "0.10.8+17"
camera_avfoundation:
dependency: transitive
description:
name: camera_avfoundation
sha256: "7d0763dfcbf060f56aa254a68c103210280bee9e97bbe4fdef23e257a4f70ab9"
sha256: "8b113e43ee4434c9244c03c905432a0d5956cedaded3cd7381abaab89ce50297"
url: "https://pub.dev"
source: hosted
version: "0.9.14"
version: "0.9.14+1"
camera_platform_interface:
dependency: transitive
description:
@ -379,10 +379,10 @@ packages:
dependency: transitive
description:
name: dart_style
sha256: "40ae61a5d43feea6d24bd22c0537a6629db858963b99b4bc1c3db80676f32368"
sha256: "99e066ce75c89d6b29903d788a7bb9369cf754f7b24bf70bf4b6d6d6b26853b9"
url: "https://pub.dev"
source: hosted
version: "2.3.4"
version: "2.3.6"
diffutil_dart:
dependency: transitive
description:
@ -403,10 +403,10 @@ packages:
dependency: "direct main"
description:
name: fast_immutable_collections
sha256: "6df5b5bb29f52644c4c653ef0ae7d26c8463f8d6551b0ac94561103ff6c5ca17"
sha256: "49154d1da38a34519b907b0e94a06705a59b7127728131dc4a54fe62fd95a83e"
url: "https://pub.dev"
source: hosted
version: "10.1.1"
version: "10.2.1"
ffi:
dependency: transitive
description:
@ -517,10 +517,10 @@ packages:
dependency: "direct main"
description:
name: flutter_native_splash
sha256: "558f10070f03ee71f850a78f7136ab239a67636a294a44a06b6b7345178edb1e"
sha256: edf39bcf4d74aca1eb2c1e43c3e445fd9f494013df7f0da752fefe72020eedc0
url: "https://pub.dev"
source: hosted
version: "2.3.10"
version: "2.4.0"
flutter_parsed_text:
dependency: transitive
description:
@ -549,10 +549,10 @@ packages:
dependency: "direct main"
description:
name: flutter_slidable
sha256: "19ed4813003a6ff4e9c6bcce37e792a2a358919d7603b2b31ff200229191e44c"
sha256: "673403d2eeef1f9e8483bd6d8d92aae73b1d8bd71f382bc3930f699c731bc27c"
url: "https://pub.dev"
source: hosted
version: "3.0.1"
version: "3.1.0"
flutter_spinkit:
dependency: "direct main"
description:
@ -634,10 +634,10 @@ packages:
dependency: "direct main"
description:
name: go_router
sha256: "170c46e237d6eb0e6e9f0e8b3f56101e14fb64f787016e42edd74c39cf8b176a"
sha256: "7ecb2f391edbca5473db591b48555a8912dde60edd0fb3013bd6743033b2d3f8"
url: "https://pub.dev"
source: hosted
version: "13.2.0"
version: "13.2.1"
graphs:
dependency: transitive
description:
@ -818,10 +818,10 @@ packages:
dependency: "direct main"
description:
name: mobile_scanner
sha256: "619ed5fd43ca9007a151f00c3dc43feedeaf235fe5647735d0237c38849d49dc"
sha256: "827765afbd4792ff3fd105ad593821ac0f6d8a7d352689013b07ee85be336312"
url: "https://pub.dev"
source: hosted
version: "4.0.0"
version: "4.0.1"
motion_toast:
dependency: "direct main"
description:
@ -1009,10 +1009,10 @@ packages:
dependency: "direct main"
description:
name: provider
sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096"
sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c
url: "https://pub.dev"
source: hosted
version: "6.1.1"
version: "6.1.2"
pub_semver:
dependency: transitive
description:
@ -1041,10 +1041,10 @@ packages:
dependency: "direct main"
description:
name: qr_code_dart_scan
sha256: b42d097e346a546fcf9ff2f5a0e39ea1315449608cfd9b2bc6513988b488a371
sha256: "8e9732d5b6e4e28d50647dc6d7713bf421148cadf28c768a10e9810bf6f3d87a"
url: "https://pub.dev"
source: hosted
version: "0.7.5"
version: "0.7.6"
qr_flutter:
dependency: "direct main"
description:
@ -1057,10 +1057,10 @@ packages:
dependency: "direct main"
description:
name: quickalert
sha256: "0c21c9be68b9ae76082e1ad56db9f51202a38e617e08376f05375238277cfb5a"
sha256: b5d62b1e20b08cc0ff5f40b6da519bdc7a5de6082f13d90572cf4e72eea56c5e
url: "https://pub.dev"
source: hosted
version: "1.0.2"
version: "1.1.0"
quiver:
dependency: transitive
description:
@ -1113,10 +1113,10 @@ packages:
dependency: "direct main"
description:
name: searchable_listview
sha256: "5cd3cd87e0cbd4e6685f6798a9bb4bcc170df20fb92beb662b978f5fccded634"
sha256: "5535ea3efa4599cf23ce52870a9580b52ece5d691aa90655ebec76d5081c9592"
url: "https://pub.dev"
source: hosted
version: "2.10.2"
version: "2.11.1"
share_plus:
dependency: "direct main"
description:
@ -1129,10 +1129,10 @@ packages:
dependency: transitive
description:
name: share_plus_platform_interface
sha256: df08bc3a07d01f5ea47b45d03ffcba1fa9cd5370fb44b3f38c70e42cced0f956
sha256: "251eb156a8b5fa9ce033747d73535bf53911071f8d3b6f4f0b578505ce0d4496"
url: "https://pub.dev"
source: hosted
version: "3.3.1"
version: "3.4.0"
shared_preferences:
dependency: "direct main"
description:
@ -1278,10 +1278,10 @@ packages:
dependency: transitive
description:
name: sqflite_common
sha256: "28d8c66baee4968519fb8bd6cdbedad982d6e53359091f0b74544a9f32ec72d5"
sha256: "3da423ce7baf868be70e2c0976c28a1bb2f73644268b7ffa7d2e08eab71f16a4"
url: "https://pub.dev"
source: hosted
version: "2.5.3"
version: "2.5.4"
stack_trace:
dependency: "direct main"
description:
@ -1532,10 +1532,10 @@ packages:
dependency: transitive
description:
name: web
sha256: "1d9158c616048c38f712a6646e317a3426da10e884447626167240d45209cbad"
sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27"
url: "https://pub.dev"
source: hosted
version: "0.5.0"
version: "0.5.1"
web_socket_channel:
dependency: transitive
description:
@ -1548,10 +1548,10 @@ packages:
dependency: transitive
description:
name: win32
sha256: "464f5674532865248444b4c3daca12bd9bf2d7c47f759ce2617986e7229494a8"
sha256: "8cb58b45c47dcb42ab3651533626161d6b67a2921917d8d429791f76972b3480"
url: "https://pub.dev"
source: hosted
version: "5.2.0"
version: "5.3.0"
window_manager:
dependency: "direct main"
description: