mirror of https://gitlab.com/veilid/veilidchat.git
synced 2025-10-11 04:38:28 -04:00
clean up a bunch of exceptions
This commit is contained in:
parent c077a0290f
commit bf38c2c44d
21 changed files with 244 additions and 166 deletions
@@ -7,6 +7,7 @@ import 'package:collection/collection.dart';
 import 'package:equatable/equatable.dart';
 import 'package:meta/meta.dart';
 
+import '../../../src/veilid_log.dart';
 import '../../../veilid_support.dart';
 import '../../proto/proto.dart' as proto;
 
@@ -47,8 +47,16 @@ class _DHTLogRead implements DHTLogReadOperations {
 
     final chunks = Iterable<int>.generate(length)
         .slices(kMaxDHTConcurrency)
-        .map((chunk) => chunk
-            .map((pos) async => get(pos + start, forceRefresh: forceRefresh)));
+        .map((chunk) => chunk.map((pos) async {
+          try {
+            return get(pos + start, forceRefresh: forceRefresh);
+            // Need some way to debug ParallelWaitError
+            // ignore: avoid_catches_without_on_clauses
+          } catch (e, st) {
+            veilidLoggy.error('$e\n$st\n');
+            rethrow;
+          }
+        }));
 
     for (final chunk in chunks) {
       final elems = await chunk.wait;
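A note on the pattern above (not part of the diff): in Dart 3, calling .wait on an Iterable of Futures collects failures into a single ParallelWaitError, which buries the individual stack traces; logging and rethrowing inside each mapped closure surfaces them. A minimal, self-contained sketch of the same shape — fetchItem, kMaxConcurrency, and the failure at pos 7 are made up for illustration:

    import 'dart:async';
    import 'package:collection/collection.dart';

    const kMaxConcurrency = 4; // stand-in for kMaxDHTConcurrency

    Future<String> fetchItem(int pos) async {
      // Placeholder for get(pos + start, ...); fails once to show the logging.
      if (pos == 7) {
        throw StateError('simulated failure at pos $pos');
      }
      return 'item $pos';
    }

    Future<void> main() async {
      final chunks = Iterable<int>.generate(10)
          .slices(kMaxConcurrency)
          .map((chunk) => chunk.map((pos) async {
                try {
                  return await fetchItem(pos);
                  // ignore: avoid_catches_without_on_clauses
                } catch (e, st) {
                  // Log here; otherwise only a ParallelWaitError surfaces below
                  // and the per-future stack trace is lost.
                  print('chunk element failed: $e\n$st');
                  rethrow;
                }
              }));

      for (final chunk in chunks) {
        try {
          final elems = await chunk.wait; // runs the chunk concurrently
          print(elems);
        } on ParallelWaitError {
          print('one or more elements in this chunk failed');
        }
      }
    }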
@@ -248,7 +248,12 @@ class _DHTLogSpine {
     final headDelta = _ringDistance(newHead, oldHead);
     final tailDelta = _ringDistance(newTail, oldTail);
     if (headDelta > _positionLimit ~/ 2 || tailDelta > _positionLimit ~/ 2) {
-      throw const DHTExceptionInvalidData();
+      throw DHTExceptionInvalidData('_DHTLogSpine::_updateHead '
+          '_head=$_head _tail=$_tail '
+          'oldHead=$oldHead oldTail=$oldTail '
+          'newHead=$newHead newTail=$newTail '
+          'headDelta=$headDelta tailDelta=$tailDelta '
+          '_positionLimit=$_positionLimit');
     }
   }
 
@@ -17,7 +17,8 @@ class _DHTLogWrite extends _DHTLogRead implements DHTLogWriteOperations {
     }
     final lookup = await _spine.lookupPosition(pos);
     if (lookup == null) {
-      throw const DHTExceptionInvalidData();
+      throw DHTExceptionInvalidData(
+          '_DHTLogRead::tryWriteItem pos=$pos _spine.length=${_spine.length}');
     }
 
     // Write item to the segment
@@ -45,12 +46,14 @@ class _DHTLogWrite extends _DHTLogRead implements DHTLogWriteOperations {
     }
     final aLookup = await _spine.lookupPosition(aPos);
     if (aLookup == null) {
-      throw const DHTExceptionInvalidData();
+      throw DHTExceptionInvalidData('_DHTLogWrite::swap aPos=$aPos bPos=$bPos '
+          '_spine.length=${_spine.length}');
     }
     final bLookup = await _spine.lookupPosition(bPos);
     if (bLookup == null) {
       await aLookup.close();
-      throw const DHTExceptionInvalidData();
+      throw DHTExceptionInvalidData('_DHTLogWrite::swap aPos=$aPos bPos=$bPos '
+          '_spine.length=${_spine.length}');
     }
 
     // Swap items in the segments
@@ -65,7 +68,10 @@ class _DHTLogWrite extends _DHTLogRead implements DHTLogWriteOperations {
       if (bItem.value == null) {
         final aItem = await aWrite.get(aLookup.pos);
         if (aItem == null) {
-          throw const DHTExceptionInvalidData();
+          throw DHTExceptionInvalidData(
+              '_DHTLogWrite::swap aPos=$aPos bPos=$bPos '
+              'aLookup.pos=${aLookup.pos} bLookup.pos=${bLookup.pos} '
+              '_spine.length=${_spine.length}');
         }
         await sb.operateWriteEventual((bWrite) async {
           final success = await bWrite
@@ -101,7 +107,9 @@ class _DHTLogWrite extends _DHTLogRead implements DHTLogWriteOperations {
             await write.clear();
           } else if (lookup.pos != write.length) {
             // We should always be appending at the length
-            throw const DHTExceptionInvalidData();
+            throw DHTExceptionInvalidData(
+                '_DHTLogWrite::add lookup.pos=${lookup.pos} '
+                'write.length=${write.length}');
           }
           return write.add(value);
         }));
@@ -117,12 +125,16 @@ class _DHTLogWrite extends _DHTLogRead implements DHTLogWriteOperations {
     final dws = DelayedWaitSet<void, void>();
 
     var success = true;
-    for (var valueIdx = 0; valueIdx < values.length;) {
+    for (var valueIdxIter = 0; valueIdxIter < values.length;) {
+      final valueIdx = valueIdxIter;
       final remaining = values.length - valueIdx;
 
       final lookup = await _spine.lookupPosition(insertPos + valueIdx);
       if (lookup == null) {
-        throw const DHTExceptionInvalidData();
+        throw DHTExceptionInvalidData('_DHTLogWrite::addAll '
+            '_spine.length=${_spine.length}'
+            'insertPos=$insertPos valueIdx=$valueIdx '
+            'values.length=${values.length} ');
       }
 
       final sacount = min(remaining, DHTShortArray.maxElements - lookup.pos);
@@ -137,16 +149,21 @@ class _DHTLogWrite extends _DHTLogRead implements DHTLogWriteOperations {
               await write.clear();
             } else if (lookup.pos != write.length) {
               // We should always be appending at the length
-              throw const DHTExceptionInvalidData();
+              await write.truncate(lookup.pos);
             }
-            return write.addAll(sublistValues);
+            await write.addAll(sublistValues);
+            success = true;
           }));
         } on DHTExceptionOutdated {
           success = false;
+          // Need some way to debug ParallelWaitError
+          // ignore: avoid_catches_without_on_clauses
+        } catch (e, st) {
+          veilidLoggy.error('$e\n$st\n');
         }
       });
 
-      valueIdx += sacount;
+      valueIdxIter += sacount;
     }
 
     await dws();
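One behavioral change in addAll above: when lookup.pos falls behind the segment's current length, the code now truncates back to that position and appends instead of throwing DHTExceptionInvalidData. A rough sketch of that decision in isolation — SegmentWrite and appendAt are hypothetical names, not types from the repo:

    // Hypothetical stand-in for the segment writer used in the diff above.
    abstract class SegmentWrite {
      int get length;
      Future<void> truncate(int newLength);
      Future<void> addAll(List<List<int>> values);
    }

    Future<void> appendAt(
        SegmentWrite write, int pos, List<List<int>> values) async {
      if (pos != write.length) {
        // A stale or partially written segment is longer than expected.
        // Roll it back to the append position instead of failing the whole
        // operation, then append as usual.
        await write.truncate(pos);
      }
      await write.addAll(values);
    }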
@@ -246,11 +246,13 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
   /// Print children
   String debugChildren(TypedKey recordKey, {List<TypedKey>? allDeps}) {
     allDeps ??= _collectChildrenInner(recordKey);
+    // Debugging
     // ignore: avoid_print
     var out =
         'Parent: $recordKey (${_state.debugNames[recordKey.toString()]})\n';
     for (final dep in allDeps) {
       if (dep != recordKey) {
+        // Debugging
         // ignore: avoid_print
         out += '  Child: $dep (${_state.debugNames[dep.toString()]})\n';
       }
@@ -270,32 +272,25 @@
             break;
           }
         }
-      } else {
-        final now = Veilid.instance.now().value;
-        // Expired, process renewal if desired
-        for (final entry in _opened.entries) {
-          final openedKey = entry.key;
-          final openedRecordInfo = entry.value;
-
-          if (openedKey == updateValueChange.key) {
-            // Renew watch state for each opened record
-            for (final rec in openedRecordInfo.records) {
-              // See if the watch had an expiration and if it has expired
-              // otherwise the renewal will keep the same parameters
-              final watchState = rec._watchState;
-              if (watchState != null) {
-                final exp = watchState.expiration;
-                if (exp != null && exp.value < now) {
-                  // Has expiration, and it has expired, clear watch state
-                  rec._watchState = null;
-                }
-              }
-            }
-            openedRecordInfo.shared.needsWatchStateUpdate = true;
-            break;
-          }
-        }
       }
+      // else {
+
+      // XXX: should no longer be necessary
+      //   // Remove watch state
+      //
+      //   for (final entry in _opened.entries) {
+      //     final openedKey = entry.key;
+      //     final openedRecordInfo = entry.value;
+
+      //     if (openedKey == updateValueChange.key) {
+      //       for (final rec in openedRecordInfo.records) {
+      //         rec._watchState = null;
+      //       }
+      //       openedRecordInfo.shared.needsWatchStateUpdate = true;
+      //       break;
+      //     }
+      //   }
+      //}
     }
 
   /// Log the current record allocations
@@ -735,7 +730,6 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
     int? totalCount;
     Timestamp? maxExpiration;
     List<ValueSubkeyRange>? allSubkeys;
-    Timestamp? earliestRenewalTime;
 
     var noExpiration = false;
     var everySubkey = false;
@@ -768,15 +762,6 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
        } else {
          everySubkey = true;
        }
-        final wsRenewalTime = ws.renewalTime;
-        if (wsRenewalTime != null) {
-          earliestRenewalTime = earliestRenewalTime == null
-              ? wsRenewalTime
-              : Timestamp(
-                  value: (wsRenewalTime.value < earliestRenewalTime.value
-                      ? wsRenewalTime.value
-                      : earliestRenewalTime.value));
-        }
      }
    }
    if (noExpiration) {
@@ -790,25 +775,10 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
    }
 
    return _WatchState(
-        subkeys: allSubkeys,
-        expiration: maxExpiration,
-        count: totalCount,
-        renewalTime: earliestRenewalTime);
-  }
-
-  static void _updateWatchRealExpirations(Iterable<DHTRecord> records,
-      Timestamp realExpiration, Timestamp renewalTime) {
-    for (final rec in records) {
-      final ws = rec._watchState;
-      if (ws != null) {
-        rec._watchState = _WatchState(
-            subkeys: ws.subkeys,
-            expiration: ws.expiration,
-            count: ws.count,
-            realExpiration: realExpiration,
-            renewalTime: renewalTime);
-      }
-    }
+      subkeys: allSubkeys,
+      expiration: maxExpiration,
+      count: totalCount,
+    );
  }
 
  Future<void> _watchStateChange(
@@ -833,9 +803,9 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
      // Only try this once, if it doesn't succeed then it can just expire
      // on its own.
      try {
-        final cancelled = await dhtctx.cancelDHTWatch(openedRecordKey);
+        final stillActive = await dhtctx.cancelDHTWatch(openedRecordKey);
 
-        log('cancelDHTWatch: key=$openedRecordKey, cancelled=$cancelled, '
+        log('cancelDHTWatch: key=$openedRecordKey, stillActive=$stillActive, '
            'debugNames=${openedRecordInfo.debugNames}');
 
        openedRecordInfo.shared.unionWatchState = null;
@@ -858,34 +828,20 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
      final subkeys = unionWatchState.subkeys?.toList();
      final count = unionWatchState.count;
      final expiration = unionWatchState.expiration;
-      final now = veilid.now();
 
-      final realExpiration = await dhtctx.watchDHTValues(openedRecordKey,
+      final active = await dhtctx.watchDHTValues(openedRecordKey,
          subkeys: unionWatchState.subkeys?.toList(),
          count: unionWatchState.count,
-          expiration: unionWatchState.expiration ??
-              (_defaultWatchDurationSecs == null
-                  ? null
-                  : veilid.now().offset(TimestampDuration.fromMillis(
-                      _defaultWatchDurationSecs! * 1000))));
+          expiration: unionWatchState.expiration);
 
-      final expirationDuration = realExpiration.diff(now);
-      final renewalTime = now.offset(TimestampDuration(
-          value: expirationDuration.value *
-              BigInt.from(_watchRenewalNumerator) ~/
-              BigInt.from(_watchRenewalDenominator)));
-
-      log('watchDHTValues: key=$openedRecordKey, subkeys=$subkeys, '
+      log('watchDHTValues(active=$active): '
+          'key=$openedRecordKey, subkeys=$subkeys, '
          'count=$count, expiration=$expiration, '
-          'realExpiration=$realExpiration, '
-          'renewalTime=$renewalTime, '
          'debugNames=${openedRecordInfo.debugNames}');
 
      // Update watch states with real expiration
-      if (realExpiration.value != BigInt.zero) {
+      if (active) {
        openedRecordInfo.shared.unionWatchState = unionWatchState;
-        _updateWatchRealExpirations(
-            openedRecordInfo.records, realExpiration, renewalTime);
        openedRecordInfo.shared.needsWatchStateUpdate = false;
      }
    } on VeilidAPIExceptionTimeout {
@@ -944,22 +900,13 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
  /// Ticker to check watch state change requests
  Future<void> tick() async => _mutex.protect(() async {
        // See if any opened records need watch state changes
-        final now = veilid.now();
        for (final kv in _opened.entries) {
          final openedRecordKey = kv.key;
          final openedRecordInfo = kv.value;
 
-          var wantsWatchStateUpdate =
+          final wantsWatchStateUpdate =
              openedRecordInfo.shared.needsWatchStateUpdate;
 
-          // Check if we have reached renewal time for the watch
-          if (openedRecordInfo.shared.unionWatchState != null &&
-              openedRecordInfo.shared.unionWatchState!.renewalTime != null &&
-              now.value >
-                  openedRecordInfo.shared.unionWatchState!.renewalTime!.value) {
-            wantsWatchStateUpdate = true;
-          }
-
          if (wantsWatchStateUpdate) {
            // Update union watch state
            final unionWatchState =
@@ -1,9 +1,5 @@
 part of 'dht_record_pool.dart';
 
-const int? _defaultWatchDurationSecs = null; // 600
-const int _watchRenewalNumerator = 4;
-const int _watchRenewalDenominator = 5;
-
 // DHT crypto domain
 const String _cryptoDomainDHT = 'dht';
 
@@ -14,21 +10,17 @@ const _sfListen = 'listen';
 /// Watch state
 @immutable
 class _WatchState extends Equatable {
-  const _WatchState(
-      {required this.subkeys,
-      required this.expiration,
-      required this.count,
-      this.realExpiration,
-      this.renewalTime});
+  const _WatchState({
+    required this.subkeys,
+    required this.expiration,
+    required this.count,
+  });
   final List<ValueSubkeyRange>? subkeys;
   final Timestamp? expiration;
   final int? count;
-  final Timestamp? realExpiration;
-  final Timestamp? renewalTime;
 
   @override
-  List<Object?> get props =>
-      [subkeys, expiration, count, realExpiration, renewalTime];
+  List<Object?> get props => [subkeys, expiration, count];
 }
 
 /// Data shared amongst all DHTRecord instances
@@ -4,6 +4,7 @@ import 'dart:typed_data';
 import 'package:async_tools/async_tools.dart';
 import 'package:collection/collection.dart';
 
+import '../../../src/veilid_log.dart';
 import '../../../veilid_support.dart';
 import '../../proto/proto.dart' as proto;
 
@@ -383,6 +383,24 @@ class _DHTShortArrayHead {
     // xxx: free list optimization here?
   }
 
+  /// Truncate index to a particular length
+  void truncate(int newLength) {
+    if (newLength >= _index.length) {
+      return;
+    } else if (newLength == 0) {
+      clearIndex();
+      return;
+    } else if (newLength < 0) {
+      throw StateError('can not truncate to negative length');
+    }
+
+    final newIndex = _index.sublist(0, newLength);
+    final freed = _index.sublist(newLength);
+
+    _index = newIndex;
+    _free.addAll(freed);
+  }
+
   /// Validate the head from the DHT is properly formatted
   /// and calculate the free list from it while we're here
   List<int> _makeFreeList(
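The truncate method added above keeps the first newLength index entries and moves the tail onto the free list. A toy illustration of that list arithmetic with plain Dart lists (the slot numbers are invented):

    void main() {
      var index = [11, 12, 13, 14, 15]; // segment slots currently in use
      final free = [3, 4];              // slots freed earlier

      const newLength = 2;
      final newIndex = index.sublist(0, newLength); // keep [11, 12]
      final freed = index.sublist(newLength);       // release [13, 14, 15]

      index = newIndex;
      free.addAll(freed);

      print('index=$index free=$free');
      // index=[11, 12] free=[3, 4, 13, 14, 15]
    }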
@@ -60,8 +60,16 @@ class _DHTShortArrayRead implements DHTShortArrayReadOperations {
 
     final chunks = Iterable<int>.generate(length)
         .slices(kMaxDHTConcurrency)
-        .map((chunk) => chunk
-            .map((pos) async => get(pos + start, forceRefresh: forceRefresh)));
+        .map((chunk) => chunk.map((pos) async {
+          try {
+            return get(pos + start, forceRefresh: forceRefresh);
+            // Need some way to debug ParallelWaitError
+            // ignore: avoid_catches_without_on_clauses
+          } catch (e, st) {
+            veilidLoggy.error('$e\n$st\n');
+            rethrow;
+          }
+        }));
 
     for (final chunk in chunks) {
       final elems = await chunk.wait;
@@ -9,6 +9,7 @@ abstract class DHTShortArrayWriteOperations
        DHTRandomWrite,
        DHTInsertRemove,
        DHTAdd,
+        DHTTruncate,
        DHTClear {}
 
 class _DHTShortArrayWrite extends _DHTShortArrayRead
@@ -72,10 +73,16 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead
      final value = values[i];
      final outSeqNum = outSeqNums[i];
      dws.add((_) async {
-        final outValue = await lookup.record.tryWriteBytes(value,
-            subkey: lookup.recordSubkey, outSeqNum: outSeqNum);
-        if (outValue != null) {
-          success = false;
+        try {
+          final outValue = await lookup.record.tryWriteBytes(value,
+              subkey: lookup.recordSubkey, outSeqNum: outSeqNum);
+          if (outValue != null) {
+            success = false;
+          }
+          // Need some way to debug ParallelWaitError
+          // ignore: avoid_catches_without_on_clauses
+        } catch (e, st) {
+          veilidLoggy.error('$e\n$st\n');
        }
      });
    }
@@ -142,6 +149,11 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead
    _head.clearIndex();
  }
 
+  @override
+  Future<void> truncate(int newLength) async {
+    _head.truncate(newLength);
+  }
+
  @override
  Future<bool> tryWriteItem(int pos, Uint8List newValue,
      {Output<Uint8List>? output}) async {
@@ -2,20 +2,32 @@ class DHTExceptionOutdated implements Exception {
   const DHTExceptionOutdated(
       [this.cause = 'operation failed due to newer dht value']);
   final String cause;
 
   @override
   String toString() => 'DHTExceptionOutdated: $cause';
 }
 
 class DHTExceptionInvalidData implements Exception {
-  const DHTExceptionInvalidData([this.cause = 'dht data structure is corrupt']);
+  const DHTExceptionInvalidData(this.cause);
   final String cause;
 
+  @override
+  String toString() => 'DHTExceptionInvalidData: $cause';
 }
 
 class DHTExceptionCancelled implements Exception {
   const DHTExceptionCancelled([this.cause = 'operation was cancelled']);
   final String cause;
 
+  @override
+  String toString() => 'DHTExceptionCancelled: $cause';
 }
 
+class DHTExceptionNotAvailable implements Exception {
+  const DHTExceptionNotAvailable(
+      [this.cause = 'request could not be completed at this time']);
+  final String cause;
+
+  @override
+  String toString() => 'DHTExceptionNotAvailable: $cause';
+}
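With DHTExceptionInvalidData now requiring a cause, and toString overridden, a plain catch-and-log names the failing operation instead of only the exception type. A small usage sketch; the class is copied from the diff above so the snippet stands alone, and the message text is illustrative:

    class DHTExceptionInvalidData implements Exception {
      const DHTExceptionInvalidData(this.cause);
      final String cause;

      @override
      String toString() => 'DHTExceptionInvalidData: $cause';
    }

    void checkPosition(int pos, int length) {
      if (pos >= length) {
        throw DHTExceptionInvalidData(
            'example::checkPosition pos=$pos length=$length');
      }
    }

    void main() {
      try {
        checkPosition(5, 3);
      } on DHTExceptionInvalidData catch (e) {
        // Prints: DHTExceptionInvalidData: example::checkPosition pos=5 length=3
        print(e);
      }
    }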
@@ -7,6 +7,7 @@ import 'package:protobuf/protobuf.dart';
 
 import 'config.dart';
 import 'table_db.dart';
+import 'veilid_log.dart';
 
 class PersistentQueue<T extends GeneratedMessage>
     with TableDBBackedFromBuffer<IList<T>> {
@@ -46,7 +47,7 @@ class PersistentQueue<T extends GeneratedMessage>
    }
  }
 
-  Future<void> _init(_) async {
+  Future<void> _init(Completer<void> _) async {
    // Start the processor
    unawaited(Future.delayed(Duration.zero, () async {
      await _initWait();
@@ -182,10 +183,28 @@ class PersistentQueue<T extends GeneratedMessage>
 
  @override
  IList<T> valueFromBuffer(Uint8List bytes) {
-    final reader = CodedBufferReader(bytes);
    var out = IList<T>();
-    while (!reader.isAtEnd()) {
-      out = out.add(_fromBuffer(reader.readBytesAsView()));
+    try {
+      final reader = CodedBufferReader(bytes);
+      while (!reader.isAtEnd()) {
+        final bytes = reader.readBytesAsView();
+        try {
+          final item = _fromBuffer(bytes);
+          out = out.add(item);
+        } on Exception catch (e, st) {
+          veilidLoggy.debug(
+              'Dropping invalid item from persistent queue: $bytes\n'
+              'tableName=${tableName()}:tableKeyName=${tableKeyName()}\n',
+              e,
+              st);
+        }
+      }
+    } on Exception catch (e, st) {
+      veilidLoggy.debug(
+          'Dropping remainder of invalid persistent queue\n'
+          'tableName=${tableName()}:tableKeyName=${tableKeyName()}\n',
+          e,
+          st);
    }
    return out;
  }
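The rewritten valueFromBuffer above salvages what it can: the inner try drops a single undecodable item, and the outer try abandons the remainder once the framing itself cannot be read. The same two-layer shape with a plain decoder instead of protobuf — salvage, decodeItem, and the sample frames are illustrative:

    List<String> salvage(
        Iterable<List<int>> frames, String Function(List<int>) decodeItem) {
      final out = <String>[];
      try {
        for (final frame in frames) {
          try {
            out.add(decodeItem(frame));
          } on FormatException {
            // Drop just this item and keep going.
          }
        }
      } on Exception {
        // Framing is broken; keep whatever was recovered so far.
      }
      return out;
    }

    void main() {
      String decodeItem(List<int> frame) {
        if (frame.isEmpty) {
          throw const FormatException('empty frame');
        }
        return String.fromCharCodes(frame);
      }

      final frames = [
        [104, 105],
        <int>[], // undecodable item, dropped
        [111, 107],
      ];
      print(salvage(frames, decodeItem)); // [hi, ok]
    }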
@@ -9,6 +9,7 @@ import 'package:meta/meta.dart';
 import 'package:protobuf/protobuf.dart';
 
 import '../veilid_support.dart';
+import 'veilid_log.dart';
 
 @immutable
 class TableDBArrayUpdate extends Equatable {
@@ -262,7 +263,16 @@ class _TableDBArrayBase {
    final dws = DelayedWaitSet<Uint8List, void>();
    while (batchLen > 0) {
      final entry = await _getIndexEntry(pos);
-      dws.add((_) async => (await _loadEntry(entry))!);
+      dws.add((_) async {
+        try {
+          return (await _loadEntry(entry))!;
+          // Need some way to debug ParallelWaitError
+          // ignore: avoid_catches_without_on_clauses
+        } catch (e, st) {
+          veilidLoggy.error('$e\n$st\n');
+          rethrow;
+        }
+      });
      pos++;
      batchLen--;
    }