better handling of subkeys for spine

Christien Rioux 2024-06-08 23:18:54 -04:00
parent 0b9835b23d
commit 2c3d4dce93
9 changed files with 203 additions and 162 deletions

View File

@@ -36,161 +36,161 @@ void main() {
setUpAll(veilidFixture.attach);
tearDownAll(veilidFixture.detach);
// group('TableDB Tests', () {
// group('TableDBArray Tests', () {
// // test('create/delete TableDBArray', testTableDBArrayCreateDelete);
group('TableDB Tests', () {
group('TableDBArray Tests', () {
// test('create/delete TableDBArray', testTableDBArrayCreateDelete);
// group('TableDBArray Add/Get Tests', () {
// for (final params in [
// //
// (99, 3, 15),
// (100, 4, 16),
// (101, 5, 17),
// //
// (511, 3, 127),
// (512, 4, 128),
// (513, 5, 129),
// //
// (4095, 3, 1023),
// (4096, 4, 1024),
// (4097, 5, 1025),
// //
// (65535, 3, 16383),
// (65536, 4, 16384),
// (65537, 5, 16385),
// ]) {
// final count = params.$1;
// final singles = params.$2;
// final batchSize = params.$3;
group('TableDBArray Add/Get Tests', () {
for (final params in [
//
(99, 3, 15),
(100, 4, 16),
(101, 5, 17),
//
(511, 3, 127),
(512, 4, 128),
(513, 5, 129),
//
(4095, 3, 1023),
(4096, 4, 1024),
(4097, 5, 1025),
//
(65535, 3, 16383),
(65536, 4, 16384),
(65537, 5, 16385),
]) {
final count = params.$1;
final singles = params.$2;
final batchSize = params.$3;
// test(
// timeout: const Timeout(Duration(seconds: 480)),
// 'add/remove TableDBArray count = $count batchSize=$batchSize',
// makeTestTableDBArrayAddGetClear(
// count: count,
// singles: singles,
// batchSize: batchSize,
// crypto: const VeilidCryptoPublic()),
// );
// }
// });
test(
timeout: const Timeout(Duration(seconds: 480)),
'add/remove TableDBArray count = $count batchSize=$batchSize',
makeTestTableDBArrayAddGetClear(
count: count,
singles: singles,
batchSize: batchSize,
crypto: const VeilidCryptoPublic()),
);
}
});
// group('TableDBArray Insert Tests', () {
// for (final params in [
// //
// (99, 3, 15),
// (100, 4, 16),
// (101, 5, 17),
// //
// (511, 3, 127),
// (512, 4, 128),
// (513, 5, 129),
// //
// (4095, 3, 1023),
// (4096, 4, 1024),
// (4097, 5, 1025),
// //
// (65535, 3, 16383),
// (65536, 4, 16384),
// (65537, 5, 16385),
// ]) {
// final count = params.$1;
// final singles = params.$2;
// final batchSize = params.$3;
group('TableDBArray Insert Tests', () {
for (final params in [
//
(99, 3, 15),
(100, 4, 16),
(101, 5, 17),
//
(511, 3, 127),
(512, 4, 128),
(513, 5, 129),
//
(4095, 3, 1023),
(4096, 4, 1024),
(4097, 5, 1025),
//
(65535, 3, 16383),
(65536, 4, 16384),
(65537, 5, 16385),
]) {
final count = params.$1;
final singles = params.$2;
final batchSize = params.$3;
// test(
// timeout: const Timeout(Duration(seconds: 480)),
// 'insert TableDBArray count=$count singles=$singles batchSize=$batchSize',
// makeTestTableDBArrayInsert(
// count: count,
// singles: singles,
// batchSize: batchSize,
// crypto: const VeilidCryptoPublic()),
// );
// }
// });
test(
timeout: const Timeout(Duration(seconds: 480)),
'insert TableDBArray count=$count singles=$singles batchSize=$batchSize',
makeTestTableDBArrayInsert(
count: count,
singles: singles,
batchSize: batchSize,
crypto: const VeilidCryptoPublic()),
);
}
});
// group('TableDBArray Remove Tests', () {
// for (final params in [
// //
// (99, 3, 15),
// (100, 4, 16),
// (101, 5, 17),
// //
// (511, 3, 127),
// (512, 4, 128),
// (513, 5, 129),
// //
// (4095, 3, 1023),
// (4096, 4, 1024),
// (4097, 5, 1025),
// //
// (16383, 3, 4095),
// (16384, 4, 4096),
// (16385, 5, 4097),
// ]) {
// final count = params.$1;
// final singles = params.$2;
// final batchSize = params.$3;
group('TableDBArray Remove Tests', () {
for (final params in [
//
(99, 3, 15),
(100, 4, 16),
(101, 5, 17),
//
(511, 3, 127),
(512, 4, 128),
(513, 5, 129),
//
(4095, 3, 1023),
(4096, 4, 1024),
(4097, 5, 1025),
//
(16383, 3, 4095),
(16384, 4, 4096),
(16385, 5, 4097),
]) {
final count = params.$1;
final singles = params.$2;
final batchSize = params.$3;
// test(
// timeout: const Timeout(Duration(seconds: 480)),
// 'remove TableDBArray count=$count singles=$singles batchSize=$batchSize',
// makeTestTableDBArrayRemove(
// count: count,
// singles: singles,
// batchSize: batchSize,
// crypto: const VeilidCryptoPublic()),
// );
// }
// });
// });
// });
group('DHT Support Tests', () {
setUpAll(updateProcessorFixture.setUp);
setUpAll(tickerFixture.setUp);
tearDownAll(tickerFixture.tearDown);
tearDownAll(updateProcessorFixture.tearDown);
test('create pool', testDHTRecordPoolCreate);
group('DHTRecordPool Tests', () {
setUpAll(dhtRecordPoolFixture.setUp);
tearDownAll(dhtRecordPoolFixture.tearDown);
test('create/delete record', testDHTRecordCreateDelete);
test('record scopes', testDHTRecordScopes);
test('create/delete deep record', testDHTRecordDeepCreateDelete);
});
group('DHTShortArray Tests', () {
setUpAll(dhtRecordPoolFixture.setUp);
tearDownAll(dhtRecordPoolFixture.tearDown);
for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
test('create shortarray stride=$stride',
makeTestDHTShortArrayCreateDelete(stride: stride));
test('add shortarray stride=$stride',
makeTestDHTShortArrayAdd(stride: stride));
}
});
group('DHTLog Tests', () {
setUpAll(dhtRecordPoolFixture.setUp);
tearDownAll(dhtRecordPoolFixture.tearDown);
for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
test('create log stride=$stride',
makeTestDHTLogCreateDelete(stride: stride));
test(
timeout: const Timeout(Duration(seconds: 480)),
'add/truncate log stride=$stride',
makeTestDHTLogAddTruncate(stride: stride),
);
}
test(
timeout: const Timeout(Duration(seconds: 480)),
'remove TableDBArray count=$count singles=$singles batchSize=$batchSize',
makeTestTableDBArrayRemove(
count: count,
singles: singles,
batchSize: batchSize,
crypto: const VeilidCryptoPublic()),
);
}
});
});
});
// group('DHT Support Tests', () {
// setUpAll(updateProcessorFixture.setUp);
// setUpAll(tickerFixture.setUp);
// tearDownAll(tickerFixture.tearDown);
// tearDownAll(updateProcessorFixture.tearDown);
// test('create pool', testDHTRecordPoolCreate);
// group('DHTRecordPool Tests', () {
// setUpAll(dhtRecordPoolFixture.setUp);
// tearDownAll(dhtRecordPoolFixture.tearDown);
// test('create/delete record', testDHTRecordCreateDelete);
// test('record scopes', testDHTRecordScopes);
// test('create/delete deep record', testDHTRecordDeepCreateDelete);
// });
// group('DHTShortArray Tests', () {
// setUpAll(dhtRecordPoolFixture.setUp);
// tearDownAll(dhtRecordPoolFixture.tearDown);
// for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
// test('create shortarray stride=$stride',
// makeTestDHTShortArrayCreateDelete(stride: stride));
// test('add shortarray stride=$stride',
// makeTestDHTShortArrayAdd(stride: stride));
// }
// });
// group('DHTLog Tests', () {
// setUpAll(dhtRecordPoolFixture.setUp);
// tearDownAll(dhtRecordPoolFixture.tearDown);
// for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
// test('create log stride=$stride',
// makeTestDHTLogCreateDelete(stride: stride));
// test(
// timeout: const Timeout(Duration(seconds: 480)),
// 'add/truncate log stride=$stride',
// makeTestDHTLogAddTruncate(stride: stride),
// );
// }
// });
// });
});
});
}

View File

@@ -204,6 +204,9 @@ class DHTLog implements DHTDeleteable<DHTLog> {
/// Get the record key for this log
TypedKey get recordKey => _spine.recordKey;
/// Get the writer for the log
KeyPair? get writer => _spine._spineRecord.writer;
/// Get the record pointer for this log
OwnedDHTRecordPointer get recordPointer => _spine.recordPointer;
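The new writer getter lets callers check whether a log was opened with a writer keypair before doing writer-only work. A minimal usage sketch (the inspectLog helper is hypothetical, not part of the commit):

// Hypothetical helper: branch on write access before writer-only work.
void inspectLog(DHTLog log) {
  if (log.writer != null) {
    // Opened with the owner/writer keypair; offline positions are meaningful.
    print('log ${log.recordKey} is writable');
  } else {
    // Read-only view of someone else's log; skip offline bookkeeping.
    print('log ${log.recordKey} is read-only');
  }
}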

View File

@@ -126,13 +126,22 @@ class DHTLogCubit<T> extends Cubit<DHTLogBusyState<T>>
final end = ((tail - 1) % length) + 1;
final start = (count < end) ? end - count : 0;
final offlinePositions = await reader.getOfflinePositions();
// If this is writeable, get the offline positions
Set<int>? offlinePositions;
if (_log.writer != null) {
offlinePositions = await reader.getOfflinePositions();
if (offlinePositions == null) {
return const AsyncValue.loading();
}
}
// Get the items
final allItems = (await reader.getRange(start,
length: end - start, forceRefresh: forceRefresh))
?.indexed
.map((x) => OnlineElementState(
value: _decodeElement(x.$2),
isOffline: offlinePositions.contains(x.$1)))
isOffline: offlinePositions?.contains(x.$1) ?? false))
.toIList();
if (allItems == null) {
return const AsyncValue.loading();
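This is the pattern the commit applies in both cubits: offline positions are requested only when the log is writable, a null result from a writable reader is surfaced as a loading state, and read-only elements default to isOffline: false. A condensed, hedged sketch of the gate (the helper name is hypothetical; DHTLogReadOperations is the reader interface from this package):

// Condensed sketch of the writer gate used in both cubits.
Future<bool> elementIsOffline(
    DHTLog log, DHTLogReadOperations reader, int position) async {
  Set<int>? offline;
  if (log.writer != null) {
    offline = await reader.getOfflinePositions();
    // A null result on a writable log means a spine lookup failed;
    // the cubits treat that as "still loading" and retry later.
  }
  // Read-only logs have no offline (unflushed) state at all.
  return offline?.contains(position) ?? false;
}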

View File

@@ -61,20 +61,23 @@ class _DHTLogRead implements DHTLogReadOperations {
}
@override
Future<Set<int>> getOfflinePositions() async {
Future<Set<int>?> getOfflinePositions() async {
final positionOffline = <int>{};
// Iterate positions backward from most recent
for (var pos = _spine.length - 1; pos >= 0; pos--) {
final lookup = await _spine.lookupPosition(pos);
if (lookup == null) {
throw StateError('Unable to look up position');
return null;
}
// Check each segment for offline positions
var foundOffline = false;
await lookup.scope((sa) => sa.operate((read) async {
final success = await lookup.scope((sa) => sa.operate((read) async {
final segmentOffline = await read.getOfflinePositions();
if (segmentOffline == null) {
return false;
}
// For each shortarray segment, go through its segment positions
// in reverse order and see if they are offline
@@ -88,8 +91,11 @@ class _DHTLogRead implements DHTLogReadOperations {
foundOffline = true;
}
}
return true;
}));
if (!success) {
return null;
}
// If we found nothing offline in this segment then we can stop
if (!foundOffline) {
break;

View File

@@ -354,13 +354,24 @@ class _DHTLogSpine {
final subkey = l.subkey;
final segment = l.segment;
final subkeyData = await _spineRecord.get(subkey: subkey);
if (subkeyData == null) {
return null;
// See if we have the segment key locally
TypedKey? segmentKey;
var subkeyData = await _spineRecord.get(
subkey: subkey, refreshMode: DHTRecordRefreshMode.local);
if (subkeyData != null) {
segmentKey = _getSegmentKey(subkeyData, segment);
}
final segmentKey = _getSegmentKey(subkeyData, segment);
if (segmentKey == null) {
return null;
// If not, try from the network
subkeyData = await _spineRecord.get(
subkey: subkey, refreshMode: DHTRecordRefreshMode.network);
if (subkeyData == null) {
return null;
}
segmentKey = _getSegmentKey(subkeyData, segment);
if (segmentKey == null) {
return null;
}
}
// Open a shortarray segment
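This local-then-network fallback is the core of the commit: spine subkeys are first read with DHTRecordRefreshMode.local, so segment keys already stored locally never trigger a network round trip, and only a local miss falls through to DHTRecordRefreshMode.network. The same two-step shape, reduced to a standalone sketch (it reuses the diff's private _getSegmentKey helper for illustration, and assumes an opened DHTRecord):

// Sketch of the local-first, network-fallback subkey read.
Future<TypedKey?> readSegmentKey(
    DHTRecord record, int subkey, int segment) async {
  // 1. Cheap path: consult only what is already on disk.
  var data = await record.get(
      subkey: subkey, refreshMode: DHTRecordRefreshMode.local);
  var key = (data != null) ? _getSegmentKey(data, segment) : null;
  if (key != null) {
    return key;
  }
  // 2. Fallback: ask the network for the subkey value.
  data = await record.get(
      subkey: subkey, refreshMode: DHTRecordRefreshMode.network);
  return (data != null) ? _getSegmentKey(data, segment) : null;
}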

View File

@@ -16,7 +16,7 @@ class DHTRecordWatchChange extends Equatable {
/// Refresh mode for DHT record 'get'
enum DHTRecordRefreshMode {
/// Return existing subkey values if they exist locally already
/// And then check the network for a newer value
/// If not, check the network for a value
/// This is the default refresh mode
cached,
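Taken together with the local and network modes used in the spine hunk above (only cached is visible in this hunk; the semantics of the other two are assumed from their names and usage there), a caller picks per read how much network traffic to tolerate:

// Assumed usage of the three refresh modes referenced in this commit
// (record is a hypothetical already-opened DHTRecord).
Future<void> demoRefreshModes(DHTRecord record) async {
  // Default: return a local value if one exists, otherwise ask the network.
  final cached = await record.get(
      subkey: 0, refreshMode: DHTRecordRefreshMode.cached);
  // Local only: never touches the network (the spine fast path above).
  final local = await record.get(
      subkey: 0, refreshMode: DHTRecordRefreshMode.local);
  // Network: fetch a value from the network (the spine fallback above).
  final network = await record.get(
      subkey: 0, refreshMode: DHTRecordRefreshMode.network);
  print('cached=$cached local=$local network=$network');
}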

View File

@@ -182,6 +182,9 @@ class DHTShortArray implements DHTDeleteable<DHTShortArray> {
/// Get the record key for this shortarray
TypedKey get recordKey => _head.recordKey;
/// Get the writer for the shortarray
KeyPair? get writer => _head._headRecord.writer;
/// Get the record pointer for this shortarray
OwnedDHTRecordPointer get recordPointer => _head.recordPointer;

View File

@@ -53,12 +53,21 @@ class DHTShortArrayCubit<T> extends Cubit<DHTShortArrayBusyState<T>>
{bool forceRefresh = false}) async {
try {
final newState = await _shortArray.operate((reader) async {
final offlinePositions = await reader.getOfflinePositions();
// If this is writeable, get the offline positions
Set<int>? offlinePositions;
if (_shortArray.writer != null) {
offlinePositions = await reader.getOfflinePositions();
if (offlinePositions == null) {
return null;
}
}
// Get the items
final allItems = (await reader.getRange(0, forceRefresh: forceRefresh))
?.indexed
.map((x) => DHTShortArrayElementState(
value: _decodeElement(x.$2),
isOffline: offlinePositions.contains(x.$1)))
isOffline: offlinePositions?.contains(x.$1) ?? false))
.toIList();
return allItems;
});

View File

@@ -26,7 +26,7 @@ abstract class DHTRandomRead {
{int? length, bool forceRefresh = false});
/// Get the set of positions that were written offline and not flushed yet
/// Returns null if the offline positions could not be determined
Future<Set<int>> getOfflinePositions();
Future<Set<int>?> getOfflinePositions();
}
extension DHTRandomReadExt on DHTRandomRead {
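Since getOfflinePositions() can now return null, every call site has to decide what a failed lookup means. A hedged caller sketch (offlineOrEmpty is hypothetical; the cubits above instead treat null as "still loading"):

// Sketch: handling the now-nullable result at a call site.
Future<Set<int>> offlineOrEmpty(DHTRandomRead reader) async {
  final positions = await reader.getOfflinePositions();
  // null means a lookup failed (e.g. a spine subkey was unavailable);
  // this helper degrades to reporting no offline positions.
  return positions ?? <int>{};
}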