mirror of https://gitlab.com/veilid/veilidchat.git (synced 2024-12-25 23:59:32 -05:00)

checkpoint

commit 8cd73b2844
parent 3315644ba8
@@ -7,6 +7,7 @@ import 'package:integration_test/integration_test.dart';
 import 'package:veilid_test/veilid_test.dart';
 
 import 'fixtures/fixtures.dart';
+import 'test_dht_log.dart';
 import 'test_dht_record_pool.dart';
 import 'test_dht_short_array.dart';
 
@@ -38,24 +39,37 @@ void main() {
 
   test('create pool', testDHTRecordPoolCreate);
 
-  group('DHTRecordPool Tests', () {
-    setUpAll(dhtRecordPoolFixture.setUp);
-    tearDownAll(dhtRecordPoolFixture.tearDown);
-
-    test('create/delete record', testDHTRecordCreateDelete);
-    test('record scopes', testDHTRecordScopes);
-    test('create/delete deep record', testDHTRecordDeepCreateDelete);
-  });
-
-  group('DHTShortArray Tests', () {
-    setUpAll(dhtRecordPoolFixture.setUp);
-    tearDownAll(dhtRecordPoolFixture.tearDown);
-
-    for (final stride in [256, 64, 32, 16, 8, 4, 2, 1]) {
-      test('create shortarray stride=$stride',
-          makeTestDHTShortArrayCreateDelete(stride: stride));
-      test('add shortarray stride=$stride',
-          makeTestDHTShortArrayAdd(stride: 256));
+  // group('DHTRecordPool Tests', () {
+  //   setUpAll(dhtRecordPoolFixture.setUp);
+  //   tearDownAll(dhtRecordPoolFixture.tearDown);
+
+  //   test('create/delete record', testDHTRecordCreateDelete);
+  //   test('record scopes', testDHTRecordScopes);
+  //   test('create/delete deep record', testDHTRecordDeepCreateDelete);
+  // });
+
+  // group('DHTShortArray Tests', () {
+  //   setUpAll(dhtRecordPoolFixture.setUp);
+  //   tearDownAll(dhtRecordPoolFixture.tearDown);
+
+  //   for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
+  //     test('create shortarray stride=$stride',
+  //         makeTestDHTShortArrayCreateDelete(stride: stride));
+  //     test('add shortarray stride=$stride',
+  //         makeTestDHTShortArrayAdd(stride: 256));
+  //   }
+  // });
+
+  group('DHTLog Tests', () {
+    setUpAll(dhtRecordPoolFixture.setUp);
+    tearDownAll(dhtRecordPoolFixture.tearDown);
+
+    for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
+      test('create log stride=$stride',
+          makeTestDHTLogCreateDelete(stride: stride));
+      test('add/truncate log stride=$stride',
+          makeTestDHTLogAddTruncate(stride: 256),
+          timeout: const Timeout(Duration(seconds: 480)));
     }
   });
 });
@@ -1,6 +1,7 @@
 import 'dart:async';
 
 import 'package:async_tools/async_tools.dart';
+import 'package:flutter/foundation.dart';
 import 'package:veilid_support/veilid_support.dart';
 import 'package:veilid_test/veilid_test.dart';
 
@@ -12,9 +13,13 @@ class DHTRecordPoolFixture implements TickerFixtureTickable {
   UpdateProcessorFixture updateProcessorFixture;
   TickerFixture tickerFixture;
 
-  Future<void> setUp() async {
+  Future<void> setUp({bool purge = true}) async {
     await _fixtureMutex.acquire();
-    await DHTRecordPool.init();
+    if (purge) {
+      await Veilid.instance.debug('record purge local');
+      await Veilid.instance.debug('record purge remote');
+    }
+    await DHTRecordPool.init(logger: debugPrintSynchronously);
     tickerFixture.register(this);
   }
 
@@ -22,6 +27,10 @@ class DHTRecordPoolFixture implements TickerFixtureTickable {
     assert(_fixtureMutex.isLocked, 'should not tearDown without setUp');
     tickerFixture.unregister(this);
     await DHTRecordPool.close();
+
+    final recordList = await Veilid.instance.debug('record list local');
+    debugPrintSynchronously('DHT Record List:\n$recordList');
+
     _fixtureMutex.release();
   }
 
@@ -0,0 +1,130 @@
+import 'dart:convert';
+
+import 'package:flutter_test/flutter_test.dart';
+import 'package:veilid_support/veilid_support.dart';
+
+Future<void> Function() makeTestDHTLogCreateDelete({required int stride}) =>
+    () async {
+      // Close before delete
+      {
+        final dlog = await DHTLog.create(
+            debugName: 'log_create_delete 1 stride $stride', stride: stride);
+        expect(await dlog.operate((r) async => r.length), isZero);
+        expect(dlog.isOpen, isTrue);
+        await dlog.close();
+        expect(dlog.isOpen, isFalse);
+        await dlog.delete();
+        // Operate should fail
+        await expectLater(() async => dlog.operate((r) async => r.length),
+            throwsA(isA<StateError>()));
+      }
+
+      // Close after delete
+      {
+        final dlog = await DHTLog.create(
+            debugName: 'log_create_delete 2 stride $stride', stride: stride);
+        await dlog.delete();
+        // Operate should still succeed because things aren't closed
+        expect(await dlog.operate((r) async => r.length), isZero);
+        await dlog.close();
+        // Operate should fail
+        await expectLater(() async => dlog.operate((r) async => r.length),
+            throwsA(isA<StateError>()));
+      }
+
+      // Close after delete multiple
+      // Okay to request delete multiple times before close
+      {
+        final dlog = await DHTLog.create(
+            debugName: 'log_create_delete 3 stride $stride', stride: stride);
+        await dlog.delete();
+        await dlog.delete();
+        // Operate should still succeed because things aren't closed
+        expect(await dlog.operate((r) async => r.length), isZero);
+        await dlog.close();
+        await dlog.close();
+        // Operate should fail
+        await expectLater(() async => dlog.operate((r) async => r.length),
+            throwsA(isA<StateError>()));
+      }
+    };
+
+Future<void> Function() makeTestDHTLogAddTruncate({required int stride}) =>
+    () async {
+      final startTime = DateTime.now();
+
+      final dlog = await DHTLog.create(
+          debugName: 'log_add 1 stride $stride', stride: stride);
+
+      final dataset = Iterable<int>.generate(1000)
+          .map((n) => utf8.encode('elem $n'))
+          .toList();
+
+      print('adding\n');
+      {
+        final res = await dlog.operateAppend((w) async {
+          const chunk = 50;
+          for (var n = 0; n < dataset.length; n += chunk) {
+            print('$n-${n + chunk - 1} ');
+            final success =
+                await w.tryAppendItems(dataset.sublist(n, n + chunk));
+            expect(success, isTrue);
+          }
+        });
+        expect(res, isNull);
+      }
+
+      print('get all\n');
+      {
+        final dataset2 = await dlog.operate((r) async => r.getItemRange(0));
+        expect(dataset2, equals(dataset));
+      }
+      {
+        final dataset3 =
+            await dlog.operate((r) async => r.getItemRange(64, length: 128));
+        expect(dataset3, equals(dataset.sublist(64, 64 + 128)));
+      }
+      {
+        final dataset4 =
+            await dlog.operate((r) async => r.getItemRange(0, length: 1000));
+        expect(dataset4, equals(dataset.sublist(0, 1000)));
+      }
+      {
+        final dataset5 =
+            await dlog.operate((r) async => r.getItemRange(500, length: 499));
+        expect(dataset5, equals(dataset.sublist(500, 999)));
+      }
+      print('truncate\n');
+      {
+        await dlog.operateAppend((w) async => w.truncate(5));
+      }
+      {
+        final dataset6 = await dlog
+            .operate((r) async => r.getItemRange(500 - 5, length: 499));
+        expect(dataset6, equals(dataset.sublist(500, 999)));
+      }
+      print('truncate 2\n');
+      {
+        await dlog.operateAppend((w) async => w.truncate(251));
+      }
+      {
+        final dataset7 = await dlog
+            .operate((r) async => r.getItemRange(500 - 256, length: 499));
+        expect(dataset7, equals(dataset.sublist(500, 999)));
+      }
+      print('clear\n');
+      {
+        await dlog.operateAppend((w) async => w.clear());
+      }
+      print('get all\n');
+      {
+        final dataset8 = await dlog.operate((r) async => r.getItemRange(0));
+        expect(dataset8, isEmpty);
+      }
+
+      await dlog.delete();
+      await dlog.close();
+
+      final endTime = DateTime.now();
+      print('Duration: ${endTime.difference(startTime)}');
+    };
@@ -151,7 +151,6 @@ Future<void> testDHTRecordDeepCreateDelete() async {
   // Make root record
   final recroot = await pool.createRecord(debugName: 'test_deep_create_delete');
 
-  for (var d = 0; d < numIterations; d++) {
   // Make child set 1
   var parent = recroot;
   final children = <DHTRecord>[];
@@ -162,6 +161,19 @@ Future<void> testDHTRecordDeepCreateDelete() async {
     parent = child;
   }
 
+  // Should mark for deletion
+  expect(await pool.deleteRecord(recroot.key), isFalse);
+
+  // Root should still be valid
+  expect(await pool.isValidRecordKey(recroot.key), isTrue);
+
+  // Close root record
+  await recroot.close();
+
+  // Root should still be valid because children still exist
+  expect(await pool.isValidRecordKey(recroot.key), isTrue);
+
+  for (var d = 0; d < numIterations; d++) {
     // Make child set 2
     final children2 = <DHTRecord>[];
     parent = recroot;
@@ -171,31 +183,31 @@ Future<void> testDHTRecordDeepCreateDelete() async {
       children2.add(child);
       parent = child;
     }
-    // Should fail to delete root
-    await expectLater(
-        () async => pool.deleteRecord(recroot.key), throwsA(isA<StateError>()));
+
+    // Delete child set 2 in reverse order
+    for (var n = numChildren - 1; n >= 0; n--) {
+      expect(await pool.deleteRecord(children2[n].key), isFalse);
+    }
+
+    // Root should still be there
+    expect(await pool.isValidRecordKey(recroot.key), isTrue);
+
+    // Close child set 2
+    await children2.map((c) => c.close()).wait;
+
+    // All child set 2 should be invalid
+    for (final c2 in children2) {
+      // Children should be invalid and deleted now
+      expect(await pool.isValidRecordKey(c2.key), isFalse);
+    }
+
+    // Root should still be valid
+    expect(await pool.isValidRecordKey(recroot.key), isTrue);
+  }
 
   // Close child set 1
   await children.map((c) => c.close()).wait;
 
-  // Delete child set 1 in reverse order
-  for (var n = numChildren - 1; n >= 0; n--) {
-    await pool.deleteRecord(children[n].key);
-  }
-
-  // Should fail to delete root
-  await expectLater(
-      () async => pool.deleteRecord(recroot.key), throwsA(isA<StateError>()));
-
-  // Close child set 1
-  await children2.map((c) => c.close()).wait;
-
-  // Delete child set 2 in reverse order
-  for (var n = numChildren - 1; n >= 0; n--) {
-    await pool.deleteRecord(children2[n].key);
-  }
-  }
-
-  // Should be able to delete root now
-  await pool.deleteRecord(recroot.key);
+  // Root should have gone away
+  expect(await pool.isValidRecordKey(recroot.key), isFalse);
 }
@@ -61,10 +61,10 @@ Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
           .map((n) => utf8.encode('elem $n'))
           .toList();
 
-      print('adding\n');
+      print('adding singles\n');
       {
         final res = await arr.operateWrite((w) async {
-          for (var n = 0; n < dataset.length; n++) {
+          for (var n = 4; n < 8; n++) {
             print('$n ');
             final success = await w.tryAddItem(dataset[n]);
             expect(success, isTrue);
@@ -73,6 +73,40 @@ Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
         expect(res, isNull);
       }
 
+      print('adding batch\n');
+      {
+        final res = await arr.operateWrite((w) async {
+          print('${dataset.length ~/ 2}-${dataset.length}');
+          final success = await w.tryAddItems(
+              dataset.sublist(dataset.length ~/ 2, dataset.length));
+          expect(success, isTrue);
+        });
+        expect(res, isNull);
+      }
+
+      print('inserting singles\n');
+      {
+        final res = await arr.operateWrite((w) async {
+          for (var n = 0; n < 4; n++) {
+            print('$n ');
+            final success = await w.tryInsertItem(n, dataset[n]);
+            expect(success, isTrue);
+          }
+        });
+        expect(res, isNull);
+      }
+
+      print('inserting batch\n');
+      {
+        final res = await arr.operateWrite((w) async {
+          print('8-${dataset.length ~/ 2}');
+          final success = await w.tryInsertItems(
+              8, dataset.sublist(8, dataset.length ~/ 2));
+          expect(success, isTrue);
+        });
+        expect(res, isNull);
+      }
+
       //print('get all\n');
       {
         final dataset2 = await arr.operate((r) async => r.getItemRange(0));
@@ -1,4 +1,5 @@
 import 'dart:async';
+import 'dart:math';
 import 'dart:typed_data';
 
 import 'package:async_tools/async_tools.dart';
@@ -9,34 +9,74 @@ class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead {
   @override
   Future<bool> tryAppendItem(Uint8List value) async {
     // Allocate empty index at the end of the list
-    final endPos = _spine.length;
+    final insertPos = _spine.length;
     _spine.allocateTail(1);
-    final lookup = await _spine.lookupPosition(endPos);
+    final lookup = await _spine.lookupPosition(insertPos);
     if (lookup == null) {
       throw StateError("can't write to dht log");
     }
 
     // Write item to the segment
-    return lookup.shortArray
-        .operateWrite((write) async => write.tryWriteItem(lookup.pos, value));
+    return lookup.shortArray.operateWrite((write) async {
+      // If this a new segment, then clear it in case we have wrapped around
+      if (lookup.pos == 0) {
+        await write.clear();
+      } else if (lookup.pos != write.length) {
+        // We should always be appending at the length
+        throw StateError('appending should be at the end');
+      }
+      return write.tryAddItem(value);
+    });
+  }
+
+  @override
+  Future<bool> tryAppendItems(List<Uint8List> values) async {
+    // Allocate empty index at the end of the list
+    final insertPos = _spine.length;
+    _spine.allocateTail(values.length);
+
+    // Look up the first position and shortarray
+    for (var valueIdx = 0; valueIdx < values.length;) {
+      final remaining = values.length - valueIdx;
+
+      final lookup = await _spine.lookupPosition(insertPos + valueIdx);
+      if (lookup == null) {
+        throw StateError("can't write to dht log");
+      }
+
+      final sacount = min(remaining, DHTShortArray.maxElements - lookup.pos);
+      final success = await lookup.shortArray.operateWrite((write) async {
+        // If this a new segment, then clear it in case we have wrapped around
+        if (lookup.pos == 0) {
+          await write.clear();
+        } else if (lookup.pos != write.length) {
+          // We should always be appending at the length
+          throw StateError('appending should be at the end');
+        }
+        return write.tryAddItems(values.sublist(valueIdx, valueIdx + sacount));
+      });
+      if (!success) {
+        return false;
+      }
+      valueIdx += sacount;
+    }
+    return true;
   }
 
   @override
   Future<void> truncate(int count) async {
-    final len = _spine.length;
-    if (count > len) {
-      count = len;
-    }
+    count = min(count, _spine.length);
     if (count == 0) {
       return;
     }
     if (count < 0) {
       throw StateError('can not remove negative items');
     }
-    _spine.releaseHead(count);
+    await _spine.releaseHead(count);
   }
 
   @override
   Future<void> clear() async {
-    _spine.releaseHead(_spine.length);
+    await _spine.releaseHead(_spine.length);
   }
 }
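Note (reviewer sketch, not part of this commit): the new tryAppendItems above splits a batch across DHTShortArray segments, writing at most min(remaining, DHTShortArray.maxElements - lookup.pos) items per segment before moving on. The standalone Dart sketch below shows only that chunking arithmetic; maxElements = 256, the starting position, and the batch size are assumed example values, and posInSegment stands in for lookup.pos in the simple case where the log has not wrapped around.

import 'dart:math';

void main() {
  const maxElements = 256; // assumed segment capacity (DHTShortArray.maxElements)
  const insertPos = 250; // assumed log length before the append
  const batch = 1000; // assumed number of items to append

  var valueIdx = 0;
  while (valueIdx < batch) {
    final pos = insertPos + valueIdx;
    final posInSegment = pos % maxElements; // stands in for lookup.pos
    final count = min(batch - valueIdx, maxElements - posInSegment);
    print('segment ${pos ~/ maxElements}: '
        'write $count item(s) starting at offset $posInSegment');
    valueIdx += count;
  }
}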
@@ -83,12 +83,8 @@ class _DHTLogSpine {
 
   Future<void> delete() async {
     await _spineMutex.protect(() async {
-      final pool = DHTRecordPool.instance;
-      final futures = <Future<void>>[pool.deleteRecord(_spineRecord.key)];
-      for (final (_, sc) in _spineCache) {
-        futures.add(sc.delete());
-      }
-      await Future.wait(futures);
+      // Will deep delete all segment records as they are children
+      await _spineRecord.delete();
     });
   }
 
@@ -218,7 +214,7 @@ class _DHTLogSpine {
   static TypedKey? _getSegmentKey(Uint8List subkeyData, int segment) {
     final decodedLength = TypedKey.decodedLength<TypedKey>();
     final segmentKeyBytes = subkeyData.sublist(
-        decodedLength * segment, (decodedLength + 1) * segment);
+        decodedLength * segment, decodedLength * (segment + 1));
     if (segmentKeyBytes.equals(_emptySegmentKey)) {
       return null;
     }
@@ -234,7 +230,7 @@ class _DHTLogSpine {
     } else {
       segmentKeyBytes = segmentKey.decode();
     }
-    subkeyData.setRange(decodedLength * segment, (decodedLength + 1) * segment,
+    subkeyData.setRange(decodedLength * segment, decodedLength * (segment + 1),
         segmentKeyBytes);
   }
 
@@ -435,7 +431,7 @@ class _DHTLogSpine {
     _tail = (_tail + count) % _positionLimit;
   }
 
-  void releaseHead(int count) {
+  Future<void> releaseHead(int count) async {
     assert(_spineMutex.isLocked, 'should be locked');
 
     final currentLength = length;
@@ -447,6 +443,73 @@ class _DHTLogSpine {
     }
 
     _head = (_head + count) % _positionLimit;
+    await _purgeUnusedSegments();
+  }
+
+  Future<void> _deleteSegmentsContiguous(int start, int end) async {
+    assert(_spineMutex.isLocked, 'should be in mutex here');
+
+    final startSegmentNumber = start ~/ DHTShortArray.maxElements;
+    final startSegmentPos = start % DHTShortArray.maxElements;
+
+    final endSegmentNumber = end ~/ DHTShortArray.maxElements;
+    final endSegmentPos = end % DHTShortArray.maxElements;
+
+    final firstDeleteSegment =
+        (startSegmentPos == 0) ? startSegmentNumber : startSegmentNumber + 1;
+    final lastDeleteSegment =
+        (endSegmentPos == 0) ? endSegmentNumber - 1 : endSegmentNumber - 2;
+
+    int? lastSubkey;
+    Uint8List? subkeyData;
+    for (var segmentNumber = firstDeleteSegment;
+        segmentNumber <= lastDeleteSegment;
+        segmentNumber++) {
+      // Lookup what subkey and segment subrange has this position's segment
+      // shortarray
+      final l = lookupSegment(segmentNumber);
+      final subkey = l.subkey;
+      final segment = l.segment;
+
+      if (lastSubkey != subkey) {
+        // Flush subkey writes
+        if (lastSubkey != null) {
+          await _spineRecord.eventualWriteBytes(subkeyData!,
+              subkey: lastSubkey);
+        }
+
+        xxx debug this, it takes forever
+
+        // Get next subkey
+        subkeyData = await _spineRecord.get(subkey: subkey);
+        if (subkeyData != null) {
+          lastSubkey = subkey;
+        } else {
+          lastSubkey = null;
+        }
+      }
+      if (subkeyData != null) {
+        final segmentKey = _getSegmentKey(subkeyData, segment);
+        if (segmentKey != null) {
+          await DHTRecordPool.instance.deleteRecord(segmentKey);
+          _setSegmentKey(subkeyData, segment, null);
+        }
+      }
+    }
+    // Flush subkey writes
+    if (lastSubkey != null) {
+      await _spineRecord.eventualWriteBytes(subkeyData!, subkey: lastSubkey);
+    }
+  }
+
+  Future<void> _purgeUnusedSegments() async {
+    assert(_spineMutex.isLocked, 'should be in mutex here');
+    if (_head < _tail) {
+      await _deleteSegmentsContiguous(0, _head);
+      await _deleteSegmentsContiguous(_tail, _positionLimit);
+    } else if (_head > _tail) {
+      await _deleteSegmentsContiguous(_tail, _head);
+    }
   }
 
 /////////////////////////////////////////////////////////////////////////////
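Note (reviewer sketch, not part of this commit): _deleteSegmentsContiguous above only frees whole segments inside the released [start, end) position range, using deliberately conservative first/last bounds. A standalone Dart sketch of just that arithmetic, with maxElements = 256 and the start/end positions as assumed example values:

void main() {
  const maxElements = 256; // assumed DHTShortArray.maxElements
  const start = 300; // assumed start of the released position range
  const end = 1024; // assumed end (exclusive) of the released position range

  final startSegmentNumber = start ~/ maxElements;
  final startSegmentPos = start % maxElements;
  final endSegmentNumber = end ~/ maxElements;
  final endSegmentPos = end % maxElements;

  // Same bounds as the checkpointed code: skip any segment that may still
  // hold a live position; the upper bound backs off one extra segment.
  final firstDeleteSegment =
      (startSegmentPos == 0) ? startSegmentNumber : startSegmentNumber + 1;
  final lastDeleteSegment =
      (endSegmentPos == 0) ? endSegmentNumber - 1 : endSegmentNumber - 2;

  print('segments eligible for deletion: '
      '$firstDeleteSegment through $lastDeleteSegment');
}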
@@ -532,7 +595,7 @@ class _DHTLogSpine {
 
   // Position of the start of the log (oldest items)
   int _head;
-  // Position of the end of the log (newest items)
+  // Position of the end of the log (newest items) (exclusive)
   int _tail;
 
   // LRU cache of DHT spine elements accessed recently
@@ -91,7 +91,6 @@ class SharedDHTRecordData {
   Map<int, int> subkeySeqCache = {};
   bool needsWatchStateUpdate = false;
   WatchState? unionWatchState;
-  bool deleteOnClose = false;
 }
 
 // Per opened record data
@@ -128,6 +127,7 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
       : _state = const DHTRecordPoolAllocations(),
        _mutex = Mutex(),
        _opened = <TypedKey, OpenedRecordInfo>{},
+       _markedForDelete = <TypedKey>{},
        _routingContext = routingContext,
        _veilid = veilid;
 
@@ -140,6 +140,8 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
   final Mutex _mutex;
   // Which DHT records are currently open
   final Map<TypedKey, OpenedRecordInfo> _opened;
+  // Which DHT records are marked for deletion
+  final Set<TypedKey> _markedForDelete;
   // Default routing context to use for new keys
   final VeilidRoutingContext _routingContext;
   // Convenience accessor
@@ -288,6 +290,8 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
     return openedRecordInfo;
   }
 
+  // Called when a DHTRecord is closed
+  // Cleans up the opened record housekeeping and processes any late deletions
   Future<void> _recordClosed(DHTRecord record) async {
     await _mutex.protect(() async {
       final key = record.key;
@@ -301,14 +305,37 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
       }
       if (openedRecordInfo.records.isEmpty) {
         await _routingContext.closeDHTRecord(key);
-        if (openedRecordInfo.shared.deleteOnClose) {
-          await _deleteRecordInner(key);
-        }
         _opened.remove(key);
+
+        await _checkForLateDeletesInner(key);
       }
     });
   }
 
+  // Check to see if this key can finally be deleted
+  // If any parents are marked for deletion, try them first
+  Future<void> _checkForLateDeletesInner(TypedKey key) async {
+    // Get parent list in bottom up order including our own key
+    final parents = <TypedKey>[];
+    TypedKey? nextParent = key;
+    while (nextParent != null) {
+      parents.add(nextParent);
+      nextParent = getParentRecordKey(nextParent);
+    }
+
+    // If any parent is ready to delete all its children do it
+    for (final parent in parents) {
+      if (_markedForDelete.contains(parent)) {
+        final deleted = await _deleteRecordInner(parent);
+        if (!deleted) {
+          // If we couldn't delete a child then no 'marked for delete' parents
+          // above us will be ready to delete either
+          break;
+        }
+      }
+    }
+  }
+
   // Collect all dependencies (including the record itself)
   // in reverse (bottom-up/delete order)
   List<TypedKey> _collectChildrenInner(TypedKey recordKey) {
@@ -328,7 +355,13 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
     return allDeps.reversedView;
   }
 
-  String _debugChildren(TypedKey recordKey, {List<TypedKey>? allDeps}) {
+  /// Collect all dependencies (including the record itself)
+  /// in reverse (bottom-up/delete order)
+  Future<List<TypedKey>> collectChildren(TypedKey recordKey) =>
+      _mutex.protect(() async => _collectChildrenInner(recordKey));
+
+  /// Print children
+  String debugChildren(TypedKey recordKey, {List<TypedKey>? allDeps}) {
     allDeps ??= _collectChildrenInner(recordKey);
     // ignore: avoid_print
     var out =
@@ -342,32 +375,48 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
     return out;
   }
 
-  Future<void> _deleteRecordInner(TypedKey recordKey) async {
-    log('deleteDHTRecord: key=$recordKey');
+  // Actual delete function
+  Future<void> _finalizeDeleteRecordInner(TypedKey recordKey) async {
+    log('_finalizeDeleteRecordInner: key=$recordKey');
 
     // Remove this child from parents
     await _removeDependenciesInner([recordKey]);
     await _routingContext.deleteDHTRecord(recordKey);
+    _markedForDelete.remove(recordKey);
   }
 
-  Future<void> deleteRecord(TypedKey recordKey) async {
-    await _mutex.protect(() async {
-      final allDeps = _collectChildrenInner(recordKey);
-
-      if (allDeps.singleOrNull != recordKey) {
-        final dbgstr = _debugChildren(recordKey, allDeps: allDeps);
-        throw StateError('must delete children first: $dbgstr');
-      }
-
-      final ori = _opened[recordKey];
-      if (ori != null) {
-        // delete after close
-        ori.shared.deleteOnClose = true;
-      } else {
-        // delete now
-        await _deleteRecordInner(recordKey);
-      }
-    });
+  // Deep delete mechanism inside mutex
+  Future<bool> _deleteRecordInner(TypedKey recordKey) async {
+    final toDelete = _readyForDeleteInner(recordKey);
+    if (toDelete.isNotEmpty) {
+      // delete now
+      for (final deleteKey in toDelete) {
+        await _finalizeDeleteRecordInner(deleteKey);
+      }
+      return true;
+    }
+    // mark for deletion
+    _markedForDelete.add(recordKey);
+    return false;
+  }
+
+  /// Delete a record and its children if they are all closed
+  /// otherwise mark that record for deletion eventually
+  /// Returns true if the deletion was processed immediately
+  /// Returns false if the deletion was marked for later
+  Future<bool> deleteRecord(TypedKey recordKey) async =>
+      _mutex.protect(() async => _deleteRecordInner(recordKey));
+
+  // If everything underneath is closed including itself, return the
+  // list of children (and itself) to finally actually delete
+  List<TypedKey> _readyForDeleteInner(TypedKey recordKey) {
+    final allDeps = _collectChildrenInner(recordKey);
+    for (final dep in allDeps) {
+      if (_opened.containsKey(dep)) {
+        return [];
+      }
+    }
+    return allDeps;
   }
 
   void _validateParentInner(TypedKey? parent, TypedKey child) {
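Note (reviewer sketch, not part of this commit): deleteRecord now returns a bool and defers deletion while any record in the subtree is still open, which is what the updated testDHTRecordDeepCreateDelete exercises above. A minimal usage sketch, assuming the same late-delete behavior the test checks for the root record and using only calls that appear in this diff:

import 'package:veilid_support/veilid_support.dart';

Future<void> lateDeleteSketch(DHTRecordPool pool) async {
  final rec = await pool.createRecord(debugName: 'late_delete_sketch');

  // The record is still open, so this only marks it for deletion.
  final deletedNow = await pool.deleteRecord(rec.key);
  assert(!deletedNow, 'expected the delete to be deferred');
  assert(await pool.isValidRecordKey(rec.key));

  // Closing the last open handle lets the pool finish the late delete.
  await rec.close();
  assert(!await pool.isValidRecordKey(rec.key));
}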
@@ -456,6 +505,19 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
     }
   }
 
+  bool _isValidRecordKeyInner(TypedKey key) {
+    if (_state.rootRecords.contains(key)) {
+      return true;
+    }
+    if (_state.childrenByParent.containsKey(key.toJson())) {
+      return true;
+    }
+    return false;
+  }
+
+  Future<bool> isValidRecordKey(TypedKey key) =>
+      _mutex.protect(() async => _isValidRecordKeyInner(key));
+
   ///////////////////////////////////////////////////////////////////////
 
   /// Create a root DHTRecord that has no dependent records
|
@ -67,12 +67,8 @@ class _DHTShortArrayHead {
|
|||||||
|
|
||||||
Future<void> delete() async {
|
Future<void> delete() async {
|
||||||
await _headMutex.protect(() async {
|
await _headMutex.protect(() async {
|
||||||
final pool = DHTRecordPool.instance;
|
// Will deep delete all linked records as they are children
|
||||||
final futures = <Future<void>>[pool.deleteRecord(_headRecord.key)];
|
await _headRecord.delete();
|
||||||
for (final lr in _linkedRecords) {
|
|
||||||
futures.add(pool.deleteRecord(lr.key));
|
|
||||||
}
|
|
||||||
await Future.wait(futures);
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -8,19 +8,12 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead
   _DHTShortArrayWrite._(super.head) : super._();
 
   @override
-  Future<bool> tryAddItem(Uint8List value) async {
-    // Allocate empty index at the end of the list
-    final pos = _head.length;
-    _head.allocateIndex(pos);
-
-    // Write item
-    final ok = await tryWriteItem(pos, value);
-    if (!ok) {
-      _head.freeIndex(pos);
-    }
-
-    return ok;
-  }
+  Future<bool> tryAddItem(Uint8List value) =>
+      tryInsertItem(_head.length, value);
+
+  @override
+  Future<bool> tryAddItems(List<Uint8List> values) =>
+      tryInsertItems(_head.length, values);
 
   @override
   Future<bool> tryInsertItem(int pos, Uint8List value) async {
@@ -35,6 +28,29 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead
     return true;
   }
 
+  @override
+  Future<bool> tryInsertItems(int pos, List<Uint8List> values) async {
+    // Allocate empty indices at the end of the list
+    for (var i = 0; i < values.length; i++) {
+      _head.allocateIndex(pos + i);
+    }
+
+    // Write items
+    var success = true;
+    final dws = DelayedWaitSet<void>();
+    for (var i = 0; i < values.length; i++) {
+      dws.add(() async {
+        final ok = await tryWriteItem(pos + i, values[i]);
+        if (!ok) {
+          _head.freeIndex(pos + i);
+          success = false;
+        }
+      });
+    }
+    await dws(chunkSize: maxDHTConcurrency, onChunkDone: (_) => success);
+    return success;
+  }
+
   @override
   Future<void> swapItem(int aPos, int bPos) async {
     if (aPos < 0 || aPos >= _head.length) {
@@ -14,6 +14,13 @@ abstract class DHTAppendTruncate {
   /// This may throw an exception if the number elements added exceeds limits.
   Future<bool> tryAppendItem(Uint8List value);
 
+  /// Try to add a list of items to the end of the DHT data structure.
+  /// Return true if the elements were successfully added, and false if the
+  /// state changed before the element could be added or a newer value was found
+  /// on the network.
+  /// This may throw an exception if the number elements added exceeds limits.
+  Future<bool> tryAppendItems(List<Uint8List> values);
+
   /// Try to remove a number of items from the head of the DHT data structure.
   /// Throws StateError if count < 0
   Future<void> truncate(int count);
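Note (reviewer sketch, not part of this commit): a usage example for the new tryAppendItems interface, mirroring the chunked-append pattern of the new DHTLog test above; the chunk size of 50 is an assumed example value.

import 'dart:math';
import 'dart:typed_data';

import 'package:veilid_support/veilid_support.dart';

Future<void> appendInChunks(DHTLog dlog, List<Uint8List> items) async {
  await dlog.operateAppend((w) async {
    const chunk = 50; // assumed batch size per tryAppendItems call
    for (var n = 0; n < items.length; n += chunk) {
      final end = min(n + chunk, items.length);
      final ok = await w.tryAppendItems(items.sublist(n, end));
      if (!ok) {
        throw StateError('append failed partway through the batch');
      }
    }
  });
}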
@@ -29,12 +29,12 @@ extension DHTOpenableExt<D extends DHTOpenable> on D {
     }
 
     try {
-      final out = await scopeFunction(this);
-      await close();
-      return out;
-    } on Exception catch (_) {
+      return await scopeFunction(this);
+    } on Exception {
       await delete();
       rethrow;
+    } finally {
+      await close();
     }
   }
 
@@ -30,6 +30,13 @@ abstract class DHTRandomWrite {
   /// built-in limit of 'maxElements = 256' entries.
   Future<bool> tryAddItem(Uint8List value);
 
+  /// Try to add a list of items to the end of the DHTArray. Return true if the
+  /// elements were successfully added, and false if the state changed before
+  /// the elements could be added or a newer value was found on the network.
+  /// This may throw an exception if the number elements added exceeds the
+  /// built-in limit of 'maxElements = 256' entries.
+  Future<bool> tryAddItems(List<Uint8List> values);
+
   /// Try to insert an item as position 'pos' of the DHTArray.
   /// Return true if the element was successfully inserted, and false if the
   /// state changed before the element could be inserted or a newer value was
|
|||||||
/// built-in limit of 'maxElements = 256' entries.
|
/// built-in limit of 'maxElements = 256' entries.
|
||||||
Future<bool> tryInsertItem(int pos, Uint8List value);
|
Future<bool> tryInsertItem(int pos, Uint8List value);
|
||||||
|
|
||||||
|
/// Try to insert items at position 'pos' of the DHTArray.
|
||||||
|
/// Return true if the elements were successfully inserted, and false if the
|
||||||
|
/// state changed before the elements could be inserted or a newer value was
|
||||||
|
/// found on the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds the
|
||||||
|
/// built-in limit of 'maxElements = 256' entries.
|
||||||
|
Future<bool> tryInsertItems(int pos, List<Uint8List> values);
|
||||||
|
|
||||||
/// Swap items at position 'aPos' and 'bPos' in the DHTArray.
|
/// Swap items at position 'aPos' and 'bPos' in the DHTArray.
|
||||||
/// Throws IndexError if either of the positions swapped exceed
|
/// Throws IndexError if either of the positions swapped exceed
|
||||||
/// the length of the list
|
/// the length of the list
|
||||||
|