Mirror of https://gitlab.com/veilid/veilidchat.git (synced 2025-08-13 16:35:32 -04:00)

commit 8cd73b2844
parent 3315644ba8

    checkpoint

14 changed files with 513 additions and 114 deletions
@@ -7,6 +7,7 @@ import 'package:integration_test/integration_test.dart';
import 'package:veilid_test/veilid_test.dart';

import 'fixtures/fixtures.dart';
import 'test_dht_log.dart';
import 'test_dht_record_pool.dart';
import 'test_dht_short_array.dart';

@@ -38,24 +39,37 @@ void main() {

  test('create pool', testDHTRecordPoolCreate);

  group('DHTRecordPool Tests', () {
  // group('DHTRecordPool Tests', () {
  //   setUpAll(dhtRecordPoolFixture.setUp);
  //   tearDownAll(dhtRecordPoolFixture.tearDown);

  //   test('create/delete record', testDHTRecordCreateDelete);
  //   test('record scopes', testDHTRecordScopes);
  //   test('create/delete deep record', testDHTRecordDeepCreateDelete);
  // });

  // group('DHTShortArray Tests', () {
  //   setUpAll(dhtRecordPoolFixture.setUp);
  //   tearDownAll(dhtRecordPoolFixture.tearDown);

  //   for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
  //     test('create shortarray stride=$stride',
  //         makeTestDHTShortArrayCreateDelete(stride: stride));
  //     test('add shortarray stride=$stride',
  //         makeTestDHTShortArrayAdd(stride: 256));
  //   }
  // });

  group('DHTLog Tests', () {
    setUpAll(dhtRecordPoolFixture.setUp);
    tearDownAll(dhtRecordPoolFixture.tearDown);

    test('create/delete record', testDHTRecordCreateDelete);
    test('record scopes', testDHTRecordScopes);
    test('create/delete deep record', testDHTRecordDeepCreateDelete);
  });

  group('DHTShortArray Tests', () {
    setUpAll(dhtRecordPoolFixture.setUp);
    tearDownAll(dhtRecordPoolFixture.tearDown);

    for (final stride in [256, 64, 32, 16, 8, 4, 2, 1]) {
      test('create shortarray stride=$stride',
          makeTestDHTShortArrayCreateDelete(stride: stride));
      test('add shortarray stride=$stride',
          makeTestDHTShortArrayAdd(stride: 256));
    for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
      test('create log stride=$stride',
          makeTestDHTLogCreateDelete(stride: stride));
      test('add/truncate log stride=$stride',
          makeTestDHTLogAddTruncate(stride: 256),
          timeout: const Timeout(Duration(seconds: 480)));
    }
  });
  });
@@ -1,6 +1,7 @@
import 'dart:async';

import 'package:async_tools/async_tools.dart';
import 'package:flutter/foundation.dart';
import 'package:veilid_support/veilid_support.dart';
import 'package:veilid_test/veilid_test.dart';

@@ -12,9 +13,13 @@ class DHTRecordPoolFixture implements TickerFixtureTickable {
  UpdateProcessorFixture updateProcessorFixture;
  TickerFixture tickerFixture;

  Future<void> setUp() async {
  Future<void> setUp({bool purge = true}) async {
    await _fixtureMutex.acquire();
    await DHTRecordPool.init();
    if (purge) {
      await Veilid.instance.debug('record purge local');
      await Veilid.instance.debug('record purge remote');
    }
    await DHTRecordPool.init(logger: debugPrintSynchronously);
    tickerFixture.register(this);
  }

@@ -22,6 +27,10 @@ class DHTRecordPoolFixture implements TickerFixtureTickable {
    assert(_fixtureMutex.isLocked, 'should not tearDown without setUp');
    tickerFixture.unregister(this);
    await DHTRecordPool.close();

    final recordList = await Veilid.instance.debug('record list local');
    debugPrintSynchronously('DHT Record List:\n$recordList');

    _fixtureMutex.release();
  }

@@ -0,0 +1,130 @@
import 'dart:convert';

import 'package:flutter_test/flutter_test.dart';
import 'package:veilid_support/veilid_support.dart';

Future<void> Function() makeTestDHTLogCreateDelete({required int stride}) =>
    () async {
      // Close before delete
      {
        final dlog = await DHTLog.create(
            debugName: 'log_create_delete 1 stride $stride', stride: stride);
        expect(await dlog.operate((r) async => r.length), isZero);
        expect(dlog.isOpen, isTrue);
        await dlog.close();
        expect(dlog.isOpen, isFalse);
        await dlog.delete();
        // Operate should fail
        await expectLater(() async => dlog.operate((r) async => r.length),
            throwsA(isA<StateError>()));
      }

      // Close after delete
      {
        final dlog = await DHTLog.create(
            debugName: 'log_create_delete 2 stride $stride', stride: stride);
        await dlog.delete();
        // Operate should still succeed because things aren't closed
        expect(await dlog.operate((r) async => r.length), isZero);
        await dlog.close();
        // Operate should fail
        await expectLater(() async => dlog.operate((r) async => r.length),
            throwsA(isA<StateError>()));
      }

      // Close after delete multiple
      // Okay to request delete multiple times before close
      {
        final dlog = await DHTLog.create(
            debugName: 'log_create_delete 3 stride $stride', stride: stride);
        await dlog.delete();
        await dlog.delete();
        // Operate should still succeed because things aren't closed
        expect(await dlog.operate((r) async => r.length), isZero);
        await dlog.close();
        await dlog.close();
        // Operate should fail
        await expectLater(() async => dlog.operate((r) async => r.length),
            throwsA(isA<StateError>()));
      }
    };

Future<void> Function() makeTestDHTLogAddTruncate({required int stride}) =>
    () async {
      final startTime = DateTime.now();

      final dlog = await DHTLog.create(
          debugName: 'log_add 1 stride $stride', stride: stride);

      final dataset = Iterable<int>.generate(1000)
          .map((n) => utf8.encode('elem $n'))
          .toList();

      print('adding\n');
      {
        final res = await dlog.operateAppend((w) async {
          const chunk = 50;
          for (var n = 0; n < dataset.length; n += chunk) {
            print('$n-${n + chunk - 1} ');
            final success =
                await w.tryAppendItems(dataset.sublist(n, n + chunk));
            expect(success, isTrue);
          }
        });
        expect(res, isNull);
      }

      print('get all\n');
      {
        final dataset2 = await dlog.operate((r) async => r.getItemRange(0));
        expect(dataset2, equals(dataset));
      }
      {
        final dataset3 =
            await dlog.operate((r) async => r.getItemRange(64, length: 128));
        expect(dataset3, equals(dataset.sublist(64, 64 + 128)));
      }
      {
        final dataset4 =
            await dlog.operate((r) async => r.getItemRange(0, length: 1000));
        expect(dataset4, equals(dataset.sublist(0, 1000)));
      }
      {
        final dataset5 =
            await dlog.operate((r) async => r.getItemRange(500, length: 499));
        expect(dataset5, equals(dataset.sublist(500, 999)));
      }
      print('truncate\n');
      {
        await dlog.operateAppend((w) async => w.truncate(5));
      }
      {
        final dataset6 = await dlog
            .operate((r) async => r.getItemRange(500 - 5, length: 499));
        expect(dataset6, equals(dataset.sublist(500, 999)));
      }
      print('truncate 2\n');
      {
        await dlog.operateAppend((w) async => w.truncate(251));
      }
      {
        final dataset7 = await dlog
            .operate((r) async => r.getItemRange(500 - 256, length: 499));
        expect(dataset7, equals(dataset.sublist(500, 999)));
      }
      print('clear\n');
      {
        await dlog.operateAppend((w) async => w.clear());
      }
      print('get all\n');
      {
        final dataset8 = await dlog.operate((r) async => r.getItemRange(0));
        expect(dataset8, isEmpty);
      }

      await dlog.delete();
      await dlog.close();

      final endTime = DateTime.now();
      print('Duration: ${endTime.difference(startTime)}');
    };
@@ -151,17 +151,29 @@ Future<void> testDHTRecordDeepCreateDelete() async {
  // Make root record
  final recroot = await pool.createRecord(debugName: 'test_deep_create_delete');

  for (var d = 0; d < numIterations; d++) {
    // Make child set 1
    var parent = recroot;
    final children = <DHTRecord>[];
    for (var n = 0; n < numChildren; n++) {
      final child =
          await pool.createRecord(debugName: 'deep $n', parent: parent.key);
      children.add(child);
      parent = child;
    }
  // Make child set 1
  var parent = recroot;
  final children = <DHTRecord>[];
  for (var n = 0; n < numChildren; n++) {
    final child =
        await pool.createRecord(debugName: 'deep $n', parent: parent.key);
    children.add(child);
    parent = child;
  }

  // Should mark for deletion
  expect(await pool.deleteRecord(recroot.key), isFalse);

  // Root should still be valid
  expect(await pool.isValidRecordKey(recroot.key), isTrue);

  // Close root record
  await recroot.close();

  // Root should still be valid because children still exist
  expect(await pool.isValidRecordKey(recroot.key), isTrue);

  for (var d = 0; d < numIterations; d++) {
    // Make child set 2
    final children2 = <DHTRecord>[];
    parent = recroot;
@@ -171,31 +183,31 @@ Future<void> testDHTRecordDeepCreateDelete() async {
      children2.add(child);
      parent = child;
    }
    // Should fail to delete root
    await expectLater(
        () async => pool.deleteRecord(recroot.key), throwsA(isA<StateError>()));

    // Close child set 1
    await children.map((c) => c.close()).wait;

    // Delete child set 1 in reverse order
    for (var n = numChildren - 1; n >= 0; n--) {
      await pool.deleteRecord(children[n].key);
    }

    // Should fail to delete root
    await expectLater(
        () async => pool.deleteRecord(recroot.key), throwsA(isA<StateError>()));

    // Close child set 1
    await children2.map((c) => c.close()).wait;

    // Delete child set 2 in reverse order
    for (var n = numChildren - 1; n >= 0; n--) {
      await pool.deleteRecord(children2[n].key);
      expect(await pool.deleteRecord(children2[n].key), isFalse);
    }

    // Root should still be there
    expect(await pool.isValidRecordKey(recroot.key), isTrue);

    // Close child set 2
    await children2.map((c) => c.close()).wait;

    // All child set 2 should be invalid
    for (final c2 in children2) {
      // Children should be invalid and deleted now
      expect(await pool.isValidRecordKey(c2.key), isFalse);
    }

    // Root should still be valid
    expect(await pool.isValidRecordKey(recroot.key), isTrue);
  }

  // Should be able to delete root now
  await pool.deleteRecord(recroot.key);
  // Close child set 1
  await children.map((c) => c.close()).wait;

  // Root should have gone away
  expect(await pool.isValidRecordKey(recroot.key), isFalse);
}

@@ -61,10 +61,10 @@ Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
          .map((n) => utf8.encode('elem $n'))
          .toList();

      print('adding\n');
      print('adding singles\n');
      {
        final res = await arr.operateWrite((w) async {
          for (var n = 0; n < dataset.length; n++) {
          for (var n = 4; n < 8; n++) {
            print('$n ');
            final success = await w.tryAddItem(dataset[n]);
            expect(success, isTrue);
@@ -73,6 +73,40 @@ Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
        expect(res, isNull);
      }

      print('adding batch\n');
      {
        final res = await arr.operateWrite((w) async {
          print('${dataset.length ~/ 2}-${dataset.length}');
          final success = await w.tryAddItems(
              dataset.sublist(dataset.length ~/ 2, dataset.length));
          expect(success, isTrue);
        });
        expect(res, isNull);
      }

      print('inserting singles\n');
      {
        final res = await arr.operateWrite((w) async {
          for (var n = 0; n < 4; n++) {
            print('$n ');
            final success = await w.tryInsertItem(n, dataset[n]);
            expect(success, isTrue);
          }
        });
        expect(res, isNull);
      }

      print('inserting batch\n');
      {
        final res = await arr.operateWrite((w) async {
          print('8-${dataset.length ~/ 2}');
          final success = await w.tryInsertItems(
              8, dataset.sublist(8, dataset.length ~/ 2));
          expect(success, isTrue);
        });
        expect(res, isNull);
      }

      //print('get all\n');
      {
        final dataset2 = await arr.operate((r) async => r.getItemRange(0));