mirror of
https://gitlab.com/veilid/veilidchat.git
synced 2024-10-01 06:55:46 -04:00
Merge branch 'dht-log' into 'main'
DHT Log See merge request veilid/veilidchat!26
This commit is contained in:
commit
8b64fbadc5
@ -227,7 +227,7 @@ class SingleContactMessagesCubit extends Cubit<SingleContactMessagesState> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Future<void> _reconcileMessagesInner(
|
Future<void> _reconcileMessagesInner(
|
||||||
{required DHTShortArrayWrite reconciledMessagesWriter,
|
{required DHTRandomReadWrite reconciledMessagesWriter,
|
||||||
required IList<proto.Message> messages}) async {
|
required IList<proto.Message> messages}) async {
|
||||||
// Ensure remoteMessages is sorted by timestamp
|
// Ensure remoteMessages is sorted by timestamp
|
||||||
final newMessages = messages
|
final newMessages = messages
|
||||||
@ -236,7 +236,7 @@ class SingleContactMessagesCubit extends Cubit<SingleContactMessagesState> {
|
|||||||
|
|
||||||
// Existing messages will always be sorted by timestamp so merging is easy
|
// Existing messages will always be sorted by timestamp so merging is easy
|
||||||
final existingMessages = await reconciledMessagesWriter
|
final existingMessages = await reconciledMessagesWriter
|
||||||
.getAllItemsProtobuf(proto.Message.fromBuffer);
|
.getItemRangeProtobuf(proto.Message.fromBuffer, 0);
|
||||||
if (existingMessages == null) {
|
if (existingMessages == null) {
|
||||||
throw Exception(
|
throw Exception(
|
||||||
'Could not load existing reconciled messages at this time');
|
'Could not load existing reconciled messages at this time');
|
||||||
|
@ -92,31 +92,29 @@ class ChatListCubit extends DHTShortArrayCubit<proto.Chat>
|
|||||||
|
|
||||||
// Remove Chat from account's list
|
// Remove Chat from account's list
|
||||||
// if this fails, don't keep retrying, user can try again later
|
// if this fails, don't keep retrying, user can try again later
|
||||||
final (deletedItem, success) =
|
final deletedItem =
|
||||||
// Ensure followers get their changes before we return
|
// Ensure followers get their changes before we return
|
||||||
await syncFollowers(() => operateWrite((writer) async {
|
await syncFollowers(() => operateWrite((writer) async {
|
||||||
if (activeChatCubit.state == remoteConversationRecordKey) {
|
if (activeChatCubit.state == remoteConversationRecordKey) {
|
||||||
activeChatCubit.setActiveChat(null);
|
activeChatCubit.setActiveChat(null);
|
||||||
}
|
}
|
||||||
for (var i = 0; i < writer.length; i++) {
|
for (var i = 0; i < writer.length; i++) {
|
||||||
final cbuf = await writer.getItem(i);
|
final c =
|
||||||
if (cbuf == null) {
|
await writer.getItemProtobuf(proto.Chat.fromBuffer, i);
|
||||||
|
if (c == null) {
|
||||||
throw Exception('Failed to get chat');
|
throw Exception('Failed to get chat');
|
||||||
}
|
}
|
||||||
final c = proto.Chat.fromBuffer(cbuf);
|
|
||||||
if (c.remoteConversationRecordKey == remoteConversationKey) {
|
if (c.remoteConversationRecordKey == remoteConversationKey) {
|
||||||
// Found the right chat
|
// Found the right chat
|
||||||
if (await writer.tryRemoveItem(i) != null) {
|
await writer.removeItem(i);
|
||||||
return c;
|
return c;
|
||||||
}
|
}
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
}));
|
}));
|
||||||
// Since followers are synced, we can safetly remove the reconciled
|
// Since followers are synced, we can safetly remove the reconciled
|
||||||
// chat record now
|
// chat record now
|
||||||
if (success && deletedItem != null) {
|
if (deletedItem != null) {
|
||||||
try {
|
try {
|
||||||
await DHTRecordPool.instance.deleteRecord(
|
await DHTRecordPool.instance.deleteRecord(
|
||||||
deletedItem.reconciledChatRecord.toVeilid().recordKey);
|
deletedItem.reconciledChatRecord.toVeilid().recordKey);
|
||||||
|
@ -34,7 +34,7 @@ class ChatSingleContactListWidget extends StatelessWidget {
|
|||||||
? const EmptyChatListWidget()
|
? const EmptyChatListWidget()
|
||||||
: SearchableList<proto.Chat>(
|
: SearchableList<proto.Chat>(
|
||||||
initialList: chatList.map((x) => x.value).toList(),
|
initialList: chatList.map((x) => x.value).toList(),
|
||||||
builder: (l, i, c) {
|
itemBuilder: (c) {
|
||||||
final contact =
|
final contact =
|
||||||
contactMap[c.remoteConversationRecordKey];
|
contactMap[c.remoteConversationRecordKey];
|
||||||
if (contact == null) {
|
if (contact == null) {
|
||||||
|
@ -177,7 +177,7 @@ class ContactInvitationListCubit
|
|||||||
_activeAccountInfo.userLogin.accountRecordInfo.accountRecord.recordKey;
|
_activeAccountInfo.userLogin.accountRecordInfo.accountRecord.recordKey;
|
||||||
|
|
||||||
// Remove ContactInvitationRecord from account's list
|
// Remove ContactInvitationRecord from account's list
|
||||||
final (deletedItem, success) = await operateWrite((writer) async {
|
final deletedItem = await operateWrite((writer) async {
|
||||||
for (var i = 0; i < writer.length; i++) {
|
for (var i = 0; i < writer.length; i++) {
|
||||||
final item = await writer.getItemProtobuf(
|
final item = await writer.getItemProtobuf(
|
||||||
proto.ContactInvitationRecord.fromBuffer, i);
|
proto.ContactInvitationRecord.fromBuffer, i);
|
||||||
@ -186,16 +186,14 @@ class ContactInvitationListCubit
|
|||||||
}
|
}
|
||||||
if (item.contactRequestInbox.recordKey.toVeilid() ==
|
if (item.contactRequestInbox.recordKey.toVeilid() ==
|
||||||
contactRequestInboxRecordKey) {
|
contactRequestInboxRecordKey) {
|
||||||
if (await writer.tryRemoveItem(i) != null) {
|
await writer.removeItem(i);
|
||||||
return item;
|
return item;
|
||||||
}
|
}
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
});
|
});
|
||||||
|
|
||||||
if (success && deletedItem != null) {
|
if (deletedItem != null) {
|
||||||
// Delete the contact request inbox
|
// Delete the contact request inbox
|
||||||
final contactRequestInbox = deletedItem.contactRequestInbox.toVeilid();
|
final contactRequestInbox = deletedItem.contactRequestInbox.toVeilid();
|
||||||
await (await pool.openRecordOwned(contactRequestInbox,
|
await (await pool.openRecordOwned(contactRequestInbox,
|
||||||
|
@ -1,5 +1,4 @@
|
|||||||
import 'dart:async';
|
import 'dart:async';
|
||||||
import 'dart:io';
|
|
||||||
import 'dart:typed_data';
|
import 'dart:typed_data';
|
||||||
|
|
||||||
import 'package:awesome_extensions/awesome_extensions.dart';
|
import 'package:awesome_extensions/awesome_extensions.dart';
|
||||||
@ -16,65 +15,64 @@ import '../../theme/theme.dart';
|
|||||||
import '../../tools/tools.dart';
|
import '../../tools/tools.dart';
|
||||||
import 'invitation_dialog.dart';
|
import 'invitation_dialog.dart';
|
||||||
|
|
||||||
class BarcodeOverlay extends CustomPainter {
|
// class BarcodeOverlay extends CustomPainter {
|
||||||
BarcodeOverlay({
|
// BarcodeOverlay({
|
||||||
required this.barcode,
|
// required this.barcode,
|
||||||
required this.arguments,
|
// required this.boxFit,
|
||||||
required this.boxFit,
|
// required this.capture,
|
||||||
required this.capture,
|
// required this.size,
|
||||||
});
|
// });
|
||||||
|
|
||||||
final BarcodeCapture capture;
|
// final BarcodeCapture capture;
|
||||||
final Barcode barcode;
|
// final Barcode barcode;
|
||||||
final MobileScannerArguments arguments;
|
// final BoxFit boxFit;
|
||||||
final BoxFit boxFit;
|
// final Size size;
|
||||||
|
|
||||||
@override
|
// @override
|
||||||
void paint(Canvas canvas, Size size) {
|
// void paint(Canvas canvas, Size size) {
|
||||||
final adjustedSize = applyBoxFit(boxFit, arguments.size, size);
|
// final adjustedSize = applyBoxFit(boxFit, size, size);
|
||||||
|
|
||||||
var verticalPadding = size.height - adjustedSize.destination.height;
|
// var verticalPadding = size.height - adjustedSize.destination.height;
|
||||||
var horizontalPadding = size.width - adjustedSize.destination.width;
|
// var horizontalPadding = size.width - adjustedSize.destination.width;
|
||||||
if (verticalPadding > 0) {
|
// if (verticalPadding > 0) {
|
||||||
verticalPadding = verticalPadding / 2;
|
// verticalPadding = verticalPadding / 2;
|
||||||
} else {
|
// } else {
|
||||||
verticalPadding = 0;
|
// verticalPadding = 0;
|
||||||
}
|
// }
|
||||||
|
|
||||||
if (horizontalPadding > 0) {
|
// if (horizontalPadding > 0) {
|
||||||
horizontalPadding = horizontalPadding / 2;
|
// horizontalPadding = horizontalPadding / 2;
|
||||||
} else {
|
// } else {
|
||||||
horizontalPadding = 0;
|
// horizontalPadding = 0;
|
||||||
}
|
// }
|
||||||
|
|
||||||
final ratioWidth = (Platform.isIOS ? capture.width : arguments.size.width) /
|
// final ratioWidth = (Platform.isIOS ? capture.size.width : size.width) /
|
||||||
adjustedSize.destination.width;
|
// adjustedSize.destination.width;
|
||||||
final ratioHeight =
|
// final ratioHeight = (Platform.isIOS ? capture.size.height : size.height) /
|
||||||
(Platform.isIOS ? capture.height : arguments.size.height) /
|
// adjustedSize.destination.height;
|
||||||
adjustedSize.destination.height;
|
|
||||||
|
|
||||||
final adjustedOffset = <Offset>[];
|
// final adjustedOffset = <Offset>[];
|
||||||
for (final offset in barcode.corners) {
|
// for (final offset in barcode.corners) {
|
||||||
adjustedOffset.add(
|
// adjustedOffset.add(
|
||||||
Offset(
|
// Offset(
|
||||||
offset.dx / ratioWidth + horizontalPadding,
|
// offset.dx / ratioWidth + horizontalPadding,
|
||||||
offset.dy / ratioHeight + verticalPadding,
|
// offset.dy / ratioHeight + verticalPadding,
|
||||||
),
|
// ),
|
||||||
);
|
// );
|
||||||
}
|
// }
|
||||||
final cutoutPath = Path()..addPolygon(adjustedOffset, true);
|
// final cutoutPath = Path()..addPolygon(adjustedOffset, true);
|
||||||
|
|
||||||
final backgroundPaint = Paint()
|
// final backgroundPaint = Paint()
|
||||||
..color = Colors.red.withOpacity(0.3)
|
// ..color = Colors.red.withOpacity(0.3)
|
||||||
..style = PaintingStyle.fill
|
// ..style = PaintingStyle.fill
|
||||||
..blendMode = BlendMode.dstOut;
|
// ..blendMode = BlendMode.dstOut;
|
||||||
|
|
||||||
canvas.drawPath(cutoutPath, backgroundPaint);
|
// canvas.drawPath(cutoutPath, backgroundPaint);
|
||||||
}
|
// }
|
||||||
|
|
||||||
@override
|
// @override
|
||||||
bool shouldRepaint(covariant CustomPainter oldDelegate) => false;
|
// bool shouldRepaint(covariant CustomPainter oldDelegate) => false;
|
||||||
}
|
// }
|
||||||
|
|
||||||
class ScannerOverlay extends CustomPainter {
|
class ScannerOverlay extends CustomPainter {
|
||||||
ScannerOverlay(this.scanWindow);
|
ScannerOverlay(this.scanWindow);
|
||||||
@ -202,9 +200,9 @@ class ScanInvitationDialogState extends State<ScanInvitationDialog> {
|
|||||||
IconButton(
|
IconButton(
|
||||||
color: Colors.white,
|
color: Colors.white,
|
||||||
icon: ValueListenableBuilder(
|
icon: ValueListenableBuilder(
|
||||||
valueListenable: cameraController.torchState,
|
valueListenable: cameraController,
|
||||||
builder: (context, state, child) {
|
builder: (context, state, child) {
|
||||||
switch (state) {
|
switch (state.torchState) {
|
||||||
case TorchState.off:
|
case TorchState.off:
|
||||||
return Icon(Icons.flash_off,
|
return Icon(Icons.flash_off,
|
||||||
color:
|
color:
|
||||||
@ -212,6 +210,12 @@ class ScanInvitationDialogState extends State<ScanInvitationDialog> {
|
|||||||
case TorchState.on:
|
case TorchState.on:
|
||||||
return Icon(Icons.flash_on,
|
return Icon(Icons.flash_on,
|
||||||
color: scale.primaryScale.primary);
|
color: scale.primaryScale.primary);
|
||||||
|
case TorchState.auto:
|
||||||
|
return Icon(Icons.flash_auto,
|
||||||
|
color: scale.primaryScale.primary);
|
||||||
|
case TorchState.unavailable:
|
||||||
|
return Icon(Icons.no_flash,
|
||||||
|
color: scale.primaryScale.primary);
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
@ -236,10 +240,9 @@ class ScanInvitationDialogState extends State<ScanInvitationDialog> {
|
|||||||
IconButton(
|
IconButton(
|
||||||
color: Colors.white,
|
color: Colors.white,
|
||||||
icon: ValueListenableBuilder(
|
icon: ValueListenableBuilder(
|
||||||
valueListenable:
|
valueListenable: cameraController,
|
||||||
cameraController.cameraFacingState,
|
|
||||||
builder: (context, state, child) {
|
builder: (context, state, child) {
|
||||||
switch (state) {
|
switch (state.cameraDirection) {
|
||||||
case CameraFacing.front:
|
case CameraFacing.front:
|
||||||
return const Icon(Icons.camera_front);
|
return const Icon(Icons.camera_front);
|
||||||
case CameraFacing.back:
|
case CameraFacing.back:
|
||||||
@ -265,7 +268,7 @@ class ScanInvitationDialogState extends State<ScanInvitationDialog> {
|
|||||||
SchedulerBinding.instance
|
SchedulerBinding.instance
|
||||||
.addPostFrameCallback((_) {
|
.addPostFrameCallback((_) {
|
||||||
cameraController.dispose();
|
cameraController.dispose();
|
||||||
Navigator.pop(context, null);
|
Navigator.pop(context);
|
||||||
})
|
})
|
||||||
})),
|
})),
|
||||||
],
|
],
|
||||||
|
@ -70,7 +70,7 @@ class ContactListCubit extends DHTShortArrayCubit<proto.Contact> {
|
|||||||
contact.remoteConversationRecordKey.toVeilid();
|
contact.remoteConversationRecordKey.toVeilid();
|
||||||
|
|
||||||
// Remove Contact from account's list
|
// Remove Contact from account's list
|
||||||
final (deletedItem, success) = await operateWrite((writer) async {
|
final deletedItem = await operateWrite((writer) async {
|
||||||
for (var i = 0; i < writer.length; i++) {
|
for (var i = 0; i < writer.length; i++) {
|
||||||
final item = await writer.getItemProtobuf(proto.Contact.fromBuffer, i);
|
final item = await writer.getItemProtobuf(proto.Contact.fromBuffer, i);
|
||||||
if (item == null) {
|
if (item == null) {
|
||||||
@ -78,16 +78,14 @@ class ContactListCubit extends DHTShortArrayCubit<proto.Contact> {
|
|||||||
}
|
}
|
||||||
if (item.remoteConversationRecordKey ==
|
if (item.remoteConversationRecordKey ==
|
||||||
contact.remoteConversationRecordKey) {
|
contact.remoteConversationRecordKey) {
|
||||||
if (await writer.tryRemoveItem(i) != null) {
|
await writer.removeItem(i);
|
||||||
return item;
|
return item;
|
||||||
}
|
}
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
});
|
});
|
||||||
|
|
||||||
if (success && deletedItem != null) {
|
if (deletedItem != null) {
|
||||||
try {
|
try {
|
||||||
// Make a conversation cubit to manipulate the conversation
|
// Make a conversation cubit to manipulate the conversation
|
||||||
final conversationCubit = ConversationCubit(
|
final conversationCubit = ConversationCubit(
|
||||||
|
@ -295,7 +295,7 @@ class ConversationCubit extends Cubit<AsyncValue<ConversationState>> {
|
|||||||
debugName: 'ConversationCubit::initLocalMessages::LocalMessages',
|
debugName: 'ConversationCubit::initLocalMessages::LocalMessages',
|
||||||
parent: localConversationKey,
|
parent: localConversationKey,
|
||||||
crypto: crypto,
|
crypto: crypto,
|
||||||
smplWriter: writer))
|
writer: writer))
|
||||||
.deleteScope((messages) async => await callback(messages));
|
.deleteScope((messages) async => await callback(messages));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -38,7 +38,7 @@ class ContactListWidget extends StatelessWidget {
|
|||||||
? const EmptyContactListWidget()
|
? const EmptyContactListWidget()
|
||||||
: SearchableList<proto.Contact>(
|
: SearchableList<proto.Contact>(
|
||||||
initialList: contactList.toList(),
|
initialList: contactList.toList(),
|
||||||
builder: (l, i, c) =>
|
itemBuilder: (c) =>
|
||||||
ContactItemWidget(contact: c, disabled: disabled)
|
ContactItemWidget(contact: c, disabled: disabled)
|
||||||
.paddingLTRB(0, 4, 0, 0),
|
.paddingLTRB(0, 4, 0, 0),
|
||||||
filter: (value) {
|
filter: (value) {
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
PODS:
|
PODS:
|
||||||
- FlutterMacOS (1.0.0)
|
- FlutterMacOS (1.0.0)
|
||||||
- mobile_scanner (3.5.6):
|
- mobile_scanner (5.1.1):
|
||||||
- FlutterMacOS
|
- FlutterMacOS
|
||||||
- pasteboard (0.0.1):
|
- pasteboard (0.0.1):
|
||||||
- FlutterMacOS
|
- FlutterMacOS
|
||||||
@ -68,15 +68,15 @@ EXTERNAL SOURCES:
|
|||||||
|
|
||||||
SPEC CHECKSUMS:
|
SPEC CHECKSUMS:
|
||||||
FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24
|
FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24
|
||||||
mobile_scanner: 54ceceae0c8da2457e26a362a6be5c61154b1829
|
mobile_scanner: 1efac1e53c294b24e3bb55bcc7f4deee0233a86b
|
||||||
pasteboard: 9b69dba6fedbb04866be632205d532fe2f6b1d99
|
pasteboard: 9b69dba6fedbb04866be632205d532fe2f6b1d99
|
||||||
path_provider_foundation: 3784922295ac71e43754bd15e0653ccfd36a147c
|
path_provider_foundation: 2b6b4c569c0fb62ec74538f866245ac84301af46
|
||||||
screen_retriever: 59634572a57080243dd1bf715e55b6c54f241a38
|
screen_retriever: 59634572a57080243dd1bf715e55b6c54f241a38
|
||||||
share_plus: 36537c04ce0c3e3f5bd297ce4318b6d5ee5fd6cf
|
share_plus: 36537c04ce0c3e3f5bd297ce4318b6d5ee5fd6cf
|
||||||
shared_preferences_foundation: b4c3b4cddf1c21f02770737f147a3f5da9d39695
|
shared_preferences_foundation: fcdcbc04712aee1108ac7fda236f363274528f78
|
||||||
smart_auth: b38e3ab4bfe089eacb1e233aca1a2340f96c28e9
|
smart_auth: b38e3ab4bfe089eacb1e233aca1a2340f96c28e9
|
||||||
sqflite: 673a0e54cc04b7d6dba8d24fb8095b31c3a99eec
|
sqflite: 673a0e54cc04b7d6dba8d24fb8095b31c3a99eec
|
||||||
url_launcher_macos: d2691c7dd33ed713bf3544850a623080ec693d95
|
url_launcher_macos: 5f437abeda8c85500ceb03f5c1938a8c5a705399
|
||||||
veilid: a54f57b7bcf0e4e072fe99272d76ca126b2026d0
|
veilid: a54f57b7bcf0e4e072fe99272d76ca126b2026d0
|
||||||
window_manager: 3a1844359a6295ab1e47659b1a777e36773cd6e8
|
window_manager: 3a1844359a6295ab1e47659b1a777e36773cd6e8
|
||||||
|
|
||||||
|
10
packages/veilid_support/build.yaml
Normal file
10
packages/veilid_support/build.yaml
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
targets:
|
||||||
|
$default:
|
||||||
|
sources:
|
||||||
|
exclude:
|
||||||
|
- example/**
|
||||||
|
builders:
|
||||||
|
json_serializable:
|
||||||
|
options:
|
||||||
|
explicit_to_json: true
|
||||||
|
field_rename: snake
|
@ -1,16 +1,16 @@
|
|||||||
@Timeout(Duration(seconds: 240))
|
import 'package:flutter/foundation.dart';
|
||||||
|
|
||||||
library veilid_support_integration_test;
|
|
||||||
|
|
||||||
import 'package:flutter_test/flutter_test.dart';
|
|
||||||
import 'package:integration_test/integration_test.dart';
|
import 'package:integration_test/integration_test.dart';
|
||||||
|
import 'package:test/test.dart';
|
||||||
import 'package:veilid_test/veilid_test.dart';
|
import 'package:veilid_test/veilid_test.dart';
|
||||||
|
|
||||||
import 'fixtures/fixtures.dart';
|
import 'fixtures/fixtures.dart';
|
||||||
|
import 'test_dht_log.dart';
|
||||||
import 'test_dht_record_pool.dart';
|
import 'test_dht_record_pool.dart';
|
||||||
import 'test_dht_short_array.dart';
|
import 'test_dht_short_array.dart';
|
||||||
|
|
||||||
void main() {
|
void main() {
|
||||||
|
final startTime = DateTime.now();
|
||||||
|
|
||||||
IntegrationTestWidgetsFlutterBinding.ensureInitialized();
|
IntegrationTestWidgetsFlutterBinding.ensureInitialized();
|
||||||
final veilidFixture =
|
final veilidFixture =
|
||||||
DefaultVeilidFixture(programName: 'veilid_support integration test');
|
DefaultVeilidFixture(programName: 'veilid_support integration test');
|
||||||
@ -22,9 +22,13 @@ void main() {
|
|||||||
tickerFixture: tickerFixture,
|
tickerFixture: tickerFixture,
|
||||||
updateProcessorFixture: updateProcessorFixture);
|
updateProcessorFixture: updateProcessorFixture);
|
||||||
|
|
||||||
group('Started Tests', () {
|
group(timeout: const Timeout(Duration(seconds: 240)), 'Started Tests', () {
|
||||||
setUpAll(veilidFixture.setUp);
|
setUpAll(veilidFixture.setUp);
|
||||||
tearDownAll(veilidFixture.tearDown);
|
tearDownAll(veilidFixture.tearDown);
|
||||||
|
tearDownAll(() {
|
||||||
|
final endTime = DateTime.now();
|
||||||
|
debugPrintSynchronously('Duration: ${endTime.difference(startTime)}');
|
||||||
|
});
|
||||||
|
|
||||||
group('Attached Tests', () {
|
group('Attached Tests', () {
|
||||||
setUpAll(veilidFixture.attach);
|
setUpAll(veilidFixture.attach);
|
||||||
@ -51,11 +55,26 @@ void main() {
|
|||||||
setUpAll(dhtRecordPoolFixture.setUp);
|
setUpAll(dhtRecordPoolFixture.setUp);
|
||||||
tearDownAll(dhtRecordPoolFixture.tearDown);
|
tearDownAll(dhtRecordPoolFixture.tearDown);
|
||||||
|
|
||||||
for (final stride in [256, 64, 32, 16, 8, 4, 2, 1]) {
|
for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
|
||||||
test('create shortarray stride=$stride',
|
test('create shortarray stride=$stride',
|
||||||
makeTestDHTShortArrayCreateDelete(stride: stride));
|
makeTestDHTShortArrayCreateDelete(stride: stride));
|
||||||
test('add shortarray stride=$stride',
|
test('add shortarray stride=$stride',
|
||||||
makeTestDHTShortArrayAdd(stride: 256));
|
makeTestDHTShortArrayAdd(stride: stride));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
group('DHTLog Tests', () {
|
||||||
|
setUpAll(dhtRecordPoolFixture.setUp);
|
||||||
|
tearDownAll(dhtRecordPoolFixture.tearDown);
|
||||||
|
|
||||||
|
for (final stride in [256, 16 /*64, 32, 16, 8, 4, 2, 1 */]) {
|
||||||
|
test('create log stride=$stride',
|
||||||
|
makeTestDHTLogCreateDelete(stride: stride));
|
||||||
|
test(
|
||||||
|
timeout: const Timeout(Duration(seconds: 480)),
|
||||||
|
'add/truncate log stride=$stride',
|
||||||
|
makeTestDHTLogAddTruncate(stride: stride),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
import 'dart:async';
|
import 'dart:async';
|
||||||
|
|
||||||
import 'package:async_tools/async_tools.dart';
|
import 'package:async_tools/async_tools.dart';
|
||||||
|
import 'package:flutter/foundation.dart';
|
||||||
import 'package:veilid_support/veilid_support.dart';
|
import 'package:veilid_support/veilid_support.dart';
|
||||||
import 'package:veilid_test/veilid_test.dart';
|
import 'package:veilid_test/veilid_test.dart';
|
||||||
|
|
||||||
@ -12,9 +13,13 @@ class DHTRecordPoolFixture implements TickerFixtureTickable {
|
|||||||
UpdateProcessorFixture updateProcessorFixture;
|
UpdateProcessorFixture updateProcessorFixture;
|
||||||
TickerFixture tickerFixture;
|
TickerFixture tickerFixture;
|
||||||
|
|
||||||
Future<void> setUp() async {
|
Future<void> setUp({bool purge = true}) async {
|
||||||
await _fixtureMutex.acquire();
|
await _fixtureMutex.acquire();
|
||||||
await DHTRecordPool.init();
|
if (purge) {
|
||||||
|
await Veilid.instance.debug('record purge local');
|
||||||
|
await Veilid.instance.debug('record purge remote');
|
||||||
|
}
|
||||||
|
await DHTRecordPool.init(logger: debugPrintSynchronously);
|
||||||
tickerFixture.register(this);
|
tickerFixture.register(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -22,6 +27,10 @@ class DHTRecordPoolFixture implements TickerFixtureTickable {
|
|||||||
assert(_fixtureMutex.isLocked, 'should not tearDown without setUp');
|
assert(_fixtureMutex.isLocked, 'should not tearDown without setUp');
|
||||||
tickerFixture.unregister(this);
|
tickerFixture.unregister(this);
|
||||||
await DHTRecordPool.close();
|
await DHTRecordPool.close();
|
||||||
|
|
||||||
|
final recordList = await Veilid.instance.debug('record list local');
|
||||||
|
debugPrintSynchronously('DHT Record List:\n$recordList');
|
||||||
|
|
||||||
_fixtureMutex.release();
|
_fixtureMutex.release();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -0,0 +1,126 @@
|
|||||||
|
import 'dart:convert';
|
||||||
|
|
||||||
|
import 'package:test/test.dart';
|
||||||
|
import 'package:veilid_support/veilid_support.dart';
|
||||||
|
|
||||||
|
Future<void> Function() makeTestDHTLogCreateDelete({required int stride}) =>
|
||||||
|
() async {
|
||||||
|
// Close before delete
|
||||||
|
{
|
||||||
|
final dlog = await DHTLog.create(
|
||||||
|
debugName: 'log_create_delete 1 stride $stride', stride: stride);
|
||||||
|
expect(await dlog.operate((r) async => r.length), isZero);
|
||||||
|
expect(dlog.isOpen, isTrue);
|
||||||
|
await dlog.close();
|
||||||
|
expect(dlog.isOpen, isFalse);
|
||||||
|
await dlog.delete();
|
||||||
|
// Operate should fail
|
||||||
|
await expectLater(() async => dlog.operate((r) async => r.length),
|
||||||
|
throwsA(isA<StateError>()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close after delete
|
||||||
|
{
|
||||||
|
final dlog = await DHTLog.create(
|
||||||
|
debugName: 'log_create_delete 2 stride $stride', stride: stride);
|
||||||
|
await dlog.delete();
|
||||||
|
// Operate should still succeed because things aren't closed
|
||||||
|
expect(await dlog.operate((r) async => r.length), isZero);
|
||||||
|
await dlog.close();
|
||||||
|
// Operate should fail
|
||||||
|
await expectLater(() async => dlog.operate((r) async => r.length),
|
||||||
|
throwsA(isA<StateError>()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close after delete multiple
|
||||||
|
// Okay to request delete multiple times before close
|
||||||
|
{
|
||||||
|
final dlog = await DHTLog.create(
|
||||||
|
debugName: 'log_create_delete 3 stride $stride', stride: stride);
|
||||||
|
await dlog.delete();
|
||||||
|
await dlog.delete();
|
||||||
|
// Operate should still succeed because things aren't closed
|
||||||
|
expect(await dlog.operate((r) async => r.length), isZero);
|
||||||
|
await dlog.close();
|
||||||
|
await expectLater(() async => dlog.close(), throwsA(isA<StateError>()));
|
||||||
|
// Operate should fail
|
||||||
|
await expectLater(() async => dlog.operate((r) async => r.length),
|
||||||
|
throwsA(isA<StateError>()));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Future<void> Function() makeTestDHTLogAddTruncate({required int stride}) =>
|
||||||
|
() async {
|
||||||
|
final dlog = await DHTLog.create(
|
||||||
|
debugName: 'log_add 1 stride $stride', stride: stride);
|
||||||
|
|
||||||
|
final dataset = Iterable<int>.generate(1000)
|
||||||
|
.map((n) => utf8.encode('elem $n'))
|
||||||
|
.toList();
|
||||||
|
|
||||||
|
print('adding\n');
|
||||||
|
{
|
||||||
|
final res = await dlog.operateAppend((w) async {
|
||||||
|
const chunk = 25;
|
||||||
|
for (var n = 0; n < dataset.length; n += chunk) {
|
||||||
|
print('$n-${n + chunk - 1} ');
|
||||||
|
final success =
|
||||||
|
await w.tryAppendItems(dataset.sublist(n, n + chunk));
|
||||||
|
expect(success, isTrue);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
expect(res, isNull);
|
||||||
|
}
|
||||||
|
|
||||||
|
print('get all\n');
|
||||||
|
{
|
||||||
|
final dataset2 = await dlog.operate((r) async => r.getItemRange(0));
|
||||||
|
expect(dataset2, equals(dataset));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
final dataset3 =
|
||||||
|
await dlog.operate((r) async => r.getItemRange(64, length: 128));
|
||||||
|
expect(dataset3, equals(dataset.sublist(64, 64 + 128)));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
final dataset4 =
|
||||||
|
await dlog.operate((r) async => r.getItemRange(0, length: 1000));
|
||||||
|
expect(dataset4, equals(dataset.sublist(0, 1000)));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
final dataset5 =
|
||||||
|
await dlog.operate((r) async => r.getItemRange(500, length: 499));
|
||||||
|
expect(dataset5, equals(dataset.sublist(500, 999)));
|
||||||
|
}
|
||||||
|
print('truncate\n');
|
||||||
|
{
|
||||||
|
await dlog.operateAppend((w) async => w.truncate(5));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
final dataset6 = await dlog
|
||||||
|
.operate((r) async => r.getItemRange(500 - 5, length: 499));
|
||||||
|
expect(dataset6, equals(dataset.sublist(500, 999)));
|
||||||
|
}
|
||||||
|
print('truncate 2\n');
|
||||||
|
{
|
||||||
|
await dlog.operateAppend((w) async => w.truncate(251));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
final dataset7 = await dlog
|
||||||
|
.operate((r) async => r.getItemRange(500 - 256, length: 499));
|
||||||
|
expect(dataset7, equals(dataset.sublist(500, 999)));
|
||||||
|
}
|
||||||
|
print('clear\n');
|
||||||
|
{
|
||||||
|
await dlog.operateAppend((w) async => w.clear());
|
||||||
|
}
|
||||||
|
print('get all\n');
|
||||||
|
{
|
||||||
|
final dataset8 = await dlog.operate((r) async => r.getItemRange(0));
|
||||||
|
expect(dataset8, isEmpty);
|
||||||
|
}
|
||||||
|
print('delete and close\n');
|
||||||
|
|
||||||
|
await dlog.delete();
|
||||||
|
await dlog.close();
|
||||||
|
};
|
@ -1,7 +1,7 @@
|
|||||||
import 'dart:convert';
|
import 'dart:convert';
|
||||||
|
|
||||||
import 'package:flutter/foundation.dart';
|
import 'package:flutter/foundation.dart';
|
||||||
import 'package:flutter_test/flutter_test.dart';
|
import 'package:test/test.dart';
|
||||||
import 'package:veilid_support/veilid_support.dart';
|
import 'package:veilid_support/veilid_support.dart';
|
||||||
|
|
||||||
Future<void> testDHTRecordPoolCreate() async {
|
Future<void> testDHTRecordPoolCreate() async {
|
||||||
@ -48,7 +48,7 @@ Future<void> testDHTRecordCreateDelete() async {
|
|||||||
// Set should succeed still
|
// Set should succeed still
|
||||||
await rec3.tryWriteBytes(utf8.encode('test'));
|
await rec3.tryWriteBytes(utf8.encode('test'));
|
||||||
await rec3.close();
|
await rec3.close();
|
||||||
await rec3.close();
|
await expectLater(() async => rec3.close(), throwsA(isA<StateError>()));
|
||||||
// Set should fail
|
// Set should fail
|
||||||
await expectLater(() async => rec3.tryWriteBytes(utf8.encode('test')),
|
await expectLater(() async => rec3.tryWriteBytes(utf8.encode('test')),
|
||||||
throwsA(isA<VeilidAPIException>()));
|
throwsA(isA<VeilidAPIException>()));
|
||||||
@ -84,7 +84,7 @@ Future<void> testDHTRecordScopes() async {
|
|||||||
} on Exception {
|
} on Exception {
|
||||||
assert(false, 'should not throw');
|
assert(false, 'should not throw');
|
||||||
}
|
}
|
||||||
await rec2.close();
|
await expectLater(() async => rec2.close(), throwsA(isA<StateError>()));
|
||||||
await pool.deleteRecord(rec2.key);
|
await pool.deleteRecord(rec2.key);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,6 +115,7 @@ Future<void> testDHTRecordGetSet() async {
|
|||||||
final val = await rec.get();
|
final val = await rec.get();
|
||||||
await pool.deleteRecord(rec.key);
|
await pool.deleteRecord(rec.key);
|
||||||
expect(val, isNull);
|
expect(val, isNull);
|
||||||
|
await rec.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test set then get
|
// Test set then get
|
||||||
@ -125,6 +126,7 @@ Future<void> testDHTRecordGetSet() async {
|
|||||||
// Invalid subkey should throw
|
// Invalid subkey should throw
|
||||||
await expectLater(
|
await expectLater(
|
||||||
() async => rec2.get(subkey: 1), throwsA(isA<VeilidAPIException>()));
|
() async => rec2.get(subkey: 1), throwsA(isA<VeilidAPIException>()));
|
||||||
|
await rec2.close();
|
||||||
await pool.deleteRecord(rec2.key);
|
await pool.deleteRecord(rec2.key);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -151,7 +153,6 @@ Future<void> testDHTRecordDeepCreateDelete() async {
|
|||||||
// Make root record
|
// Make root record
|
||||||
final recroot = await pool.createRecord(debugName: 'test_deep_create_delete');
|
final recroot = await pool.createRecord(debugName: 'test_deep_create_delete');
|
||||||
|
|
||||||
for (var d = 0; d < numIterations; d++) {
|
|
||||||
// Make child set 1
|
// Make child set 1
|
||||||
var parent = recroot;
|
var parent = recroot;
|
||||||
final children = <DHTRecord>[];
|
final children = <DHTRecord>[];
|
||||||
@ -162,6 +163,19 @@ Future<void> testDHTRecordDeepCreateDelete() async {
|
|||||||
parent = child;
|
parent = child;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Should mark for deletion
|
||||||
|
expect(await pool.deleteRecord(recroot.key), isFalse);
|
||||||
|
|
||||||
|
// Root should still be valid
|
||||||
|
expect(await pool.isValidRecordKey(recroot.key), isTrue);
|
||||||
|
|
||||||
|
// Close root record
|
||||||
|
await recroot.close();
|
||||||
|
|
||||||
|
// Root should still be valid because children still exist
|
||||||
|
expect(await pool.isValidRecordKey(recroot.key), isTrue);
|
||||||
|
|
||||||
|
for (var d = 0; d < numIterations; d++) {
|
||||||
// Make child set 2
|
// Make child set 2
|
||||||
final children2 = <DHTRecord>[];
|
final children2 = <DHTRecord>[];
|
||||||
parent = recroot;
|
parent = recroot;
|
||||||
@ -171,31 +185,31 @@ Future<void> testDHTRecordDeepCreateDelete() async {
|
|||||||
children2.add(child);
|
children2.add(child);
|
||||||
parent = child;
|
parent = child;
|
||||||
}
|
}
|
||||||
// Should fail to delete root
|
|
||||||
await expectLater(
|
// Delete child set 2 in reverse order
|
||||||
() async => pool.deleteRecord(recroot.key), throwsA(isA<StateError>()));
|
for (var n = numChildren - 1; n >= 0; n--) {
|
||||||
|
expect(await pool.deleteRecord(children2[n].key), isFalse);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root should still be there
|
||||||
|
expect(await pool.isValidRecordKey(recroot.key), isTrue);
|
||||||
|
|
||||||
|
// Close child set 2
|
||||||
|
await children2.map((c) => c.close()).wait;
|
||||||
|
|
||||||
|
// All child set 2 should be invalid
|
||||||
|
for (final c2 in children2) {
|
||||||
|
// Children should be invalid and deleted now
|
||||||
|
expect(await pool.isValidRecordKey(c2.key), isFalse);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root should still be valid
|
||||||
|
expect(await pool.isValidRecordKey(recroot.key), isTrue);
|
||||||
|
}
|
||||||
|
|
||||||
// Close child set 1
|
// Close child set 1
|
||||||
await children.map((c) => c.close()).wait;
|
await children.map((c) => c.close()).wait;
|
||||||
|
|
||||||
// Delete child set 1 in reverse order
|
// Root should have gone away
|
||||||
for (var n = numChildren - 1; n >= 0; n--) {
|
expect(await pool.isValidRecordKey(recroot.key), isFalse);
|
||||||
await pool.deleteRecord(children[n].key);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should fail to delete root
|
|
||||||
await expectLater(
|
|
||||||
() async => pool.deleteRecord(recroot.key), throwsA(isA<StateError>()));
|
|
||||||
|
|
||||||
// Close child set 1
|
|
||||||
await children2.map((c) => c.close()).wait;
|
|
||||||
|
|
||||||
// Delete child set 2 in reverse order
|
|
||||||
for (var n = numChildren - 1; n >= 0; n--) {
|
|
||||||
await pool.deleteRecord(children2[n].key);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should be able to delete root now
|
|
||||||
await pool.deleteRecord(recroot.key);
|
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
import 'dart:convert';
|
import 'dart:convert';
|
||||||
|
|
||||||
import 'package:flutter_test/flutter_test.dart';
|
import 'package:test/test.dart';
|
||||||
import 'package:veilid_support/veilid_support.dart';
|
import 'package:veilid_support/veilid_support.dart';
|
||||||
|
|
||||||
Future<void> Function() makeTestDHTShortArrayCreateDelete(
|
Future<void> Function() makeTestDHTShortArrayCreateDelete(
|
||||||
@ -43,7 +43,7 @@ Future<void> Function() makeTestDHTShortArrayCreateDelete(
|
|||||||
// Operate should still succeed because things aren't closed
|
// Operate should still succeed because things aren't closed
|
||||||
expect(await arr.operate((r) async => r.length), isZero);
|
expect(await arr.operate((r) async => r.length), isZero);
|
||||||
await arr.close();
|
await arr.close();
|
||||||
await arr.close();
|
await expectLater(() async => arr.close(), throwsA(isA<StateError>()));
|
||||||
// Operate should fail
|
// Operate should fail
|
||||||
await expectLater(() async => arr.operate((r) async => r.length),
|
await expectLater(() async => arr.operate((r) async => r.length),
|
||||||
throwsA(isA<StateError>()));
|
throwsA(isA<StateError>()));
|
||||||
@ -52,8 +52,6 @@ Future<void> Function() makeTestDHTShortArrayCreateDelete(
|
|||||||
|
|
||||||
Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
|
Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
|
||||||
() async {
|
() async {
|
||||||
final startTime = DateTime.now();
|
|
||||||
|
|
||||||
final arr = await DHTShortArray.create(
|
final arr = await DHTShortArray.create(
|
||||||
debugName: 'sa_add 1 stride $stride', stride: stride);
|
debugName: 'sa_add 1 stride $stride', stride: stride);
|
||||||
|
|
||||||
@ -61,41 +59,77 @@ Future<void> Function() makeTestDHTShortArrayAdd({required int stride}) =>
|
|||||||
.map((n) => utf8.encode('elem $n'))
|
.map((n) => utf8.encode('elem $n'))
|
||||||
.toList();
|
.toList();
|
||||||
|
|
||||||
print('adding\n');
|
print('adding singles\n');
|
||||||
{
|
{
|
||||||
final (res, ok) = await arr.operateWrite((w) async {
|
final res = await arr.operateWrite((w) async {
|
||||||
for (var n = 0; n < dataset.length; n++) {
|
for (var n = 4; n < 8; n++) {
|
||||||
print('$n ');
|
print('$n ');
|
||||||
final success = await w.tryAddItem(dataset[n]);
|
final success = await w.tryAddItem(dataset[n]);
|
||||||
expect(success, isTrue);
|
expect(success, isTrue);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
expect(res, isNull);
|
expect(res, isNull);
|
||||||
expect(ok, isTrue);
|
}
|
||||||
|
|
||||||
|
print('adding batch\n');
|
||||||
|
{
|
||||||
|
final res = await arr.operateWrite((w) async {
|
||||||
|
print('${dataset.length ~/ 2}-${dataset.length}');
|
||||||
|
final success = await w.tryAddItems(
|
||||||
|
dataset.sublist(dataset.length ~/ 2, dataset.length));
|
||||||
|
expect(success, isTrue);
|
||||||
|
});
|
||||||
|
expect(res, isNull);
|
||||||
|
}
|
||||||
|
|
||||||
|
print('inserting singles\n');
|
||||||
|
{
|
||||||
|
final res = await arr.operateWrite((w) async {
|
||||||
|
for (var n = 0; n < 4; n++) {
|
||||||
|
print('$n ');
|
||||||
|
final success = await w.tryInsertItem(n, dataset[n]);
|
||||||
|
expect(success, isTrue);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
expect(res, isNull);
|
||||||
|
}
|
||||||
|
|
||||||
|
print('inserting batch\n');
|
||||||
|
{
|
||||||
|
final res = await arr.operateWrite((w) async {
|
||||||
|
print('8-${dataset.length ~/ 2}');
|
||||||
|
final success = await w.tryInsertItems(
|
||||||
|
8, dataset.sublist(8, dataset.length ~/ 2));
|
||||||
|
expect(success, isTrue);
|
||||||
|
});
|
||||||
|
expect(res, isNull);
|
||||||
}
|
}
|
||||||
|
|
||||||
//print('get all\n');
|
//print('get all\n');
|
||||||
{
|
{
|
||||||
final dataset2 = await arr.operate((r) async => r.getAllItems());
|
final dataset2 = await arr.operate((r) async => r.getItemRange(0));
|
||||||
expect(dataset2, equals(dataset));
|
expect(dataset2, equals(dataset));
|
||||||
}
|
}
|
||||||
|
{
|
||||||
|
final dataset3 =
|
||||||
|
await arr.operate((r) async => r.getItemRange(64, length: 128));
|
||||||
|
expect(dataset3, equals(dataset.sublist(64, 64 + 128)));
|
||||||
|
}
|
||||||
|
|
||||||
//print('clear\n');
|
//print('clear\n');
|
||||||
{
|
{
|
||||||
final (res, ok) = await arr.operateWrite((w) async => w.tryClear());
|
await arr.operateWriteEventual((w) async {
|
||||||
expect(res, isTrue);
|
await w.clear();
|
||||||
expect(ok, isTrue);
|
return true;
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
//print('get all\n');
|
//print('get all\n');
|
||||||
{
|
{
|
||||||
final dataset3 = await arr.operate((r) async => r.getAllItems());
|
final dataset4 = await arr.operate((r) async => r.getItemRange(0));
|
||||||
expect(dataset3, isEmpty);
|
expect(dataset4, isEmpty);
|
||||||
}
|
}
|
||||||
|
|
||||||
await arr.delete();
|
await arr.delete();
|
||||||
await arr.close();
|
await arr.close();
|
||||||
|
|
||||||
final endTime = DateTime.now();
|
|
||||||
print('Duration: ${endTime.difference(startTime)}');
|
|
||||||
};
|
};
|
||||||
|
@ -1,6 +1,30 @@
|
|||||||
# Generated by pub
|
# Generated by pub
|
||||||
# See https://dart.dev/tools/pub/glossary#lockfile
|
# See https://dart.dev/tools/pub/glossary#lockfile
|
||||||
packages:
|
packages:
|
||||||
|
_fe_analyzer_shared:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: _fe_analyzer_shared
|
||||||
|
sha256: "0b2f2bd91ba804e53a61d757b986f89f1f9eaed5b11e4b2f5a2468d86d6c9fc7"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "67.0.0"
|
||||||
|
analyzer:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: analyzer
|
||||||
|
sha256: "37577842a27e4338429a1cbc32679d508836510b056f1eedf0c8d20e39c1383d"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "6.4.1"
|
||||||
|
args:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: args
|
||||||
|
sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.5.0"
|
||||||
async:
|
async:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -81,6 +105,30 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.18.0"
|
version: "1.18.0"
|
||||||
|
convert:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: convert
|
||||||
|
sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "3.1.1"
|
||||||
|
coverage:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: coverage
|
||||||
|
sha256: "3945034e86ea203af7a056d98e98e42a5518fff200d6e8e6647e1886b07e936e"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.8.0"
|
||||||
|
crypto:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: crypto
|
||||||
|
sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "3.0.3"
|
||||||
cupertino_icons:
|
cupertino_icons:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -109,10 +157,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: fast_immutable_collections
|
name: fast_immutable_collections
|
||||||
sha256: "38fbc50df5b219dcfb83ebbc3275ec09872530ca1153858fc56fceadb310d037"
|
sha256: "533806a7f0c624c2e479d05d3fdce4c87109a7cd0db39b8cc3830d3a2e8dedc7"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "10.2.2"
|
version: "10.2.3"
|
||||||
ffi:
|
ffi:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -148,7 +196,7 @@ packages:
|
|||||||
source: sdk
|
source: sdk
|
||||||
version: "0.0.0"
|
version: "0.0.0"
|
||||||
flutter_test:
|
flutter_test:
|
||||||
dependency: "direct dev"
|
dependency: transitive
|
||||||
description: flutter
|
description: flutter
|
||||||
source: sdk
|
source: sdk
|
||||||
version: "0.0.0"
|
version: "0.0.0"
|
||||||
@ -165,11 +213,27 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.4.1"
|
version: "2.4.1"
|
||||||
|
frontend_server_client:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: frontend_server_client
|
||||||
|
sha256: f64a0333a82f30b0cca061bc3d143813a486dc086b574bfb233b7c1372427694
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "4.0.0"
|
||||||
fuchsia_remote_debug_protocol:
|
fuchsia_remote_debug_protocol:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description: flutter
|
description: flutter
|
||||||
source: sdk
|
source: sdk
|
||||||
version: "0.0.0"
|
version: "0.0.0"
|
||||||
|
glob:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: glob
|
||||||
|
sha256: "0e7014b3b7d4dac1ca4d6114f82bf1782ee86745b9b42a92c9289c23d8a0ab63"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.1.2"
|
||||||
globbing:
|
globbing:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -178,11 +242,43 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.0.0"
|
version: "1.0.0"
|
||||||
|
http_multi_server:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: http_multi_server
|
||||||
|
sha256: "97486f20f9c2f7be8f514851703d0119c3596d14ea63227af6f7a481ef2b2f8b"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "3.2.1"
|
||||||
|
http_parser:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: http_parser
|
||||||
|
sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "4.0.2"
|
||||||
integration_test:
|
integration_test:
|
||||||
dependency: "direct dev"
|
dependency: "direct dev"
|
||||||
description: flutter
|
description: flutter
|
||||||
source: sdk
|
source: sdk
|
||||||
version: "0.0.0"
|
version: "0.0.0"
|
||||||
|
io:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: io
|
||||||
|
sha256: "2ec25704aba361659e10e3e5f5d672068d332fc8ac516421d483a11e5cbd061e"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.0.4"
|
||||||
|
js:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: js
|
||||||
|
sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "0.7.1"
|
||||||
json_annotation:
|
json_annotation:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -195,26 +291,26 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: leak_tracker
|
name: leak_tracker
|
||||||
sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa"
|
sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "10.0.0"
|
version: "10.0.4"
|
||||||
leak_tracker_flutter_testing:
|
leak_tracker_flutter_testing:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: leak_tracker_flutter_testing
|
name: leak_tracker_flutter_testing
|
||||||
sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0
|
sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.0.1"
|
version: "3.0.3"
|
||||||
leak_tracker_testing:
|
leak_tracker_testing:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: leak_tracker_testing
|
name: leak_tracker_testing
|
||||||
sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47
|
sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.0.1"
|
version: "3.0.1"
|
||||||
lint_hard:
|
lint_hard:
|
||||||
dependency: "direct dev"
|
dependency: "direct dev"
|
||||||
description:
|
description:
|
||||||
@ -223,6 +319,14 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "4.0.0"
|
version: "4.0.0"
|
||||||
|
logging:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: logging
|
||||||
|
sha256: "623a88c9594aa774443aa3eb2d41807a48486b5613e67599fb4c41c0ad47c340"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.2.0"
|
||||||
loggy:
|
loggy:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -251,10 +355,34 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: meta
|
name: meta
|
||||||
sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04
|
sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.11.0"
|
version: "1.12.0"
|
||||||
|
mime:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: mime
|
||||||
|
sha256: "2e123074287cc9fd6c09de8336dae606d1ddb88d9ac47358826db698c176a1f2"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.0.5"
|
||||||
|
node_preamble:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: node_preamble
|
||||||
|
sha256: "6e7eac89047ab8a8d26cf16127b5ed26de65209847630400f9aefd7cd5c730db"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.0.2"
|
||||||
|
package_config:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: package_config
|
||||||
|
sha256: "1c5b77ccc91e4823a5af61ee74e6b972db1ef98c2ff5a18d3161c982a55448bd"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.1.0"
|
||||||
path:
|
path:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -327,6 +455,14 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.1.8"
|
version: "2.1.8"
|
||||||
|
pool:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: pool
|
||||||
|
sha256: "20fe868b6314b322ea036ba325e6fc0711a22948856475e2c2b6306e8ab39c2a"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.5.1"
|
||||||
process:
|
process:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -343,11 +479,67 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.1.0"
|
version: "3.1.0"
|
||||||
|
pub_semver:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: pub_semver
|
||||||
|
sha256: "40d3ab1bbd474c4c2328c91e3a7df8c6dd629b79ece4c4bd04bee496a224fb0c"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.1.4"
|
||||||
|
shelf:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: shelf
|
||||||
|
sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.4.1"
|
||||||
|
shelf_packages_handler:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: shelf_packages_handler
|
||||||
|
sha256: "89f967eca29607c933ba9571d838be31d67f53f6e4ee15147d5dc2934fee1b1e"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "3.0.2"
|
||||||
|
shelf_static:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: shelf_static
|
||||||
|
sha256: a41d3f53c4adf0f57480578c1d61d90342cd617de7fc8077b1304643c2d85c1e
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.1.2"
|
||||||
|
shelf_web_socket:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: shelf_web_socket
|
||||||
|
sha256: "9ca081be41c60190ebcb4766b2486a7d50261db7bd0f5d9615f2d653637a84c1"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.0.4"
|
||||||
sky_engine:
|
sky_engine:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description: flutter
|
description: flutter
|
||||||
source: sdk
|
source: sdk
|
||||||
version: "0.0.99"
|
version: "0.0.99"
|
||||||
|
source_map_stack_trace:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: source_map_stack_trace
|
||||||
|
sha256: "84cf769ad83aa6bb61e0aa5a18e53aea683395f196a6f39c4c881fb90ed4f7ae"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.1.1"
|
||||||
|
source_maps:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: source_maps
|
||||||
|
sha256: "708b3f6b97248e5781f493b765c3337db11c5d2c81c3094f10904bfa8004c703"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "0.10.12"
|
||||||
source_span:
|
source_span:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -412,14 +604,38 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.2.1"
|
version: "1.2.1"
|
||||||
|
test:
|
||||||
|
dependency: "direct dev"
|
||||||
|
description:
|
||||||
|
name: test
|
||||||
|
sha256: "7ee446762c2c50b3bd4ea96fe13ffac69919352bd3b4b17bac3f3465edc58073"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.25.2"
|
||||||
test_api:
|
test_api:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: test_api
|
name: test_api
|
||||||
sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b"
|
sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.6.1"
|
version: "0.7.0"
|
||||||
|
test_core:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: test_core
|
||||||
|
sha256: "2bc4b4ecddd75309300d8096f781c0e3280ca1ef85beda558d33fcbedc2eead4"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "0.6.0"
|
||||||
|
typed_data:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: typed_data
|
||||||
|
sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.3.2"
|
||||||
vector_math:
|
vector_math:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -453,10 +669,34 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: vm_service
|
name: vm_service
|
||||||
sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957
|
sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "13.0.0"
|
version: "14.2.1"
|
||||||
|
watcher:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: watcher
|
||||||
|
sha256: "3d2ad6751b3c16cf07c7fca317a1413b3f26530319181b37e3b9039b84fc01d8"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.1.0"
|
||||||
|
web:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: web
|
||||||
|
sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "0.5.1"
|
||||||
|
web_socket_channel:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: web_socket_channel
|
||||||
|
sha256: "58c6666b342a38816b2e7e50ed0f1e261959630becd4c879c4f26bfa14aa5a42"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "2.4.5"
|
||||||
webdriver:
|
webdriver:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -465,14 +705,22 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.0.3"
|
version: "3.0.3"
|
||||||
|
webkit_inspection_protocol:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: webkit_inspection_protocol
|
||||||
|
sha256: "87d3f2333bb240704cd3f1c6b5b7acd8a10e7f0bc28c28dcf14e782014f4a572"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.2.1"
|
||||||
win32:
|
win32:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: win32
|
name: win32
|
||||||
sha256: "0eaf06e3446824099858367950a813472af675116bf63f008a4c2a75ae13e9cb"
|
sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "5.5.0"
|
version: "5.5.1"
|
||||||
xdg_directories:
|
xdg_directories:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -481,6 +729,14 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.0.4"
|
version: "1.0.4"
|
||||||
|
yaml:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: yaml
|
||||||
|
sha256: "75769501ea3489fca56601ff33454fe45507ea3bfb014161abc3b43ae25989d5"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "3.1.2"
|
||||||
sdks:
|
sdks:
|
||||||
dart: ">=3.3.4 <4.0.0"
|
dart: ">=3.4.0 <4.0.0"
|
||||||
flutter: ">=3.19.1"
|
flutter: ">=3.19.1"
|
||||||
|
@ -15,11 +15,10 @@ dependencies:
|
|||||||
|
|
||||||
dev_dependencies:
|
dev_dependencies:
|
||||||
async_tools: ^0.1.1
|
async_tools: ^0.1.1
|
||||||
flutter_test:
|
|
||||||
sdk: flutter
|
|
||||||
integration_test:
|
integration_test:
|
||||||
sdk: flutter
|
sdk: flutter
|
||||||
lint_hard: ^4.0.0
|
lint_hard: ^4.0.0
|
||||||
|
test: ^1.25.2
|
||||||
veilid_test:
|
veilid_test:
|
||||||
path: ../../../../veilid/veilid-flutter/packages/veilid_test
|
path: ../../../../veilid/veilid-flutter/packages/veilid_test
|
||||||
|
|
||||||
|
@ -2,5 +2,7 @@
|
|||||||
|
|
||||||
library dht_support;
|
library dht_support;
|
||||||
|
|
||||||
|
export 'src/dht_log/barrel.dart';
|
||||||
export 'src/dht_record/barrel.dart';
|
export 'src/dht_record/barrel.dart';
|
||||||
export 'src/dht_short_array/barrel.dart';
|
export 'src/dht_short_array/barrel.dart';
|
||||||
|
export 'src/interfaces/interfaces.dart';
|
||||||
|
@ -23,6 +23,18 @@ message DHTData {
|
|||||||
uint32 size = 4;
|
uint32 size = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// DHTLog - represents a ring buffer of many elements with append/truncate semantics
|
||||||
|
// Header in subkey 0 of first key follows this structure
|
||||||
|
message DHTLog {
|
||||||
|
// Position of the start of the log (oldest items)
|
||||||
|
uint32 head = 1;
|
||||||
|
// Position of the end of the log (newest items)
|
||||||
|
uint32 tail = 2;
|
||||||
|
// Stride of each segment of the dhtlog
|
||||||
|
uint32 stride = 3;
|
||||||
|
}
|
||||||
|
|
||||||
// DHTShortArray - represents a re-orderable collection of up to 256 individual elements
|
// DHTShortArray - represents a re-orderable collection of up to 256 individual elements
|
||||||
// Header in subkey 0 of first key follows this structure
|
// Header in subkey 0 of first key follows this structure
|
||||||
//
|
//
|
||||||
@ -50,20 +62,6 @@ message DHTShortArray {
|
|||||||
// calculated through iteration
|
// calculated through iteration
|
||||||
}
|
}
|
||||||
|
|
||||||
// DHTLog - represents a long ring buffer of elements utilizing a multi-level
|
|
||||||
// indirection table of DHTShortArrays.
|
|
||||||
|
|
||||||
message DHTLog {
|
|
||||||
// Keys to concatenate
|
|
||||||
repeated veilid.TypedKey keys = 1;
|
|
||||||
// Back link to another DHTLog further back
|
|
||||||
veilid.TypedKey back = 2;
|
|
||||||
// Count of subkeys in all keys in this DHTLog
|
|
||||||
repeated uint32 subkey_counts = 3;
|
|
||||||
// Total count of subkeys in all keys in this DHTLog including all backlogs
|
|
||||||
uint32 total_subkeys = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataReference
|
// DataReference
|
||||||
// Pointer to data somewhere in Veilid
|
// Pointer to data somewhere in Veilid
|
||||||
// Abstraction over DHTData and BlockStore
|
// Abstraction over DHTData and BlockStore
|
||||||
|
@ -0,0 +1,2 @@
|
|||||||
|
export 'dht_log.dart';
|
||||||
|
export 'dht_log_cubit.dart';
|
312
packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart
Normal file
312
packages/veilid_support/lib/dht_support/src/dht_log/dht_log.dart
Normal file
@ -0,0 +1,312 @@
|
|||||||
|
import 'dart:async';
|
||||||
|
import 'dart:math';
|
||||||
|
import 'dart:typed_data';
|
||||||
|
|
||||||
|
import 'package:async_tools/async_tools.dart';
|
||||||
|
import 'package:collection/collection.dart';
|
||||||
|
import 'package:equatable/equatable.dart';
|
||||||
|
import 'package:meta/meta.dart';
|
||||||
|
|
||||||
|
import '../../../veilid_support.dart';
|
||||||
|
import '../../proto/proto.dart' as proto;
|
||||||
|
import '../interfaces/dht_append_truncate.dart';
|
||||||
|
|
||||||
|
part 'dht_log_spine.dart';
|
||||||
|
part 'dht_log_read.dart';
|
||||||
|
part 'dht_log_append.dart';
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
@immutable
|
||||||
|
class DHTLogUpdate extends Equatable {
|
||||||
|
const DHTLogUpdate(
|
||||||
|
{required this.headDelta, required this.tailDelta, required this.length})
|
||||||
|
: assert(headDelta >= 0, 'should never have negative head delta'),
|
||||||
|
assert(tailDelta >= 0, 'should never have negative tail delta'),
|
||||||
|
assert(length >= 0, 'should never have negative length');
|
||||||
|
final int headDelta;
|
||||||
|
final int tailDelta;
|
||||||
|
final int length;
|
||||||
|
|
||||||
|
@override
|
||||||
|
List<Object?> get props => [headDelta, tailDelta, length];
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DHTLog is a ring-buffer queue like data structure with the following
|
||||||
|
/// operations:
|
||||||
|
/// * Add elements to the tail
|
||||||
|
/// * Remove elements from the head
|
||||||
|
/// The structure has a 'spine' record that acts as an indirection table of
|
||||||
|
/// DHTShortArray record pointers spread over its subkeys.
|
||||||
|
/// Subkey 0 of the DHTLog is a head subkey that contains housekeeping data:
|
||||||
|
/// * The head and tail position of the log
|
||||||
|
/// - subkeyIdx = pos / recordsPerSubkey
|
||||||
|
/// - recordIdx = pos % recordsPerSubkey
|
||||||
|
/// A log of items stored in a ring of DHTShortArray segments, addressed
/// through a 'spine' record. Supports append/truncate at the ends and
/// random read access by position.
class DHTLog implements DHTDeleteable<DHTLog, DHTLog> {
  ////////////////////////////////////////////////////////////////
  // Constructors

  DHTLog._({required _DHTLogSpine spine})
      : _spine = spine,
        _openCount = 1 {
    // Forward spine head changes to any stream listeners of this log
    _spine.onUpdatedSpine = (update) {
      _watchController?.sink.add(update);
    };
  }

  /// Create a DHTLog
  static Future<DHTLog> create(
      {required String debugName,
      int stride = DHTShortArray.maxElements,
      VeilidRoutingContext? routingContext,
      TypedKey? parent,
      DHTRecordCrypto? crypto,
      KeyPair? writer}) async {
    assert(stride <= DHTShortArray.maxElements, 'stride too long');
    final pool = DHTRecordPool.instance;

    late final DHTRecord spineRecord;
    if (writer != null) {
      // Writable log: schema with a single member keyed by the writer
      final schema = DHTSchema.smpl(
          oCnt: 0,
          members: [DHTSchemaMember(mKey: writer.key, mCnt: spineSubkeys + 1)]);
      spineRecord = await pool.createRecord(
          debugName: debugName,
          parent: parent,
          routingContext: routingContext,
          schema: schema,
          crypto: crypto,
          writer: writer);
    } else {
      const schema = DHTSchema.dflt(oCnt: spineSubkeys + 1);
      spineRecord = await pool.createRecord(
          debugName: debugName,
          parent: parent,
          routingContext: routingContext,
          schema: schema,
          crypto: crypto);
    }

    try {
      final spine = await _DHTLogSpine.create(
          spineRecord: spineRecord, segmentStride: stride);
      return DHTLog._(spine: spine);
    } on Exception catch (_) {
      // Creation failed: clean up the partially created spine record
      await spineRecord.close();
      await spineRecord.delete();
      rethrow;
    }
  }

  /// Open an existing DHTLog for read-only access
  static Future<DHTLog> openRead(TypedKey logRecordKey,
      {required String debugName,
      VeilidRoutingContext? routingContext,
      TypedKey? parent,
      DHTRecordCrypto? crypto}) async {
    final spineRecord = await DHTRecordPool.instance.openRecordRead(
        logRecordKey,
        debugName: debugName,
        parent: parent,
        routingContext: routingContext,
        crypto: crypto);
    try {
      final spine = await _DHTLogSpine.load(spineRecord: spineRecord);
      final dhtLog = DHTLog._(spine: spine);
      return dhtLog;
    } on Exception catch (_) {
      await spineRecord.close();
      rethrow;
    }
  }

  /// Open an existing DHTLog for read-write access with the given writer
  static Future<DHTLog> openWrite(
    TypedKey logRecordKey,
    KeyPair writer, {
    required String debugName,
    VeilidRoutingContext? routingContext,
    TypedKey? parent,
    DHTRecordCrypto? crypto,
  }) async {
    final spineRecord = await DHTRecordPool.instance.openRecordWrite(
        logRecordKey, writer,
        debugName: debugName,
        parent: parent,
        routingContext: routingContext,
        crypto: crypto);
    try {
      final spine = await _DHTLogSpine.load(spineRecord: spineRecord);
      final dhtLog = DHTLog._(spine: spine);
      return dhtLog;
    } on Exception catch (_) {
      await spineRecord.close();
      rethrow;
    }
  }

  /// Open an owned DHTLog for read-write access using its owner keypair
  static Future<DHTLog> openOwned(
    OwnedDHTRecordPointer ownedLogRecordPointer, {
    required String debugName,
    required TypedKey parent,
    VeilidRoutingContext? routingContext,
    DHTRecordCrypto? crypto,
  }) =>
      openWrite(
        ownedLogRecordPointer.recordKey,
        ownedLogRecordPointer.owner,
        debugName: debugName,
        routingContext: routingContext,
        parent: parent,
        crypto: crypto,
      );

  ////////////////////////////////////////////////////////////////////////////
  // DHTCloseable

  /// Check if the DHTLog is open
  @override
  bool get isOpen => _openCount > 0;

  /// The type of the openable scope
  @override
  FutureOr<DHTLog> scoped() => this;

  /// Add a reference to this log
  @override
  Future<DHTLog> ref() async => _mutex.protect(() async {
        _openCount++;
        return this;
      });

  /// Free all resources for the DHTLog
  @override
  Future<void> close() async => _mutex.protect(() async {
        if (_openCount == 0) {
          throw StateError('already closed');
        }
        _openCount--;
        if (_openCount != 0) {
          // Other references still outstanding
          return;
        }
        await _watchController?.close();
        _watchController = null;
        await _spine.close();
      });

  /// Free all resources for the DHTLog and delete it from the DHT
  /// Will wait until the short array is closed to delete it
  @override
  Future<void> delete() async {
    await _spine.delete();
  }

  ////////////////////////////////////////////////////////////////////////////
  // Public API

  /// Get the record key for this log
  TypedKey get recordKey => _spine.recordKey;

  /// Get the record pointer for this log
  OwnedDHTRecordPointer get recordPointer => _spine.recordPointer;

  /// Runs a closure allowing read-only access to the log
  Future<T?> operate<T>(Future<T?> Function(DHTRandomRead) closure) async {
    if (!isOpen) {
      // FIX: message previously contained a stray '"' character
      throw StateError('log is not open');
    }

    return _spine.operate((spine) async {
      final reader = _DHTLogRead._(spine);
      return closure(reader);
    });
  }

  /// Runs a closure allowing append/truncate access to the log
  /// Makes only one attempt to consistently write the changes to the DHT
  /// Returns result of the closure if the write could be performed
  /// Throws DHTOperateException if the write could not be performed
  /// at this time
  Future<T> operateAppend<T>(
      Future<T> Function(DHTAppendTruncateRandomRead) closure) async {
    if (!isOpen) {
      throw StateError('log is not open');
    }

    return _spine.operateAppend((spine) async {
      final writer = _DHTLogAppend._(spine);
      return closure(writer);
    });
  }

  /// Runs a closure allowing append/truncate access to the log
  /// Will execute the closure multiple times if a consistent write to the DHT
  /// is not achieved. Timeout if specified will be thrown as a
  /// TimeoutException. The closure should return true if its changes also
  /// succeeded, returning false will trigger another eventual consistency
  /// attempt.
  Future<void> operateAppendEventual(
      Future<bool> Function(DHTAppendTruncateRandomRead) closure,
      {Duration? timeout}) async {
    if (!isOpen) {
      throw StateError('log is not open');
    }

    return _spine.operateAppendEventual((spine) async {
      final writer = _DHTLogAppend._(spine);
      return closure(writer);
    }, timeout: timeout);
  }

  /// Listen to and any all changes to the structure of this log
  /// regardless of where the changes are coming from
  Future<StreamSubscription<void>> listen(
    void Function(DHTLogUpdate) onChanged,
  ) {
    if (!isOpen) {
      throw StateError('log is not open');
    }

    return _listenMutex.protect(() async {
      // If don't have a controller yet, set it up
      if (_watchController == null) {
        // Set up watch requirements
        _watchController =
            StreamController<DHTLogUpdate>.broadcast(onCancel: () {
          // If there are no more listeners then we can get
          // rid of the controller and drop our subscriptions
          unawaited(_listenMutex.protect(() async {
            // Cancel watches of head record
            await _spine.cancelWatch();
            _watchController = null;
          }));
        });

        // Start watching head subkey of the spine
        await _spine.watch();
      }
      // Return subscription
      return _watchController!.stream.listen((upd) => onChanged(upd));
    });
  }

  ////////////////////////////////////////////////////////////////
  // Fields

  // 56 subkeys * 512 segments * 36 bytes per typedkey =
  // 1032192 bytes per record
  // 512*36 = 18432 bytes per subkey
  // 28672 shortarrays * 256 elements = 7340032 elements
  static const spineSubkeys = 56;
  static const segmentsPerSubkey = 512;

  // Internal representation refreshed from spine record
  final _DHTLogSpine _spine;

  // Openable
  int _openCount;
  final _mutex = Mutex();

  // Watch mutex to ensure we keep the representation valid
  final Mutex _listenMutex = Mutex();
  // Stream of external changes
  StreamController<DHTLogUpdate>? _watchController;
}
|
@ -0,0 +1,94 @@
|
|||||||
|
part of 'dht_log.dart';
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Append/truncate implementation
|
||||||
|
|
||||||
|
/// Append/truncate view over a _DHTLogSpine. Extends the read-only view
/// with tail-append and head-release operations.
class _DHTLogAppend extends _DHTLogRead implements DHTAppendTruncateRandomRead {
  _DHTLogAppend._(super.spine) : super._();

  /// Appends one item at the tail of the log.
  /// Returns the result of the underlying shortarray write.
  @override
  Future<bool> tryAppendItem(Uint8List value) async {
    // Allocate empty index at the end of the list
    final insertPos = _spine.length;
    _spine.allocateTail(1);
    final lookup = await _spine.lookupPosition(insertPos);
    if (lookup == null) {
      throw StateError("can't write to dht log");
    }

    // Write item to the segment
    return lookup.scope((sa) => sa.operateWrite((write) async {
          // If this a new segment, then clear it in case we have wrapped around
          if (lookup.pos == 0) {
            await write.clear();
          } else if (lookup.pos != write.length) {
            // We should always be appending at the length
            throw StateError('appending should be at the end');
          }
          return write.tryAddItem(value);
        }));
  }

  /// Appends a batch of items at the tail, splitting the batch across
  /// segment boundaries and issuing segment writes through a DelayedWaitSet.
  /// Returns false if any segment write reported failure.
  @override
  Future<bool> tryAppendItems(List<Uint8List> values) async {
    // Allocate empty index at the end of the list
    final insertPos = _spine.length;
    _spine.allocateTail(values.length);

    // Look up the first position and shortarray
    final dws = DelayedWaitSet<void>();

    // Shared success flag, cleared by any failing segment write closure
    var success = true;
    for (var valueIdx = 0; valueIdx < values.length;) {
      final remaining = values.length - valueIdx;

      final lookup = await _spine.lookupPosition(insertPos + valueIdx);
      if (lookup == null) {
        throw StateError("can't write to dht log");
      }

      // Number of values that fit in this segment from the lookup position
      final sacount = min(remaining, DHTShortArray.maxElements - lookup.pos);
      final sublistValues = values.sublist(valueIdx, valueIdx + sacount);

      dws.add(() async {
        final ok = await lookup.scope((sa) => sa.operateWrite((write) async {
              // If this a new segment, then clear it in
              // case we have wrapped around
              if (lookup.pos == 0) {
                await write.clear();
              } else if (lookup.pos != write.length) {
                // We should always be appending at the length
                throw StateError('appending should be at the end');
              }
              return write.tryAddItems(sublistValues);
            }));
        if (!ok) {
          success = false;
        }
      });

      valueIdx += sacount;
    }

    // Run all queued segment writes
    await dws();

    return success;
  }

  /// Releases [count] items from the head of the log.
  /// A count larger than the log is clamped; zero is a no-op.
  @override
  Future<void> truncate(int count) async {
    count = min(count, _spine.length);
    if (count == 0) {
      return;
    }
    if (count < 0) {
      throw StateError('can not remove negative items');
    }
    await _spine.releaseHead(count);
  }

  /// Releases every item in the log.
  @override
  Future<void> clear() async {
    await _spine.releaseHead(_spine.length);
  }
}
|
@ -0,0 +1,220 @@
|
|||||||
|
import 'dart:async';
|
||||||
|
|
||||||
|
import 'package:async_tools/async_tools.dart';
|
||||||
|
import 'package:bloc/bloc.dart';
|
||||||
|
import 'package:bloc_advanced_tools/bloc_advanced_tools.dart';
|
||||||
|
import 'package:equatable/equatable.dart';
|
||||||
|
import 'package:fast_immutable_collections/fast_immutable_collections.dart';
|
||||||
|
import 'package:meta/meta.dart';
|
||||||
|
|
||||||
|
import '../../../veilid_support.dart';
|
||||||
|
import '../interfaces/dht_append_truncate.dart';
|
||||||
|
|
||||||
|
/// One decoded log element plus whether its write has not yet been
/// confirmed on the network.
@immutable
class DHTLogElementState<T> extends Equatable {
  const DHTLogElementState({required this.value, required this.isOffline});
  // Decoded element value
  final T value;
  // Whether this element's log position was reported as offline
  final bool isOffline;

  @override
  List<Object?> get props => [value, isOffline];
}
|
||||||
|
|
||||||
|
/// Immutable window-view state emitted by DHTLogCubit.
@immutable
class DHTLogStateData<T> extends Equatable {
  const DHTLogStateData(
      {required this.elements,
      required this.tail,
      required this.count,
      required this.follow});
  // The view of the elements in the dhtlog
  // Span is from [tail-length, tail)
  final IList<DHTLogElementState<T>> elements;
  // One past the end of the last element
  final int tail;
  // The total number of elements to try to keep in 'elements'
  final int count;
  // If we should have the tail following the log
  final bool follow;

  @override
  List<Object?> get props => [elements, tail, count, follow];
}
|
||||||
|
|
||||||
|
/// Asynchronously-loaded view state of a DHTLog window.
typedef DHTLogState<T> = AsyncValue<DHTLogStateData<T>>;

/// DHTLogState wrapped with busy tracking.
typedef DHTLogBusyState<T> = BlocBusyState<DHTLogState<T>>;
|
||||||
|
|
||||||
|
/// Cubit exposing a paginated, optionally tail-following window into a
/// DHTLog, decoding raw elements with [_decodeElement].
class DHTLogCubit<T> extends Cubit<DHTLogBusyState<T>>
    with BlocBusyWrapper<DHTLogState<T>> {
  DHTLogCubit({
    required Future<DHTLog> Function() open,
    required T Function(List<int> data) decodeElement,
  })  : _decodeElement = decodeElement,
        super(const BlocBusyState(AsyncValue.loading())) {
    _initWait.add(() async {
      // Open DHT record
      _log = await open();
      _wantsCloseRecord = true;

      // Make initial state update
      await _refreshNoWait();
      _subscription = await _log.listen(_update);
    });
  }

  // Set the tail position of the log for pagination.
  // If tail is 0, the end of the log is used.
  // If tail is negative, the position is subtracted from the current log
  // length.
  // If tail is positive, the position is absolute from the head of the log
  // If follow is enabled, the tail offset will update when the log changes
  Future<void> setWindow(
      {int? tail, int? count, bool? follow, bool forceRefresh = false}) async {
    await _initWait();
    if (tail != null) {
      _tail = tail;
    }
    if (count != null) {
      _count = count;
    }
    if (follow != null) {
      _follow = follow;
    }
    await _refreshNoWait(forceRefresh: forceRefresh);
  }

  /// Reload the current window and emit a fresh state.
  Future<void> refresh({bool forceRefresh = false}) async {
    await _initWait();
    await _refreshNoWait(forceRefresh: forceRefresh);
  }

  Future<void> _refreshNoWait({bool forceRefresh = false}) async =>
      busy((emit) async => _refreshInner(emit, forceRefresh: forceRefresh));

  // Load the window and emit loading/error/data accordingly.
  Future<void> _refreshInner(void Function(AsyncValue<DHTLogStateData<T>>) emit,
      {bool forceRefresh = false}) async {
    // FIX: forceRefresh was previously dropped here instead of being
    // forwarded to the element loader
    final avElements =
        await _loadElements(_tail, _count, forceRefresh: forceRefresh);
    final err = avElements.asError;
    if (err != null) {
      emit(AsyncValue.error(err.error, err.stackTrace));
      return;
    }
    final loading = avElements.asLoading;
    if (loading != null) {
      emit(const AsyncValue.loading());
      return;
    }
    final elements = avElements.asData!.value;
    emit(AsyncValue.data(DHTLogStateData(
        elements: elements, tail: _tail, count: _count, follow: _follow)));
  }

  // Load up to [count] elements ending at window position [tail],
  // annotating each with its offline status.
  Future<AsyncValue<IList<DHTLogElementState<T>>>> _loadElements(
      int tail, int count,
      {bool forceRefresh = false}) async {
    try {
      final allItems = await _log.operate((reader) async {
        final length = reader.length;
        // FIX: guard the empty log; '(tail - 1) % length' below would
        // throw on modulo by zero
        if (length == 0) {
          return IList<DHTLogElementState<T>>();
        }
        final end = ((tail - 1) % length) + 1;
        final start = (count < end) ? end - count : 0;

        final offlinePositions = await reader.getOfflinePositions();
        final allItems = (await reader.getItemRange(start,
                length: end - start, forceRefresh: forceRefresh))
            ?.indexed
            .map((x) => DHTLogElementState(
                value: _decodeElement(x.$2),
                isOffline: offlinePositions.contains(x.$1)))
            .toIList();
        return allItems;
      });
      if (allItems == null) {
        return const AsyncValue.loading();
      }
      return AsyncValue.data(allItems);
    } on Exception catch (e, st) {
      return AsyncValue.error(e, st);
    }
  }

  void _update(DHTLogUpdate upd) {
    // Run at most one background update process
    // Because this is async, we could get an update while we're
    // still processing the last one. Only called after init future has run
    // so we dont have to wait for that here.

    // Accumulate head and tail deltas
    _headDelta += upd.headDelta;
    _tailDelta += upd.tailDelta;

    _sspUpdate.busyUpdate<T, DHTLogState<T>>(busy, (emit) async {
      // apply follow
      // FIX: guard the modulo arithmetic below against an empty log
      if (upd.length == 0) {
        _tail = 0;
      } else if (_follow) {
        if (_tail <= 0) {
          // Negative tail is already following tail changes
        } else {
          // Positive tail is measured from the head, so apply deltas
          _tail = (_tail + _tailDelta - _headDelta) % upd.length;
        }
      } else {
        if (_tail <= 0) {
          // Negative tail is following tail changes so apply deltas
          var posTail = _tail + upd.length;
          posTail = (posTail + _tailDelta - _headDelta) % upd.length;
          _tail = posTail - upd.length;
        } else {
          // Positive tail is measured from head so not following tail
        }
      }
      _headDelta = 0;
      _tailDelta = 0;

      await _refreshInner(emit);
    });
  }

  @override
  Future<void> close() async {
    await _initWait();
    await _subscription?.cancel();
    _subscription = null;
    if (_wantsCloseRecord) {
      await _log.close();
    }
    await super.close();
  }

  /// Runs a closure allowing read-only access to the log
  Future<R?> operate<R>(Future<R?> Function(DHTRandomRead) closure) async {
    await _initWait();
    return _log.operate(closure);
  }

  /// Runs a closure allowing single-attempt append/truncate access
  Future<R> operateAppend<R>(
      Future<R> Function(DHTAppendTruncateRandomRead) closure) async {
    await _initWait();
    return _log.operateAppend(closure);
  }

  /// Runs a closure allowing eventually-consistent append/truncate access
  Future<void> operateAppendEventual(
      Future<bool> Function(DHTAppendTruncateRandomRead) closure,
      {Duration? timeout}) async {
    await _initWait();
    return _log.operateAppendEventual(closure, timeout: timeout);
  }

  final WaitSet<void> _initWait = WaitSet();
  late final DHTLog _log;
  final T Function(List<int> data) _decodeElement;
  StreamSubscription<void>? _subscription;
  bool _wantsCloseRecord = false;
  final _sspUpdate = SingleStatelessProcessor();

  // Accumulated deltas since last update
  var _headDelta = 0;
  var _tailDelta = 0;

  // Cubit window into the DHTLog
  var _tail = 0;
  var _count = DHTShortArray.maxElements;
  var _follow = true;
}
|
@ -0,0 +1,103 @@
|
|||||||
|
part of 'dht_log.dart';
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Reader-only implementation
|
||||||
|
|
||||||
|
/// Read-only view over a _DHTLogSpine, implementing DHTRandomRead by
/// resolving log positions to shortarray segments.
class _DHTLogRead implements DHTRandomRead {
  _DHTLogRead._(_DHTLogSpine spine) : _spine = spine;

  @override
  int get length => _spine.length;

  /// Returns the item at log position [pos], or null if the containing
  /// segment could not be resolved at this time.
  @override
  Future<Uint8List?> getItem(int pos, {bool forceRefresh = false}) async {
    if (pos < 0 || pos >= length) {
      throw IndexError.withLength(pos, length);
    }
    final lookup = await _spine.lookupPosition(pos);
    if (lookup == null) {
      return null;
    }

    // Read from the segment shortarray at the in-segment position
    return lookup.scope((sa) => sa.operate(
        (read) => read.getItem(lookup.pos, forceRefresh: forceRefresh)));
  }

  // Clamp a (start, len) window to the current spine length;
  // throws IndexError when start is out of range.
  (int, int) _clampStartLen(int start, int? len) {
    len ??= _spine.length;
    if (start < 0) {
      throw IndexError.withLength(start, _spine.length);
    }
    if (start > _spine.length) {
      throw IndexError.withLength(start, _spine.length);
    }
    if ((len + start) > _spine.length) {
      len = _spine.length - start;
    }
    return (start, len);
  }

  /// Returns [length] items starting at [start], fetching up to
  /// maxDHTConcurrency items in parallel per chunk.
  /// Returns null if any individual item read fails.
  @override
  Future<List<Uint8List>?> getItemRange(int start,
      {int? length, bool forceRefresh = false}) async {
    final out = <Uint8List>[];
    (start, length) = _clampStartLen(start, length);

    final chunks = Iterable<int>.generate(length).slices(maxDHTConcurrency).map(
        (chunk) => chunk
            .map((pos) => getItem(pos + start, forceRefresh: forceRefresh)));

    for (final chunk in chunks) {
      final elems = await chunk.wait;
      if (elems.contains(null)) {
        // At least one read failed; report the whole range as unavailable
        return null;
      }
      out.addAll(elems.cast<Uint8List>());
    }

    return out;
  }

  /// Returns the set of log positions whose data has not yet been written
  /// to the network, scanning segments backward from the tail and stopping
  /// at the first segment containing no offline positions.
  @override
  Future<Set<int>> getOfflinePositions() async {
    final positionOffline = <int>{};

    // Iterate positions backward from most recent
    for (var pos = _spine.length - 1; pos >= 0; pos--) {
      final lookup = await _spine.lookupPosition(pos);
      if (lookup == null) {
        throw StateError('Unable to look up position');
      }

      // Check each segment for offline positions
      var foundOffline = false;
      await lookup.scope((sa) => sa.operate((read) async {
            final segmentOffline = await read.getOfflinePositions();

            // For each shortarray segment go through their segment positions
            // in reverse order and see if they are offline
            // NOTE(review): 'pos' here is the outer loop variable, mutated
            // inside this closure so each pass consumes one whole segment
            for (var segmentPos = lookup.pos;
                segmentPos >= 0 && pos >= 0;
                segmentPos--, pos--) {
              // If the position in the segment is offline, then
              // mark the position in the log as offline
              if (segmentOffline.contains(segmentPos)) {
                positionOffline.add(pos);
                foundOffline = true;
              }
            }
          }));

      // If we found nothing offline in this segment then we can stop
      if (!foundOffline) {
        break;
      }
    }

    return positionOffline;
  }

  ////////////////////////////////////////////////////////////////////////////
  // Fields
  final _DHTLogSpine _spine;
}
|
@ -0,0 +1,707 @@
|
|||||||
|
part of 'dht_log.dart';
|
||||||
|
|
||||||
|
/// Reference-counted handle to an opened shortarray segment resolved from
/// a log position. Closing the last reference notifies the spine that the
/// segment is no longer in use.
class _DHTLogPosition extends DHTCloseable<_DHTLogPosition, DHTShortArray> {
  _DHTLogPosition._({
    required _DHTLogSpine dhtLogSpine,
    required DHTShortArray shortArray,
    required this.pos,
    required int segmentNumber,
  })  : _segmentShortArray = shortArray,
        _dhtLogSpine = dhtLogSpine,
        _segmentNumber = segmentNumber;
  // Position of the element within the segment shortarray
  final int pos;

  final _DHTLogSpine _dhtLogSpine;
  final DHTShortArray _segmentShortArray;
  // Outstanding reference count
  var _openCount = 1;
  final int _segmentNumber;
  final Mutex _mutex = Mutex();

  /// Check if the DHTLogPosition is open
  @override
  bool get isOpen => _openCount > 0;

  /// The type of the openable scope
  @override
  FutureOr<DHTShortArray> scoped() => _segmentShortArray;

  /// Add a reference to this log
  @override
  Future<_DHTLogPosition> ref() async => _mutex.protect(() async {
        _openCount++;
        return this;
      });

  /// Free all resources for the DHTLogPosition
  @override
  Future<void> close() async => _mutex.protect(() async {
        if (_openCount == 0) {
          throw StateError('already closed');
        }
        _openCount--;
        if (_openCount != 0) {
          return;
        }
        // Last reference dropped; release the segment back to the spine
        await _dhtLogSpine._segmentClosed(_segmentNumber);
      });
}
|
||||||
|
|
||||||
|
/// Bookkeeping entry for an open shortarray segment, tracking outstanding
/// references to it.
class _OpenedSegment {
  _OpenedSegment._({
    required this.shortArray,
  });

  final DHTShortArray shortArray;
  // Number of outstanding references to this segment
  int openCount = 1;
}
|
||||||
|
|
||||||
|
/// Value type addressing a segment: the spine subkey it lives in and the
/// segment slot within that subkey.
class _DHTLogSegmentLookup extends Equatable {
  const _DHTLogSegmentLookup({required this.subkey, required this.segment});
  final int subkey;
  final int segment;

  @override
  List<Object?> get props => [subkey, segment];
}
|
||||||
|
|
||||||
|
/// Mutable cache of one spine subkey's raw bytes, with a dirty flag so
/// only modified subkeys need to be written back.
class _SubkeyData {
  _SubkeyData({required this.subkey, required this.data});
  int subkey;
  Uint8List data;
  // Whether 'data' has been modified since it was read
  bool changed = false;
}
|
||||||
|
|
||||||
|
class _DHTLogSpine {
|
||||||
|
// Construct a spine over an open spine record with the given
// head/tail positions and segment stride
_DHTLogSpine._(
    {required DHTRecord spineRecord,
    required int head,
    required int tail,
    required int stride})
    : _spineRecord = spineRecord,
      _head = head,
      _tail = tail,
      _segmentStride = stride,
      _openedSegments = {},
      _spineCache = [];
|
||||||
|
|
||||||
|
// Create a new spine record and push it to the network
|
||||||
|
// Create a new spine record and push it to the network
static Future<_DHTLogSpine> create(
    {required DHTRecord spineRecord, required int segmentStride}) async {
  // Construct new spinehead
  final spine = _DHTLogSpine._(
      spineRecord: spineRecord, head: 0, tail: 0, stride: segmentStride);

  // Write new spine head record to the network
  await spine.operate((spine) async {
    final success = await spine.writeSpineHead();
    // A fresh record cannot conflict with an existing head
    assert(success, 'false return should never happen on create');
  });

  return spine;
}
|
||||||
|
|
||||||
|
// Pull the latest or updated copy of the spine head record from the network
|
||||||
|
// Pull the latest or updated copy of the spine head record from the network
static Future<_DHTLogSpine> load({required DHTRecord spineRecord}) async {
  // Get an updated spine head record copy if one exists
  final spineHead = await spineRecord.getProtobuf(proto.DHTLog.fromBuffer,
      subkey: 0, refreshMode: DHTRecordRefreshMode.network);
  if (spineHead == null) {
    throw StateError('spine head missing during refresh');
  }
  return _DHTLogSpine._(
      spineRecord: spineRecord,
      head: spineHead.head,
      tail: spineHead.tail,
      stride: spineHead.stride);
}
|
||||||
|
|
||||||
|
// Serialize the current head/tail/stride into a protobuf head message
proto.DHTLog _toProto() {
  assert(_spineMutex.isLocked, 'should be in mutex here');

  final logHead = proto.DHTLog()
    ..head = _head
    ..tail = _tail
    ..stride = _segmentStride;
  return logHead;
}
|
||||||
|
|
||||||
|
// Close the spine record and all cached segment shortarrays
Future<void> close() async {
  await _spineMutex.protect(() async {
    if (!isOpen) {
      return;
    }
    // Close the spine record and every cached segment concurrently
    final futures = <Future<void>>[_spineRecord.close()];
    for (final (_, sc) in _spineCache) {
      futures.add(sc.close());
    }
    await Future.wait(futures);

    assert(_openedSegments.isEmpty, 'should have closed all segments by now');
  });
}
|
||||||
|
|
||||||
|
// Delete the spine record from the DHT
Future<void> delete() async {
  await _spineMutex.protect(() async {
    // Will deep delete all segment records as they are children
    await _spineRecord.delete();
  });
}
|
||||||
|
|
||||||
|
// Run a read-only closure while holding the spine mutex
Future<T> operate<T>(Future<T> Function(_DHTLogSpine) closure) async =>
    // ignore: prefer_expression_function_bodies
    _spineMutex.protect(() async {
      return closure(this);
    });
|
||||||
|
|
||||||
|
// Run a mutating closure and make a single attempt to write the updated
// spine head; reverts head/tail on failure or exception
Future<T> operateAppend<T>(Future<T> Function(_DHTLogSpine) closure) async =>
    _spineMutex.protect(() async {
      final oldHead = _head;
      final oldTail = _tail;
      try {
        final out = await closure(this);
        // Write head assuming it has been changed
        if (!await writeSpineHead(old: (oldHead, oldTail))) {
          // Failed to write head means head got overwritten so write should
          // be considered failed
          throw DHTExceptionTryAgain();
        }
        return out;
      } on Exception {
        // Exception means state needs to be reverted
        _head = oldHead;
        _tail = oldTail;
        rethrow;
      }
    });
|
||||||
|
|
||||||
|
// Run a mutating closure repeatedly until both the closure and the spine
// head write succeed, optionally bounded by a timeout. Head/tail state is
// reverted on each failed attempt and on exception.
Future<void> operateAppendEventual(
    Future<bool> Function(_DHTLogSpine) closure,
    {Duration? timeout}) async {
  final timeoutTs = timeout == null
      ? null
      : Veilid.instance.now().offset(TimestampDuration.fromDuration(timeout));

  await _spineMutex.protect(() async {
    late int oldHead;
    late int oldTail;

    try {
      // Iterate until we have a successful element and head write
      do {
        // Save off old values each pass of writeSpineHead because the head
        // will have changed
        oldHead = _head;
        oldTail = _tail;

        // Try to do the element write
        while (true) {
          if (timeoutTs != null) {
            final now = Veilid.instance.now();
            if (now >= timeoutTs) {
              throw TimeoutException('timeout reached');
            }
          }
          if (await closure(this)) {
            break;
          }
          // Failed to write in closure resets state
          _head = oldHead;
          _tail = oldTail;
        }

        // Try to do the head write
      } while (!await writeSpineHead(old: (oldHead, oldTail)));
    } on Exception {
      // Exception means state needs to be reverted
      _head = oldHead;
      _tail = oldTail;
      rethrow;
    }
  });
}
|
||||||
|
|
||||||
|
/// Serialize and write out the current spine head subkey, possibly updating
/// it if a newer copy is available online. Returns true if the write was
/// successful
Future<bool> writeSpineHead({(int, int)? old}) async {
  assert(_spineMutex.isLocked, 'should be in mutex here');

  final headBuffer = _toProto().writeToBuffer();

  final existingData = await _spineRecord.tryWriteBytes(headBuffer);
  if (existingData != null) {
    // Head write failed, incorporate update
    await _updateHead(proto.DHTLog.fromBuffer(existingData));
    // Notify listeners of the change relative to the previous head/tail
    if (old != null) {
      sendUpdate(old.$1, old.$2);
    }
    return false;
  }
  if (old != null) {
    sendUpdate(old.$1, old.$2);
  }
  return true;
}
|
||||||
|
|
||||||
|
/// Send a spine update callback
|
||||||
|
void sendUpdate(int oldHead, int oldTail) {
|
||||||
|
final oldLength = _ringDistance(oldTail, oldHead);
|
||||||
|
if (oldHead != _head || oldTail != _tail || oldLength != length) {
|
||||||
|
onUpdatedSpine?.call(DHTLogUpdate(
|
||||||
|
headDelta: _ringDistance(_head, oldHead),
|
||||||
|
tailDelta: _ringDistance(_tail, oldTail),
|
||||||
|
length: length));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Validate a new spine head subkey that has come in from the network
|
||||||
|
Future<void> _updateHead(proto.DHTLog spineHead) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
|
||||||
|
_head = spineHead.head;
|
||||||
|
_tail = spineHead.tail;
|
||||||
|
}
|
||||||
|
|
||||||
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Spine element management
|
||||||
|
|
||||||
|
static final Uint8List _emptySegmentKey =
|
||||||
|
Uint8List.fromList(List.filled(TypedKey.decodedLength<TypedKey>(), 0));
|
||||||
|
static Uint8List _makeEmptySubkey() => Uint8List.fromList(List.filled(
|
||||||
|
DHTLog.segmentsPerSubkey * TypedKey.decodedLength<TypedKey>(), 0));
|
||||||
|
|
||||||
|
static TypedKey? _getSegmentKey(Uint8List subkeyData, int segment) {
|
||||||
|
final decodedLength = TypedKey.decodedLength<TypedKey>();
|
||||||
|
final segmentKeyBytes = subkeyData.sublist(
|
||||||
|
decodedLength * segment, decodedLength * (segment + 1));
|
||||||
|
if (segmentKeyBytes.equals(_emptySegmentKey)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return TypedKey.fromBytes(segmentKeyBytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void _setSegmentKey(
|
||||||
|
Uint8List subkeyData, int segment, TypedKey? segmentKey) {
|
||||||
|
final decodedLength = TypedKey.decodedLength<TypedKey>();
|
||||||
|
late final Uint8List segmentKeyBytes;
|
||||||
|
if (segmentKey == null) {
|
||||||
|
segmentKeyBytes = _emptySegmentKey;
|
||||||
|
} else {
|
||||||
|
segmentKeyBytes = segmentKey.decode();
|
||||||
|
}
|
||||||
|
subkeyData.setRange(decodedLength * segment, decodedLength * (segment + 1),
|
||||||
|
segmentKeyBytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<DHTShortArray> _openOrCreateSegmentInner(int segmentNumber) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
assert(_spineRecord.writer != null, 'should be writable');
|
||||||
|
|
||||||
|
// Lookup what subkey and segment subrange has this position's segment
|
||||||
|
// shortarray
|
||||||
|
final l = _lookupSegment(segmentNumber);
|
||||||
|
final subkey = l.subkey;
|
||||||
|
final segment = l.segment;
|
||||||
|
|
||||||
|
var subkeyData = await _spineRecord.get(subkey: subkey);
|
||||||
|
subkeyData ??= _makeEmptySubkey();
|
||||||
|
while (true) {
|
||||||
|
final segmentKey = _getSegmentKey(subkeyData!, segment);
|
||||||
|
if (segmentKey == null) {
|
||||||
|
// Create a shortarray segment
|
||||||
|
final segmentRec = await DHTShortArray.create(
|
||||||
|
debugName: '${_spineRecord.debugName}_spine_${subkey}_$segment',
|
||||||
|
stride: _segmentStride,
|
||||||
|
crypto: _spineRecord.crypto,
|
||||||
|
parent: _spineRecord.key,
|
||||||
|
routingContext: _spineRecord.routingContext,
|
||||||
|
writer: _spineRecord.writer,
|
||||||
|
);
|
||||||
|
var success = false;
|
||||||
|
try {
|
||||||
|
// Write it back to the spine record
|
||||||
|
_setSegmentKey(subkeyData, segment, segmentRec.recordKey);
|
||||||
|
subkeyData =
|
||||||
|
await _spineRecord.tryWriteBytes(subkeyData, subkey: subkey);
|
||||||
|
// If the write was successful then we're done
|
||||||
|
if (subkeyData == null) {
|
||||||
|
// Return it
|
||||||
|
success = true;
|
||||||
|
return segmentRec;
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
if (!success) {
|
||||||
|
await segmentRec.close();
|
||||||
|
await segmentRec.delete();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Open a shortarray segment
|
||||||
|
final segmentRec = await DHTShortArray.openWrite(
|
||||||
|
segmentKey,
|
||||||
|
_spineRecord.writer!,
|
||||||
|
debugName: '${_spineRecord.debugName}_spine_${subkey}_$segment',
|
||||||
|
crypto: _spineRecord.crypto,
|
||||||
|
parent: _spineRecord.key,
|
||||||
|
routingContext: _spineRecord.routingContext,
|
||||||
|
);
|
||||||
|
return segmentRec;
|
||||||
|
}
|
||||||
|
// Loop if we need to try again with the new data from the network
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<DHTShortArray?> _openSegmentInner(int segmentNumber) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
|
||||||
|
// Lookup what subkey and segment subrange has this position's segment
|
||||||
|
// shortarray
|
||||||
|
final l = _lookupSegment(segmentNumber);
|
||||||
|
final subkey = l.subkey;
|
||||||
|
final segment = l.segment;
|
||||||
|
|
||||||
|
final subkeyData = await _spineRecord.get(subkey: subkey);
|
||||||
|
if (subkeyData == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
final segmentKey = _getSegmentKey(subkeyData, segment);
|
||||||
|
if (segmentKey == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open a shortarray segment
|
||||||
|
final segmentRec = await DHTShortArray.openRead(
|
||||||
|
segmentKey,
|
||||||
|
debugName: '${_spineRecord.debugName}_spine_${subkey}_$segment',
|
||||||
|
crypto: _spineRecord.crypto,
|
||||||
|
parent: _spineRecord.key,
|
||||||
|
routingContext: _spineRecord.routingContext,
|
||||||
|
);
|
||||||
|
return segmentRec;
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<DHTShortArray> _openOrCreateSegment(int segmentNumber) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
|
||||||
|
// See if we already have this in the cache
|
||||||
|
for (var i = 0; i < _spineCache.length; i++) {
|
||||||
|
if (_spineCache[i].$1 == segmentNumber) {
|
||||||
|
// Touch the element
|
||||||
|
final x = _spineCache.removeAt(i);
|
||||||
|
_spineCache.add(x);
|
||||||
|
// Return the shortarray for this position
|
||||||
|
return x.$2.ref();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we don't have it in the cache, get/create it and then cache a ref
|
||||||
|
final segment = await _openOrCreateSegmentInner(segmentNumber);
|
||||||
|
_spineCache.add((segmentNumber, await segment.ref()));
|
||||||
|
if (_spineCache.length > _spineCacheLength) {
|
||||||
|
// Trim the LRU cache
|
||||||
|
final (_, sa) = _spineCache.removeAt(0);
|
||||||
|
await sa.close();
|
||||||
|
}
|
||||||
|
return segment;
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<DHTShortArray?> _openSegment(int segmentNumber) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
|
||||||
|
// See if we already have this in the cache
|
||||||
|
for (var i = 0; i < _spineCache.length; i++) {
|
||||||
|
if (_spineCache[i].$1 == segmentNumber) {
|
||||||
|
// Touch the element
|
||||||
|
final x = _spineCache.removeAt(i);
|
||||||
|
_spineCache.add(x);
|
||||||
|
// Return the shortarray for this position
|
||||||
|
return x.$2.ref();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we don't have it in the cache, get it and then cache it
|
||||||
|
final segment = await _openSegmentInner(segmentNumber);
|
||||||
|
if (segment == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
_spineCache.add((segmentNumber, await segment.ref()));
|
||||||
|
if (_spineCache.length > _spineCacheLength) {
|
||||||
|
// Trim the LRU cache
|
||||||
|
final (_, sa) = _spineCache.removeAt(0);
|
||||||
|
await sa.close();
|
||||||
|
}
|
||||||
|
return segment;
|
||||||
|
}
|
||||||
|
|
||||||
|
_DHTLogSegmentLookup _lookupSegment(int segmentNumber) {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
|
||||||
|
if (segmentNumber < 0) {
|
||||||
|
throw IndexError.withLength(
|
||||||
|
segmentNumber, DHTLog.spineSubkeys * DHTLog.segmentsPerSubkey);
|
||||||
|
}
|
||||||
|
final subkey = segmentNumber ~/ DHTLog.segmentsPerSubkey;
|
||||||
|
if (subkey >= DHTLog.spineSubkeys) {
|
||||||
|
throw IndexError.withLength(
|
||||||
|
segmentNumber, DHTLog.spineSubkeys * DHTLog.segmentsPerSubkey);
|
||||||
|
}
|
||||||
|
final segment = segmentNumber % DHTLog.segmentsPerSubkey;
|
||||||
|
return _DHTLogSegmentLookup(subkey: subkey + 1, segment: segment);
|
||||||
|
}
|
||||||
|
|
||||||
|
///////////////////////////////////////////
|
||||||
|
// API for public interfaces
|
||||||
|
|
||||||
|
Future<_DHTLogPosition?> lookupPosition(int pos) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be locked');
|
||||||
|
return _spineCacheMutex.protect(() async {
|
||||||
|
// Check if our position is in bounds
|
||||||
|
final endPos = length;
|
||||||
|
if (pos < 0 || pos >= endPos) {
|
||||||
|
throw IndexError.withLength(pos, endPos);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate absolute position, ring-buffer style
|
||||||
|
final absolutePosition = (_head + pos) % _positionLimit;
|
||||||
|
|
||||||
|
// Determine the segment number and position within the segment
|
||||||
|
final segmentNumber = absolutePosition ~/ DHTShortArray.maxElements;
|
||||||
|
final segmentPos = absolutePosition % DHTShortArray.maxElements;
|
||||||
|
|
||||||
|
// Get the segment shortArray
|
||||||
|
final openedSegment = _openedSegments[segmentNumber];
|
||||||
|
late final DHTShortArray shortArray;
|
||||||
|
if (openedSegment != null) {
|
||||||
|
openedSegment.openCount++;
|
||||||
|
shortArray = openedSegment.shortArray;
|
||||||
|
} else {
|
||||||
|
final newShortArray = (_spineRecord.writer == null)
|
||||||
|
? await _openSegment(segmentNumber)
|
||||||
|
: await _openOrCreateSegment(segmentNumber);
|
||||||
|
if (newShortArray == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
_openedSegments[segmentNumber] =
|
||||||
|
_OpenedSegment._(shortArray: newShortArray);
|
||||||
|
|
||||||
|
shortArray = newShortArray;
|
||||||
|
}
|
||||||
|
|
||||||
|
return _DHTLogPosition._(
|
||||||
|
dhtLogSpine: this,
|
||||||
|
shortArray: shortArray,
|
||||||
|
pos: segmentPos,
|
||||||
|
segmentNumber: segmentNumber);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<void> _segmentClosed(int segmentNumber) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be locked');
|
||||||
|
await _spineCacheMutex.protect(() async {
|
||||||
|
final os = _openedSegments[segmentNumber]!;
|
||||||
|
os.openCount--;
|
||||||
|
if (os.openCount == 0) {
|
||||||
|
_openedSegments.remove(segmentNumber);
|
||||||
|
await os.shortArray.close();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
void allocateTail(int count) {
|
||||||
|
assert(_spineMutex.isLocked, 'should be locked');
|
||||||
|
|
||||||
|
final currentLength = length;
|
||||||
|
if (count <= 0) {
|
||||||
|
throw StateError('count should be > 0');
|
||||||
|
}
|
||||||
|
if (currentLength + count >= _positionLimit) {
|
||||||
|
throw StateError('ring buffer overflow');
|
||||||
|
}
|
||||||
|
|
||||||
|
_tail = (_tail + count) % _positionLimit;
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<void> releaseHead(int count) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be locked');
|
||||||
|
|
||||||
|
final currentLength = length;
|
||||||
|
if (count <= 0) {
|
||||||
|
throw StateError('count should be > 0');
|
||||||
|
}
|
||||||
|
if (count > currentLength) {
|
||||||
|
throw StateError('ring buffer underflow');
|
||||||
|
}
|
||||||
|
|
||||||
|
final oldHead = _head;
|
||||||
|
_head = (_head + count) % _positionLimit;
|
||||||
|
final newHead = _head;
|
||||||
|
await _purgeSegments(oldHead, newHead);
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<void> _deleteSegmentsContiguous(int start, int end) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
DHTRecordPool.instance
|
||||||
|
.log('_deleteSegmentsContiguous: start=$start, end=$end');
|
||||||
|
|
||||||
|
final startSegmentNumber = start ~/ DHTShortArray.maxElements;
|
||||||
|
final startSegmentPos = start % DHTShortArray.maxElements;
|
||||||
|
|
||||||
|
final endSegmentNumber = end ~/ DHTShortArray.maxElements;
|
||||||
|
final endSegmentPos = end % DHTShortArray.maxElements;
|
||||||
|
|
||||||
|
final firstDeleteSegment =
|
||||||
|
(startSegmentPos == 0) ? startSegmentNumber : startSegmentNumber + 1;
|
||||||
|
final lastDeleteSegment =
|
||||||
|
(endSegmentPos == 0) ? endSegmentNumber - 1 : endSegmentNumber - 2;
|
||||||
|
|
||||||
|
_SubkeyData? lastSubkeyData;
|
||||||
|
for (var segmentNumber = firstDeleteSegment;
|
||||||
|
segmentNumber <= lastDeleteSegment;
|
||||||
|
segmentNumber++) {
|
||||||
|
// Lookup what subkey and segment subrange has this position's segment
|
||||||
|
// shortarray
|
||||||
|
final l = _lookupSegment(segmentNumber);
|
||||||
|
final subkey = l.subkey;
|
||||||
|
final segment = l.segment;
|
||||||
|
|
||||||
|
if (subkey != lastSubkeyData?.subkey) {
|
||||||
|
// Flush subkey writes
|
||||||
|
if (lastSubkeyData != null && lastSubkeyData.changed) {
|
||||||
|
await _spineRecord.eventualWriteBytes(lastSubkeyData.data,
|
||||||
|
subkey: lastSubkeyData.subkey);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get next subkey if available locally
|
||||||
|
final data = await _spineRecord.get(
|
||||||
|
subkey: subkey, refreshMode: DHTRecordRefreshMode.local);
|
||||||
|
if (data != null) {
|
||||||
|
lastSubkeyData = _SubkeyData(subkey: subkey, data: data);
|
||||||
|
} else {
|
||||||
|
lastSubkeyData = null;
|
||||||
|
// If the subkey was not available locally we can go to the
|
||||||
|
// last segment number at the end of this subkey
|
||||||
|
segmentNumber = ((subkey + 1) * DHTLog.segmentsPerSubkey) - 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (lastSubkeyData != null) {
|
||||||
|
final segmentKey = _getSegmentKey(lastSubkeyData.data, segment);
|
||||||
|
if (segmentKey != null) {
|
||||||
|
await DHTRecordPool.instance.deleteRecord(segmentKey);
|
||||||
|
_setSegmentKey(lastSubkeyData.data, segment, null);
|
||||||
|
lastSubkeyData.changed = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Flush subkey writes
|
||||||
|
if (lastSubkeyData != null) {
|
||||||
|
await _spineRecord.eventualWriteBytes(lastSubkeyData.data,
|
||||||
|
subkey: lastSubkeyData.subkey);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<void> _purgeSegments(int from, int to) async {
|
||||||
|
assert(_spineMutex.isLocked, 'should be in mutex here');
|
||||||
|
if (from < to) {
|
||||||
|
await _deleteSegmentsContiguous(from, to);
|
||||||
|
} else if (from > to) {
|
||||||
|
await _deleteSegmentsContiguous(from, _positionLimit);
|
||||||
|
await _deleteSegmentsContiguous(0, to);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Watch For Updates
|
||||||
|
|
||||||
|
// Watch head for changes
|
||||||
|
Future<void> watch() async {
|
||||||
|
// This will update any existing watches if necessary
|
||||||
|
try {
|
||||||
|
await _spineRecord.watch(subkeys: [ValueSubkeyRange.single(0)]);
|
||||||
|
|
||||||
|
// Update changes to the head record
|
||||||
|
// Don't watch for local changes because this class already handles
|
||||||
|
// notifying listeners and knows when it makes local changes
|
||||||
|
_subscription ??=
|
||||||
|
await _spineRecord.listen(localChanges: false, _onSpineChanged);
|
||||||
|
} on Exception {
|
||||||
|
// If anything fails, try to cancel the watches
|
||||||
|
await cancelWatch();
|
||||||
|
rethrow;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop watching for changes to head and linked records
|
||||||
|
Future<void> cancelWatch() async {
|
||||||
|
await _spineRecord.cancelWatch();
|
||||||
|
await _subscription?.cancel();
|
||||||
|
_subscription = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Called when the log changes online and we find out from a watch
|
||||||
|
// but not when we make a change locally
|
||||||
|
Future<void> _onSpineChanged(
|
||||||
|
DHTRecord record, Uint8List? data, List<ValueSubkeyRange> subkeys) async {
|
||||||
|
// If head record subkey zero changes, then the layout
|
||||||
|
// of the dhtshortarray has changed
|
||||||
|
if (data == null) {
|
||||||
|
throw StateError('spine head changed without data');
|
||||||
|
}
|
||||||
|
if (record.key != _spineRecord.key ||
|
||||||
|
subkeys.length != 1 ||
|
||||||
|
subkeys[0] != ValueSubkeyRange.single(0)) {
|
||||||
|
throw StateError('watch returning wrong subkey range');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode updated head
|
||||||
|
final headData = proto.DHTLog.fromBuffer(data);
|
||||||
|
|
||||||
|
// Then update the head record
|
||||||
|
await _spineMutex.protect(() async {
|
||||||
|
final oldHead = _head;
|
||||||
|
final oldTail = _tail;
|
||||||
|
await _updateHead(headData);
|
||||||
|
sendUpdate(oldHead, oldTail);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
TypedKey get recordKey => _spineRecord.key;
|
||||||
|
OwnedDHTRecordPointer get recordPointer => _spineRecord.ownedDHTRecordPointer;
|
||||||
|
int get length => _ringDistance(_tail, _head);
|
||||||
|
|
||||||
|
bool get isOpen => _spineRecord.isOpen;
|
||||||
|
|
||||||
|
// Ring buffer distance from old to new
|
||||||
|
static int _ringDistance(int n, int o) =>
|
||||||
|
(n < o) ? (_positionLimit - o) + n : n - o;
|
||||||
|
|
||||||
|
static const _positionLimit = DHTLog.segmentsPerSubkey *
|
||||||
|
DHTLog.spineSubkeys *
|
||||||
|
DHTShortArray.maxElements;
|
||||||
|
|
||||||
|
// Spine head mutex to ensure we keep the representation valid
|
||||||
|
final Mutex _spineMutex = Mutex();
|
||||||
|
// Subscription to head record internal changes
|
||||||
|
StreamSubscription<DHTRecordWatchChange>? _subscription;
|
||||||
|
// Notify closure for external spine head changes
|
||||||
|
void Function(DHTLogUpdate)? onUpdatedSpine;
|
||||||
|
|
||||||
|
// Spine DHT record
|
||||||
|
final DHTRecord _spineRecord;
|
||||||
|
// Segment stride to use for spine elements
|
||||||
|
final int _segmentStride;
|
||||||
|
|
||||||
|
// Position of the start of the log (oldest items)
|
||||||
|
int _head;
|
||||||
|
// Position of the end of the log (newest items) (exclusive)
|
||||||
|
int _tail;
|
||||||
|
|
||||||
|
// LRU cache of DHT spine elements accessed recently
|
||||||
|
// Pair of position and associated shortarray segment
|
||||||
|
final Mutex _spineCacheMutex = Mutex();
|
||||||
|
final List<(int, DHTShortArray)> _spineCache;
|
||||||
|
final Map<int, _OpenedSegment> _openedSegments;
|
||||||
|
static const int _spineCacheLength = 3;
|
||||||
|
}
|
@ -38,7 +38,8 @@ class DefaultDHTRecordCubit<T> extends DHTRecordCubit<T> {
|
|||||||
final Uint8List data;
|
final Uint8List data;
|
||||||
final firstSubkey = subkeys.firstOrNull!.low;
|
final firstSubkey = subkeys.firstOrNull!.low;
|
||||||
if (firstSubkey != defaultSubkey || updatedata == null) {
|
if (firstSubkey != defaultSubkey || updatedata == null) {
|
||||||
final maybeData = await record.get(forceRefresh: true);
|
final maybeData =
|
||||||
|
await record.get(refreshMode: DHTRecordRefreshMode.network);
|
||||||
if (maybeData == null) {
|
if (maybeData == null) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
@ -13,9 +13,30 @@ class DHTRecordWatchChange extends Equatable {
|
|||||||
List<Object?> get props => [local, data, subkeys];
|
List<Object?> get props => [local, data, subkeys];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Refresh mode for DHT record 'get'
|
||||||
|
enum DHTRecordRefreshMode {
|
||||||
|
/// Return existing subkey values if they exist locally already
|
||||||
|
/// And then check the network for a newer value
|
||||||
|
/// This is the default refresh mode
|
||||||
|
cached,
|
||||||
|
|
||||||
|
/// Return existing subkey values only if they exist locally already
|
||||||
|
local,
|
||||||
|
|
||||||
|
/// Always check the network for a newer subkey value
|
||||||
|
network,
|
||||||
|
|
||||||
|
/// Always check the network for a newer subkey value but only
|
||||||
|
/// return that value if its sequence number is newer than the local value
|
||||||
|
update;
|
||||||
|
|
||||||
|
bool get _forceRefresh => this == network || this == update;
|
||||||
|
bool get _inspectLocal => this == local || this == update;
|
||||||
|
}
|
||||||
|
|
||||||
/////////////////////////////////////////////////
|
/////////////////////////////////////////////////
|
||||||
|
|
||||||
class DHTRecord {
|
class DHTRecord implements DHTDeleteable<DHTRecord, DHTRecord> {
|
||||||
DHTRecord._(
|
DHTRecord._(
|
||||||
{required VeilidRoutingContext routingContext,
|
{required VeilidRoutingContext routingContext,
|
||||||
required SharedDHTRecordData sharedDHTRecordData,
|
required SharedDHTRecordData sharedDHTRecordData,
|
||||||
@ -27,23 +48,52 @@ class DHTRecord {
|
|||||||
_routingContext = routingContext,
|
_routingContext = routingContext,
|
||||||
_defaultSubkey = defaultSubkey,
|
_defaultSubkey = defaultSubkey,
|
||||||
_writer = writer,
|
_writer = writer,
|
||||||
_open = true,
|
_openCount = 1,
|
||||||
_sharedDHTRecordData = sharedDHTRecordData;
|
_sharedDHTRecordData = sharedDHTRecordData;
|
||||||
|
|
||||||
final SharedDHTRecordData _sharedDHTRecordData;
|
////////////////////////////////////////////////////////////////////////////
|
||||||
final VeilidRoutingContext _routingContext;
|
// DHTCloseable
|
||||||
final int _defaultSubkey;
|
|
||||||
final KeyPair? _writer;
|
|
||||||
final DHTRecordCrypto _crypto;
|
|
||||||
final String debugName;
|
|
||||||
|
|
||||||
bool _open;
|
/// Check if the DHTRecord is open
|
||||||
@internal
|
@override
|
||||||
StreamController<DHTRecordWatchChange>? watchController;
|
bool get isOpen => _openCount > 0;
|
||||||
@internal
|
|
||||||
WatchState? watchState;
|
|
||||||
|
|
||||||
int subkeyOrDefault(int subkey) => (subkey == -1) ? _defaultSubkey : subkey;
|
/// The type of the openable scope
|
||||||
|
@override
|
||||||
|
FutureOr<DHTRecord> scoped() => this;
|
||||||
|
|
||||||
|
/// Add a reference to this DHTRecord
|
||||||
|
@override
|
||||||
|
Future<DHTRecord> ref() async => _mutex.protect(() async {
|
||||||
|
_openCount++;
|
||||||
|
return this;
|
||||||
|
});
|
||||||
|
|
||||||
|
/// Free all resources for the DHTRecord
|
||||||
|
@override
|
||||||
|
Future<void> close() async => _mutex.protect(() async {
|
||||||
|
if (_openCount == 0) {
|
||||||
|
throw StateError('already closed');
|
||||||
|
}
|
||||||
|
_openCount--;
|
||||||
|
if (_openCount != 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await _watchController?.close();
|
||||||
|
_watchController = null;
|
||||||
|
await DHTRecordPool.instance._recordClosed(this);
|
||||||
|
});
|
||||||
|
|
||||||
|
/// Free all resources for the DHTRecord and delete it from the DHT
|
||||||
|
/// Will wait until the record is closed to delete it
|
||||||
|
@override
|
||||||
|
Future<void> delete() async => _mutex.protect(() async {
|
||||||
|
await DHTRecordPool.instance.deleteRecord(key);
|
||||||
|
});
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Public API
|
||||||
|
|
||||||
VeilidRoutingContext get routingContext => _routingContext;
|
VeilidRoutingContext get routingContext => _routingContext;
|
||||||
TypedKey get key => _sharedDHTRecordData.recordDescriptor.key;
|
TypedKey get key => _sharedDHTRecordData.recordDescriptor.key;
|
||||||
@ -57,85 +107,69 @@ class DHTRecord {
|
|||||||
DHTRecordCrypto get crypto => _crypto;
|
DHTRecordCrypto get crypto => _crypto;
|
||||||
OwnedDHTRecordPointer get ownedDHTRecordPointer =>
|
OwnedDHTRecordPointer get ownedDHTRecordPointer =>
|
||||||
OwnedDHTRecordPointer(recordKey: key, owner: ownerKeyPair!);
|
OwnedDHTRecordPointer(recordKey: key, owner: ownerKeyPair!);
|
||||||
bool get isOpen => _open;
|
int subkeyOrDefault(int subkey) => (subkey == -1) ? _defaultSubkey : subkey;
|
||||||
|
|
||||||
Future<void> close() async {
|
|
||||||
if (!_open) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
await watchController?.close();
|
|
||||||
await DHTRecordPool.instance._recordClosed(this);
|
|
||||||
_open = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
Future<T> scope<T>(Future<T> Function(DHTRecord) scopeFunction) async {
|
|
||||||
try {
|
|
||||||
return await scopeFunction(this);
|
|
||||||
} finally {
|
|
||||||
await close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Future<T> deleteScope<T>(Future<T> Function(DHTRecord) scopeFunction) async {
|
|
||||||
try {
|
|
||||||
final out = await scopeFunction(this);
|
|
||||||
if (_open) {
|
|
||||||
await close();
|
|
||||||
}
|
|
||||||
return out;
|
|
||||||
} on Exception catch (_) {
|
|
||||||
if (_open) {
|
|
||||||
await close();
|
|
||||||
}
|
|
||||||
await DHTRecordPool.instance.deleteRecord(key);
|
|
||||||
rethrow;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Future<T> maybeDeleteScope<T>(
|
|
||||||
bool delete, Future<T> Function(DHTRecord) scopeFunction) async {
|
|
||||||
if (delete) {
|
|
||||||
return deleteScope(scopeFunction);
|
|
||||||
} else {
|
|
||||||
return scope(scopeFunction);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
/// Get a subkey value from this record.
|
||||||
|
/// Returns the most recent value data for this subkey or null if this subkey
|
||||||
|
/// has not yet been written to.
|
||||||
|
/// * 'refreshMode' determines whether or not to return a locally existing
|
||||||
|
/// value or always check the network
|
||||||
|
/// * 'outSeqNum' optionally returns the sequence number of the value being
|
||||||
|
/// returned if one was returned.
|
||||||
Future<Uint8List?> get(
|
Future<Uint8List?> get(
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
bool forceRefresh = false,
|
DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.cached,
|
||||||
bool onlyUpdates = false,
|
|
||||||
Output<int>? outSeqNum}) async {
|
Output<int>? outSeqNum}) async {
|
||||||
subkey = subkeyOrDefault(subkey);
|
subkey = subkeyOrDefault(subkey);
|
||||||
|
|
||||||
|
// Get the last sequence number if we need it
|
||||||
|
final lastSeq =
|
||||||
|
refreshMode._inspectLocal ? await _localSubkeySeq(subkey) : null;
|
||||||
|
|
||||||
|
// See if we only ever want the locally stored value
|
||||||
|
if (refreshMode == DHTRecordRefreshMode.local && lastSeq == null) {
|
||||||
|
// If it's not available locally already just return null now
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
final valueData = await _routingContext.getDHTValue(key, subkey,
|
final valueData = await _routingContext.getDHTValue(key, subkey,
|
||||||
forceRefresh: forceRefresh);
|
forceRefresh: refreshMode._forceRefresh);
|
||||||
if (valueData == null) {
|
if (valueData == null) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey];
|
// See if this get resulted in a newer sequence number
|
||||||
if (onlyUpdates && lastSeq != null && valueData.seq <= lastSeq) {
|
if (refreshMode == DHTRecordRefreshMode.update &&
|
||||||
|
lastSeq != null &&
|
||||||
|
valueData.seq <= lastSeq) {
|
||||||
|
// If we're only returning updates then punt now
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
// If we're returning a value, decrypt it
|
||||||
final out = (crypto ?? _crypto).decrypt(valueData.data, subkey);
|
final out = (crypto ?? _crypto).decrypt(valueData.data, subkey);
|
||||||
if (outSeqNum != null) {
|
if (outSeqNum != null) {
|
||||||
outSeqNum.save(valueData.seq);
|
outSeqNum.save(valueData.seq);
|
||||||
}
|
}
|
||||||
_sharedDHTRecordData.subkeySeqCache[subkey] = valueData.seq;
|
|
||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get a subkey value from this record.
|
||||||
|
/// Process the record returned with a JSON unmarshal function 'fromJson'.
|
||||||
|
/// Returns the most recent value data for this subkey or null if this subkey
|
||||||
|
/// has not yet been written to.
|
||||||
|
/// * 'refreshMode' determines whether or not to return a locally existing
|
||||||
|
/// value or always check the network
|
||||||
|
/// * 'outSeqNum' optionally returns the sequence number of the value being
|
||||||
|
/// returned if one was returned.
|
||||||
Future<T?> getJson<T>(T Function(dynamic) fromJson,
|
Future<T?> getJson<T>(T Function(dynamic) fromJson,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
bool forceRefresh = false,
|
DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.cached,
|
||||||
bool onlyUpdates = false,
|
|
||||||
Output<int>? outSeqNum}) async {
|
Output<int>? outSeqNum}) async {
|
||||||
final data = await get(
|
final data = await get(
|
||||||
subkey: subkey,
|
subkey: subkey,
|
||||||
crypto: crypto,
|
crypto: crypto,
|
||||||
forceRefresh: forceRefresh,
|
refreshMode: refreshMode,
|
||||||
onlyUpdates: onlyUpdates,
|
|
||||||
outSeqNum: outSeqNum);
|
outSeqNum: outSeqNum);
|
||||||
if (data == null) {
|
if (data == null) {
|
||||||
return null;
|
return null;
|
||||||
@ -143,18 +177,25 @@ class DHTRecord {
|
|||||||
return jsonDecodeBytes(fromJson, data);
|
return jsonDecodeBytes(fromJson, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get a subkey value from this record.
|
||||||
|
/// Process the record returned with a protobuf unmarshal
|
||||||
|
/// function 'fromBuffer'.
|
||||||
|
/// Returns the most recent value data for this subkey or null if this subkey
|
||||||
|
/// has not yet been written to.
|
||||||
|
/// * 'refreshMode' determines whether or not to return a locally existing
|
||||||
|
/// value or always check the network
|
||||||
|
/// * 'outSeqNum' optionally returns the sequence number of the value being
|
||||||
|
/// returned if one was returned.
|
||||||
Future<T?> getProtobuf<T extends GeneratedMessage>(
|
Future<T?> getProtobuf<T extends GeneratedMessage>(
|
||||||
T Function(List<int> i) fromBuffer,
|
T Function(List<int> i) fromBuffer,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
bool forceRefresh = false,
|
DHTRecordRefreshMode refreshMode = DHTRecordRefreshMode.cached,
|
||||||
bool onlyUpdates = false,
|
|
||||||
Output<int>? outSeqNum}) async {
|
Output<int>? outSeqNum}) async {
|
||||||
final data = await get(
|
final data = await get(
|
||||||
subkey: subkey,
|
subkey: subkey,
|
||||||
crypto: crypto,
|
crypto: crypto,
|
||||||
forceRefresh: forceRefresh,
|
refreshMode: refreshMode,
|
||||||
onlyUpdates: onlyUpdates,
|
|
||||||
outSeqNum: outSeqNum);
|
outSeqNum: outSeqNum);
|
||||||
if (data == null) {
|
if (data == null) {
|
||||||
return null;
|
return null;
|
||||||
@ -162,13 +203,16 @@ class DHTRecord {
|
|||||||
return fromBuffer(data.toList());
|
return fromBuffer(data.toList());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Attempt to write a byte buffer to a DHTRecord subkey
|
||||||
|
/// If a newer value was found on the network, it is returned
|
||||||
|
/// If the value was succesfully written, null is returned
|
||||||
Future<Uint8List?> tryWriteBytes(Uint8List newValue,
|
Future<Uint8List?> tryWriteBytes(Uint8List newValue,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
KeyPair? writer,
|
KeyPair? writer,
|
||||||
Output<int>? outSeqNum}) async {
|
Output<int>? outSeqNum}) async {
|
||||||
subkey = subkeyOrDefault(subkey);
|
subkey = subkeyOrDefault(subkey);
|
||||||
final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey];
|
final lastSeq = await _localSubkeySeq(subkey);
|
||||||
final encryptedNewValue =
|
final encryptedNewValue =
|
||||||
await (crypto ?? _crypto).encrypt(newValue, subkey);
|
await (crypto ?? _crypto).encrypt(newValue, subkey);
|
||||||
|
|
||||||
@ -190,7 +234,6 @@ class DHTRecord {
|
|||||||
if (isUpdated && outSeqNum != null) {
|
if (isUpdated && outSeqNum != null) {
|
||||||
outSeqNum.save(newValueData.seq);
|
outSeqNum.save(newValueData.seq);
|
||||||
}
|
}
|
||||||
_sharedDHTRecordData.subkeySeqCache[subkey] = newValueData.seq;
|
|
||||||
|
|
||||||
// See if the encrypted data returned is exactly the same
|
// See if the encrypted data returned is exactly the same
|
||||||
// if so, shortcut and don't bother decrypting it
|
// if so, shortcut and don't bother decrypting it
|
||||||
@ -211,13 +254,16 @@ class DHTRecord {
|
|||||||
return decryptedNewValue;
|
return decryptedNewValue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Attempt to write a byte buffer to a DHTRecord subkey
|
||||||
|
/// If a newer value was found on the network, another attempt
|
||||||
|
/// will be made to write the subkey until this succeeds
|
||||||
Future<void> eventualWriteBytes(Uint8List newValue,
|
Future<void> eventualWriteBytes(Uint8List newValue,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
KeyPair? writer,
|
KeyPair? writer,
|
||||||
Output<int>? outSeqNum}) async {
|
Output<int>? outSeqNum}) async {
|
||||||
subkey = subkeyOrDefault(subkey);
|
subkey = subkeyOrDefault(subkey);
|
||||||
final lastSeq = _sharedDHTRecordData.subkeySeqCache[subkey];
|
final lastSeq = await _localSubkeySeq(subkey);
|
||||||
final encryptedNewValue =
|
final encryptedNewValue =
|
||||||
await (crypto ?? _crypto).encrypt(newValue, subkey);
|
await (crypto ?? _crypto).encrypt(newValue, subkey);
|
||||||
|
|
||||||
@ -243,7 +289,6 @@ class DHTRecord {
|
|||||||
if (outSeqNum != null) {
|
if (outSeqNum != null) {
|
||||||
outSeqNum.save(newValueData.seq);
|
outSeqNum.save(newValueData.seq);
|
||||||
}
|
}
|
||||||
_sharedDHTRecordData.subkeySeqCache[subkey] = newValueData.seq;
|
|
||||||
|
|
||||||
// The encrypted data returned should be exactly the same
|
// The encrypted data returned should be exactly the same
|
||||||
// as what we are trying to set,
|
// as what we are trying to set,
|
||||||
@ -256,6 +301,11 @@ class DHTRecord {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Attempt to write a byte buffer to a DHTRecord subkey
|
||||||
|
/// If a newer value was found on the network, another attempt
|
||||||
|
/// will be made to write the subkey until this succeeds
|
||||||
|
/// Each attempt to write the value calls an update function with the
|
||||||
|
/// old value to determine what new value should be attempted for that write.
|
||||||
Future<void> eventualUpdateBytes(
|
Future<void> eventualUpdateBytes(
|
||||||
Future<Uint8List> Function(Uint8List? oldValue) update,
|
Future<Uint8List> Function(Uint8List? oldValue) update,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
@ -281,6 +331,7 @@ class DHTRecord {
|
|||||||
} while (oldValue != null);
|
} while (oldValue != null);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Like 'tryWriteBytes' but with JSON marshal/unmarshal of the value
|
||||||
Future<T?> tryWriteJson<T>(T Function(dynamic) fromJson, T newValue,
|
Future<T?> tryWriteJson<T>(T Function(dynamic) fromJson, T newValue,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
@ -298,6 +349,7 @@ class DHTRecord {
|
|||||||
return jsonDecodeBytes(fromJson, out);
|
return jsonDecodeBytes(fromJson, out);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
/// Like 'tryWriteBytes' but with protobuf marshal/unmarshal of the value
|
||||||
Future<T?> tryWriteProtobuf<T extends GeneratedMessage>(
|
Future<T?> tryWriteProtobuf<T extends GeneratedMessage>(
|
||||||
T Function(List<int>) fromBuffer, T newValue,
|
T Function(List<int>) fromBuffer, T newValue,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
@ -316,6 +368,7 @@ class DHTRecord {
|
|||||||
return fromBuffer(out);
|
return fromBuffer(out);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
/// Like 'eventualWriteBytes' but with JSON marshal/unmarshal of the value
|
||||||
Future<void> eventualWriteJson<T>(T newValue,
|
Future<void> eventualWriteJson<T>(T newValue,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
@ -324,6 +377,7 @@ class DHTRecord {
|
|||||||
eventualWriteBytes(jsonEncodeBytes(newValue),
|
eventualWriteBytes(jsonEncodeBytes(newValue),
|
||||||
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
||||||
|
|
||||||
|
/// Like 'eventualWriteBytes' but with protobuf marshal/unmarshal of the value
|
||||||
Future<void> eventualWriteProtobuf<T extends GeneratedMessage>(T newValue,
|
Future<void> eventualWriteProtobuf<T extends GeneratedMessage>(T newValue,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
@ -332,6 +386,7 @@ class DHTRecord {
|
|||||||
eventualWriteBytes(newValue.writeToBuffer(),
|
eventualWriteBytes(newValue.writeToBuffer(),
|
||||||
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
||||||
|
|
||||||
|
/// Like 'eventualUpdateBytes' but with JSON marshal/unmarshal of the value
|
||||||
Future<void> eventualUpdateJson<T>(
|
Future<void> eventualUpdateJson<T>(
|
||||||
T Function(dynamic) fromJson, Future<T> Function(T?) update,
|
T Function(dynamic) fromJson, Future<T> Function(T?) update,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
@ -341,6 +396,7 @@ class DHTRecord {
|
|||||||
eventualUpdateBytes(jsonUpdate(fromJson, update),
|
eventualUpdateBytes(jsonUpdate(fromJson, update),
|
||||||
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
||||||
|
|
||||||
|
/// Like 'eventualUpdateBytes' but with protobuf marshal/unmarshal of the value
|
||||||
Future<void> eventualUpdateProtobuf<T extends GeneratedMessage>(
|
Future<void> eventualUpdateProtobuf<T extends GeneratedMessage>(
|
||||||
T Function(List<int>) fromBuffer, Future<T> Function(T?) update,
|
T Function(List<int>) fromBuffer, Future<T> Function(T?) update,
|
||||||
{int subkey = -1,
|
{int subkey = -1,
|
||||||
@ -350,6 +406,8 @@ class DHTRecord {
|
|||||||
eventualUpdateBytes(protobufUpdate(fromBuffer, update),
|
eventualUpdateBytes(protobufUpdate(fromBuffer, update),
|
||||||
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
subkey: subkey, crypto: crypto, writer: writer, outSeqNum: outSeqNum);
|
||||||
|
|
||||||
|
/// Watch a subkey range of this DHT record for changes
|
||||||
|
/// Takes effect on the next DHTRecordPool tick
|
||||||
Future<void> watch(
|
Future<void> watch(
|
||||||
{List<ValueSubkeyRange>? subkeys,
|
{List<ValueSubkeyRange>? subkeys,
|
||||||
Timestamp? expiration,
|
Timestamp? expiration,
|
||||||
@ -363,6 +421,13 @@ class DHTRecord {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Register a callback for changes made on this this DHT record.
|
||||||
|
/// You must 'watch' the record as well as listen to it in order for this
|
||||||
|
/// call back to be called.
|
||||||
|
/// * 'localChanges' also enables calling the callback if changed are made
|
||||||
|
/// locally, otherwise only changes seen from the network itself are
|
||||||
|
/// reported
|
||||||
|
///
|
||||||
Future<StreamSubscription<DHTRecordWatchChange>> listen(
|
Future<StreamSubscription<DHTRecordWatchChange>> listen(
|
||||||
Future<void> Function(
|
Future<void> Function(
|
||||||
DHTRecord record, Uint8List? data, List<ValueSubkeyRange> subkeys)
|
DHTRecord record, Uint8List? data, List<ValueSubkeyRange> subkeys)
|
||||||
@ -371,13 +436,13 @@ class DHTRecord {
|
|||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
}) async {
|
}) async {
|
||||||
// Set up watch requirements
|
// Set up watch requirements
|
||||||
watchController ??=
|
_watchController ??=
|
||||||
StreamController<DHTRecordWatchChange>.broadcast(onCancel: () {
|
StreamController<DHTRecordWatchChange>.broadcast(onCancel: () {
|
||||||
// If there are no more listeners then we can get rid of the controller
|
// If there are no more listeners then we can get rid of the controller
|
||||||
watchController = null;
|
_watchController = null;
|
||||||
});
|
});
|
||||||
|
|
||||||
return watchController!.stream.listen(
|
return _watchController!.stream.listen(
|
||||||
(change) {
|
(change) {
|
||||||
if (change.local && !localChanges) {
|
if (change.local && !localChanges) {
|
||||||
return;
|
return;
|
||||||
@ -400,11 +465,13 @@ class DHTRecord {
|
|||||||
},
|
},
|
||||||
cancelOnError: true,
|
cancelOnError: true,
|
||||||
onError: (e) async {
|
onError: (e) async {
|
||||||
await watchController!.close();
|
await _watchController!.close();
|
||||||
watchController = null;
|
_watchController = null;
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Stop watching this record for changes
|
||||||
|
/// Takes effect on the next DHTRecordPool tick
|
||||||
Future<void> cancelWatch() async {
|
Future<void> cancelWatch() async {
|
||||||
// Tear down watch requirements
|
// Tear down watch requirements
|
||||||
if (watchState != null) {
|
if (watchState != null) {
|
||||||
@ -413,11 +480,23 @@ class DHTRecord {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Return the inspection state of a set of subkeys of the DHTRecord
|
||||||
|
/// See Veilid's 'inspectDHTRecord' call for details on how this works
|
||||||
Future<DHTRecordReport> inspect(
|
Future<DHTRecordReport> inspect(
|
||||||
{List<ValueSubkeyRange>? subkeys,
|
{List<ValueSubkeyRange>? subkeys,
|
||||||
DHTReportScope scope = DHTReportScope.local}) =>
|
DHTReportScope scope = DHTReportScope.local}) =>
|
||||||
_routingContext.inspectDHTRecord(key, subkeys: subkeys, scope: scope);
|
_routingContext.inspectDHTRecord(key, subkeys: subkeys, scope: scope);
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
Future<int?> _localSubkeySeq(int subkey) async {
|
||||||
|
final rr = await _routingContext.inspectDHTRecord(
|
||||||
|
key,
|
||||||
|
subkeys: [ValueSubkeyRange.single(subkey)],
|
||||||
|
);
|
||||||
|
return rr.localSeqs.firstOrNull ?? 0xFFFFFFFF;
|
||||||
|
}
|
||||||
|
|
||||||
void _addValueChange(
|
void _addValueChange(
|
||||||
{required bool local,
|
{required bool local,
|
||||||
required Uint8List? data,
|
required Uint8List? data,
|
||||||
@ -427,7 +506,7 @@ class DHTRecord {
|
|||||||
final watchedSubkeys = ws.subkeys;
|
final watchedSubkeys = ws.subkeys;
|
||||||
if (watchedSubkeys == null) {
|
if (watchedSubkeys == null) {
|
||||||
// Report all subkeys
|
// Report all subkeys
|
||||||
watchController?.add(
|
_watchController?.add(
|
||||||
DHTRecordWatchChange(local: local, data: data, subkeys: subkeys));
|
DHTRecordWatchChange(local: local, data: data, subkeys: subkeys));
|
||||||
} else {
|
} else {
|
||||||
// Only some subkeys are being watched, see if the reported update
|
// Only some subkeys are being watched, see if the reported update
|
||||||
@ -442,7 +521,7 @@ class DHTRecord {
|
|||||||
overlappedFirstSubkey == updateFirstSubkey ? data : null;
|
overlappedFirstSubkey == updateFirstSubkey ? data : null;
|
||||||
|
|
||||||
// Report only watched subkeys
|
// Report only watched subkeys
|
||||||
watchController?.add(DHTRecordWatchChange(
|
_watchController?.add(DHTRecordWatchChange(
|
||||||
local: local, data: updatedData, subkeys: overlappedSubkeys));
|
local: local, data: updatedData, subkeys: overlappedSubkeys));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -458,4 +537,18 @@ class DHTRecord {
|
|||||||
_addValueChange(
|
_addValueChange(
|
||||||
local: false, data: update.value?.data, subkeys: update.subkeys);
|
local: false, data: update.value?.data, subkeys: update.subkeys);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
final SharedDHTRecordData _sharedDHTRecordData;
|
||||||
|
final VeilidRoutingContext _routingContext;
|
||||||
|
final int _defaultSubkey;
|
||||||
|
final KeyPair? _writer;
|
||||||
|
final DHTRecordCrypto _crypto;
|
||||||
|
final String debugName;
|
||||||
|
final _mutex = Mutex();
|
||||||
|
int _openCount;
|
||||||
|
StreamController<DHTRecordWatchChange>? _watchController;
|
||||||
|
@internal
|
||||||
|
WatchState? watchState;
|
||||||
}
|
}
|
||||||
|
@ -93,7 +93,7 @@ class DHTRecordCubit<T> extends Cubit<AsyncValue<T>> {
|
|||||||
for (final skr in subkeys) {
|
for (final skr in subkeys) {
|
||||||
for (var sk = skr.low; sk <= skr.high; sk++) {
|
for (var sk = skr.low; sk <= skr.high; sk++) {
|
||||||
final data = await _record.get(
|
final data = await _record.get(
|
||||||
subkey: sk, forceRefresh: true, onlyUpdates: true);
|
subkey: sk, refreshMode: DHTRecordRefreshMode.update);
|
||||||
if (data != null) {
|
if (data != null) {
|
||||||
final newState = await _stateFunction(_record, updateSubkeys, data);
|
final newState = await _stateFunction(_record, updateSubkeys, data);
|
||||||
if (newState != null) {
|
if (newState != null) {
|
||||||
|
@ -88,10 +88,8 @@ class SharedDHTRecordData {
|
|||||||
DHTRecordDescriptor recordDescriptor;
|
DHTRecordDescriptor recordDescriptor;
|
||||||
KeyPair? defaultWriter;
|
KeyPair? defaultWriter;
|
||||||
VeilidRoutingContext defaultRoutingContext;
|
VeilidRoutingContext defaultRoutingContext;
|
||||||
Map<int, int> subkeySeqCache = {};
|
|
||||||
bool needsWatchStateUpdate = false;
|
bool needsWatchStateUpdate = false;
|
||||||
WatchState? unionWatchState;
|
WatchState? unionWatchState;
|
||||||
bool deleteOnClose = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Per opened record data
|
// Per opened record data
|
||||||
@ -128,6 +126,7 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
: _state = const DHTRecordPoolAllocations(),
|
: _state = const DHTRecordPoolAllocations(),
|
||||||
_mutex = Mutex(),
|
_mutex = Mutex(),
|
||||||
_opened = <TypedKey, OpenedRecordInfo>{},
|
_opened = <TypedKey, OpenedRecordInfo>{},
|
||||||
|
_markedForDelete = <TypedKey>{},
|
||||||
_routingContext = routingContext,
|
_routingContext = routingContext,
|
||||||
_veilid = veilid;
|
_veilid = veilid;
|
||||||
|
|
||||||
@ -140,6 +139,8 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
final Mutex _mutex;
|
final Mutex _mutex;
|
||||||
// Which DHT records are currently open
|
// Which DHT records are currently open
|
||||||
final Map<TypedKey, OpenedRecordInfo> _opened;
|
final Map<TypedKey, OpenedRecordInfo> _opened;
|
||||||
|
// Which DHT records are marked for deletion
|
||||||
|
final Set<TypedKey> _markedForDelete;
|
||||||
// Default routing context to use for new keys
|
// Default routing context to use for new keys
|
||||||
final VeilidRoutingContext _routingContext;
|
final VeilidRoutingContext _routingContext;
|
||||||
// Convenience accessor
|
// Convenience accessor
|
||||||
@ -288,6 +289,8 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
return openedRecordInfo;
|
return openedRecordInfo;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Called when a DHTRecord is closed
|
||||||
|
// Cleans up the opened record housekeeping and processes any late deletions
|
||||||
Future<void> _recordClosed(DHTRecord record) async {
|
Future<void> _recordClosed(DHTRecord record) async {
|
||||||
await _mutex.protect(() async {
|
await _mutex.protect(() async {
|
||||||
final key = record.key;
|
final key = record.key;
|
||||||
@ -301,14 +304,37 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
}
|
}
|
||||||
if (openedRecordInfo.records.isEmpty) {
|
if (openedRecordInfo.records.isEmpty) {
|
||||||
await _routingContext.closeDHTRecord(key);
|
await _routingContext.closeDHTRecord(key);
|
||||||
if (openedRecordInfo.shared.deleteOnClose) {
|
|
||||||
await _deleteRecordInner(key);
|
|
||||||
}
|
|
||||||
_opened.remove(key);
|
_opened.remove(key);
|
||||||
|
|
||||||
|
await _checkForLateDeletesInner(key);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check to see if this key can finally be deleted
|
||||||
|
// If any parents are marked for deletion, try them first
|
||||||
|
Future<void> _checkForLateDeletesInner(TypedKey key) async {
|
||||||
|
// Get parent list in bottom up order including our own key
|
||||||
|
final parents = <TypedKey>[];
|
||||||
|
TypedKey? nextParent = key;
|
||||||
|
while (nextParent != null) {
|
||||||
|
parents.add(nextParent);
|
||||||
|
nextParent = getParentRecordKey(nextParent);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If any parent is ready to delete all its children do it
|
||||||
|
for (final parent in parents) {
|
||||||
|
if (_markedForDelete.contains(parent)) {
|
||||||
|
final deleted = await _deleteRecordInner(parent);
|
||||||
|
if (!deleted) {
|
||||||
|
// If we couldn't delete a child then no 'marked for delete' parents
|
||||||
|
// above us will be ready to delete either
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Collect all dependencies (including the record itself)
|
// Collect all dependencies (including the record itself)
|
||||||
// in reverse (bottom-up/delete order)
|
// in reverse (bottom-up/delete order)
|
||||||
List<TypedKey> _collectChildrenInner(TypedKey recordKey) {
|
List<TypedKey> _collectChildrenInner(TypedKey recordKey) {
|
||||||
@ -328,7 +354,13 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
return allDeps.reversedView;
|
return allDeps.reversedView;
|
||||||
}
|
}
|
||||||
|
|
||||||
String _debugChildren(TypedKey recordKey, {List<TypedKey>? allDeps}) {
|
/// Collect all dependencies (including the record itself)
|
||||||
|
/// in reverse (bottom-up/delete order)
|
||||||
|
Future<List<TypedKey>> collectChildren(TypedKey recordKey) =>
|
||||||
|
_mutex.protect(() async => _collectChildrenInner(recordKey));
|
||||||
|
|
||||||
|
/// Print children
|
||||||
|
String debugChildren(TypedKey recordKey, {List<TypedKey>? allDeps}) {
|
||||||
allDeps ??= _collectChildrenInner(recordKey);
|
allDeps ??= _collectChildrenInner(recordKey);
|
||||||
// ignore: avoid_print
|
// ignore: avoid_print
|
||||||
var out =
|
var out =
|
||||||
@ -342,32 +374,48 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<void> _deleteRecordInner(TypedKey recordKey) async {
|
// Actual delete function
|
||||||
log('deleteDHTRecord: key=$recordKey');
|
Future<void> _finalizeDeleteRecordInner(TypedKey recordKey) async {
|
||||||
|
log('_finalizeDeleteRecordInner: key=$recordKey');
|
||||||
|
|
||||||
// Remove this child from parents
|
// Remove this child from parents
|
||||||
await _removeDependenciesInner([recordKey]);
|
await _removeDependenciesInner([recordKey]);
|
||||||
await _routingContext.deleteDHTRecord(recordKey);
|
await _routingContext.deleteDHTRecord(recordKey);
|
||||||
|
_markedForDelete.remove(recordKey);
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<void> deleteRecord(TypedKey recordKey) async {
|
// Deep delete mechanism inside mutex
|
||||||
await _mutex.protect(() async {
|
Future<bool> _deleteRecordInner(TypedKey recordKey) async {
|
||||||
final allDeps = _collectChildrenInner(recordKey);
|
final toDelete = _readyForDeleteInner(recordKey);
|
||||||
|
if (toDelete.isNotEmpty) {
|
||||||
if (allDeps.singleOrNull != recordKey) {
|
|
||||||
final dbgstr = _debugChildren(recordKey, allDeps: allDeps);
|
|
||||||
throw StateError('must delete children first: $dbgstr');
|
|
||||||
}
|
|
||||||
|
|
||||||
final ori = _opened[recordKey];
|
|
||||||
if (ori != null) {
|
|
||||||
// delete after close
|
|
||||||
ori.shared.deleteOnClose = true;
|
|
||||||
} else {
|
|
||||||
// delete now
|
// delete now
|
||||||
await _deleteRecordInner(recordKey);
|
for (final deleteKey in toDelete) {
|
||||||
|
await _finalizeDeleteRecordInner(deleteKey);
|
||||||
}
|
}
|
||||||
});
|
return true;
|
||||||
|
}
|
||||||
|
// mark for deletion
|
||||||
|
_markedForDelete.add(recordKey);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Delete a record and its children if they are all closed
|
||||||
|
/// otherwise mark that record for deletion eventually
|
||||||
|
/// Returns true if the deletion was processed immediately
|
||||||
|
/// Returns false if the deletion was marked for later
|
||||||
|
Future<bool> deleteRecord(TypedKey recordKey) async =>
|
||||||
|
_mutex.protect(() async => _deleteRecordInner(recordKey));
|
||||||
|
|
||||||
|
// If everything underneath is closed including itself, return the
|
||||||
|
// list of children (and itself) to finally actually delete
|
||||||
|
List<TypedKey> _readyForDeleteInner(TypedKey recordKey) {
|
||||||
|
final allDeps = _collectChildrenInner(recordKey);
|
||||||
|
for (final dep in allDeps) {
|
||||||
|
if (_opened.containsKey(dep)) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return allDeps;
|
||||||
}
|
}
|
||||||
|
|
||||||
void _validateParentInner(TypedKey? parent, TypedKey child) {
|
void _validateParentInner(TypedKey? parent, TypedKey child) {
|
||||||
@ -456,6 +504,19 @@ class DHTRecordPool with TableDBBackedJson<DHTRecordPoolAllocations> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool _isValidRecordKeyInner(TypedKey key) {
|
||||||
|
if (_state.rootRecords.contains(key)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (_state.childrenByParent.containsKey(key.toJson())) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<bool> isValidRecordKey(TypedKey key) =>
|
||||||
|
_mutex.protect(() async => _isValidRecordKeyInner(key));
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
/// Create a root DHTRecord that has no dependent records
|
/// Create a root DHTRecord that has no dependent records
|
||||||
|
@ -3,7 +3,6 @@ import 'dart:typed_data';
|
|||||||
|
|
||||||
import 'package:async_tools/async_tools.dart';
|
import 'package:async_tools/async_tools.dart';
|
||||||
import 'package:collection/collection.dart';
|
import 'package:collection/collection.dart';
|
||||||
import 'package:protobuf/protobuf.dart';
|
|
||||||
|
|
||||||
import '../../../veilid_support.dart';
|
import '../../../veilid_support.dart';
|
||||||
import '../../proto/proto.dart' as proto;
|
import '../../proto/proto.dart' as proto;
|
||||||
@ -14,12 +13,13 @@ part 'dht_short_array_write.dart';
|
|||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
class DHTShortArray {
|
class DHTShortArray implements DHTDeleteable<DHTShortArray, DHTShortArray> {
|
||||||
////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////
|
||||||
// Constructors
|
// Constructors
|
||||||
|
|
||||||
DHTShortArray._({required DHTRecord headRecord})
|
DHTShortArray._({required DHTRecord headRecord})
|
||||||
: _head = _DHTShortArrayHead(headRecord: headRecord) {
|
: _head = _DHTShortArrayHead(headRecord: headRecord),
|
||||||
|
_openCount = 1 {
|
||||||
_head.onUpdatedHead = () {
|
_head.onUpdatedHead = () {
|
||||||
_watchController?.sink.add(null);
|
_watchController?.sink.add(null);
|
||||||
};
|
};
|
||||||
@ -34,22 +34,22 @@ class DHTShortArray {
|
|||||||
VeilidRoutingContext? routingContext,
|
VeilidRoutingContext? routingContext,
|
||||||
TypedKey? parent,
|
TypedKey? parent,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
KeyPair? smplWriter}) async {
|
KeyPair? writer}) async {
|
||||||
assert(stride <= maxElements, 'stride too long');
|
assert(stride <= maxElements, 'stride too long');
|
||||||
final pool = DHTRecordPool.instance;
|
final pool = DHTRecordPool.instance;
|
||||||
|
|
||||||
late final DHTRecord dhtRecord;
|
late final DHTRecord dhtRecord;
|
||||||
if (smplWriter != null) {
|
if (writer != null) {
|
||||||
final schema = DHTSchema.smpl(
|
final schema = DHTSchema.smpl(
|
||||||
oCnt: 0,
|
oCnt: 0,
|
||||||
members: [DHTSchemaMember(mKey: smplWriter.key, mCnt: stride + 1)]);
|
members: [DHTSchemaMember(mKey: writer.key, mCnt: stride + 1)]);
|
||||||
dhtRecord = await pool.createRecord(
|
dhtRecord = await pool.createRecord(
|
||||||
debugName: debugName,
|
debugName: debugName,
|
||||||
parent: parent,
|
parent: parent,
|
||||||
routingContext: routingContext,
|
routingContext: routingContext,
|
||||||
schema: schema,
|
schema: schema,
|
||||||
crypto: crypto,
|
crypto: crypto,
|
||||||
writer: smplWriter);
|
writer: writer);
|
||||||
} else {
|
} else {
|
||||||
final schema = DHTSchema.dflt(oCnt: stride + 1);
|
final schema = DHTSchema.dflt(oCnt: stride + 1);
|
||||||
dhtRecord = await pool.createRecord(
|
dhtRecord = await pool.createRecord(
|
||||||
@ -120,21 +120,62 @@ class DHTShortArray {
|
|||||||
}
|
}
|
||||||
|
|
||||||
static Future<DHTShortArray> openOwned(
|
static Future<DHTShortArray> openOwned(
|
||||||
OwnedDHTRecordPointer ownedDHTRecordPointer, {
|
OwnedDHTRecordPointer ownedShortArrayRecordPointer, {
|
||||||
required String debugName,
|
required String debugName,
|
||||||
required TypedKey parent,
|
required TypedKey parent,
|
||||||
VeilidRoutingContext? routingContext,
|
VeilidRoutingContext? routingContext,
|
||||||
DHTRecordCrypto? crypto,
|
DHTRecordCrypto? crypto,
|
||||||
}) =>
|
}) =>
|
||||||
openWrite(
|
openWrite(
|
||||||
ownedDHTRecordPointer.recordKey,
|
ownedShortArrayRecordPointer.recordKey,
|
||||||
ownedDHTRecordPointer.owner,
|
ownedShortArrayRecordPointer.owner,
|
||||||
debugName: debugName,
|
debugName: debugName,
|
||||||
routingContext: routingContext,
|
routingContext: routingContext,
|
||||||
parent: parent,
|
parent: parent,
|
||||||
crypto: crypto,
|
crypto: crypto,
|
||||||
);
|
);
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// DHTCloseable
|
||||||
|
|
||||||
|
/// Check if the shortarray is open
|
||||||
|
@override
|
||||||
|
bool get isOpen => _openCount > 0;
|
||||||
|
|
||||||
|
/// The type of the openable scope
|
||||||
|
@override
|
||||||
|
FutureOr<DHTShortArray> scoped() => this;
|
||||||
|
|
||||||
|
/// Add a reference to this shortarray
|
||||||
|
@override
|
||||||
|
Future<DHTShortArray> ref() async => _mutex.protect(() async {
|
||||||
|
_openCount++;
|
||||||
|
return this;
|
||||||
|
});
|
||||||
|
|
||||||
|
/// Free all resources for the DHTShortArray
|
||||||
|
@override
|
||||||
|
Future<void> close() async => _mutex.protect(() async {
|
||||||
|
if (_openCount == 0) {
|
||||||
|
throw StateError('already closed');
|
||||||
|
}
|
||||||
|
_openCount--;
|
||||||
|
if (_openCount != 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await _watchController?.close();
|
||||||
|
_watchController = null;
|
||||||
|
await _head.close();
|
||||||
|
});
|
||||||
|
|
||||||
|
/// Free all resources for the DHTShortArray and delete it from the DHT
|
||||||
|
/// Will wait until the short array is closed to delete it
|
||||||
|
@override
|
||||||
|
Future<void> delete() async {
|
||||||
|
await _head.delete();
|
||||||
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////
|
||||||
// Public API
|
// Public API
|
||||||
|
|
||||||
@ -144,59 +185,8 @@ class DHTShortArray {
|
|||||||
/// Get the record pointer foir this shortarray
|
/// Get the record pointer foir this shortarray
|
||||||
OwnedDHTRecordPointer get recordPointer => _head.recordPointer;
|
OwnedDHTRecordPointer get recordPointer => _head.recordPointer;
|
||||||
|
|
||||||
/// Check if the shortarray is open
|
|
||||||
bool get isOpen => _head.isOpen;
|
|
||||||
|
|
||||||
/// Free all resources for the DHTShortArray
|
|
||||||
Future<void> close() async {
|
|
||||||
if (!isOpen) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
await _watchController?.close();
|
|
||||||
_watchController = null;
|
|
||||||
await _head.close();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Free all resources for the DHTShortArray and delete it from the DHT
|
|
||||||
/// Will wait until the short array is closed to delete it
|
|
||||||
Future<void> delete() async {
|
|
||||||
await _head.delete();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs a closure that guarantees the DHTShortArray
|
|
||||||
/// will be closed upon exit, even if an uncaught exception is thrown
|
|
||||||
Future<T> scope<T>(Future<T> Function(DHTShortArray) scopeFunction) async {
|
|
||||||
if (!isOpen) {
|
|
||||||
throw StateError('short array is not open"');
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
return await scopeFunction(this);
|
|
||||||
} finally {
|
|
||||||
await close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs a closure that guarantees the DHTShortArray
|
|
||||||
/// will be closed upon exit, and deleted if an an
|
|
||||||
/// uncaught exception is thrown
|
|
||||||
Future<T> deleteScope<T>(
|
|
||||||
Future<T> Function(DHTShortArray) scopeFunction) async {
|
|
||||||
if (!isOpen) {
|
|
||||||
throw StateError('short array is not open"');
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
final out = await scopeFunction(this);
|
|
||||||
await close();
|
|
||||||
return out;
|
|
||||||
} on Exception catch (_) {
|
|
||||||
await delete();
|
|
||||||
rethrow;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs a closure allowing read-only access to the shortarray
|
/// Runs a closure allowing read-only access to the shortarray
|
||||||
Future<T?> operate<T>(Future<T?> Function(DHTShortArrayRead) closure) async {
|
Future<T> operate<T>(Future<T> Function(DHTRandomRead) closure) async {
|
||||||
if (!isOpen) {
|
if (!isOpen) {
|
||||||
throw StateError('short array is not open"');
|
throw StateError('short array is not open"');
|
||||||
}
|
}
|
||||||
@ -209,14 +199,20 @@ class DHTShortArray {
|
|||||||
|
|
||||||
/// Runs a closure allowing read-write access to the shortarray
|
/// Runs a closure allowing read-write access to the shortarray
|
||||||
/// Makes only one attempt to consistently write the changes to the DHT
|
/// Makes only one attempt to consistently write the changes to the DHT
|
||||||
/// Returns (result, true) of the closure if the write could be performed
|
/// Returns result of the closure if the write could be performed
|
||||||
/// Returns (null, false) if the write could not be performed at this time
|
/// Throws DHTOperateException if the write could not be performed
|
||||||
Future<(T?, bool)> operateWrite<T>(
|
/// at this time
|
||||||
Future<T?> Function(DHTShortArrayWrite) closure) async =>
|
Future<T> operateWrite<T>(
|
||||||
_head.operateWrite((head) async {
|
Future<T> Function(DHTRandomReadWrite) closure) async {
|
||||||
|
if (!isOpen) {
|
||||||
|
throw StateError('short array is not open"');
|
||||||
|
}
|
||||||
|
|
||||||
|
return _head.operateWrite((head) async {
|
||||||
final writer = _DHTShortArrayWrite._(head);
|
final writer = _DHTShortArrayWrite._(head);
|
||||||
return closure(writer);
|
return closure(writer);
|
||||||
});
|
});
|
||||||
|
}
|
||||||
|
|
||||||
/// Runs a closure allowing read-write access to the shortarray
|
/// Runs a closure allowing read-write access to the shortarray
|
||||||
/// Will execute the closure multiple times if a consistent write to the DHT
|
/// Will execute the closure multiple times if a consistent write to the DHT
|
||||||
@ -225,7 +221,7 @@ class DHTShortArray {
|
|||||||
/// succeeded, returning false will trigger another eventual consistency
|
/// succeeded, returning false will trigger another eventual consistency
|
||||||
/// attempt.
|
/// attempt.
|
||||||
Future<void> operateWriteEventual(
|
Future<void> operateWriteEventual(
|
||||||
Future<bool> Function(DHTShortArrayWrite) closure,
|
Future<bool> Function(DHTRandomReadWrite) closure,
|
||||||
{Duration? timeout}) async {
|
{Duration? timeout}) async {
|
||||||
if (!isOpen) {
|
if (!isOpen) {
|
||||||
throw StateError('short array is not open"');
|
throw StateError('short array is not open"');
|
||||||
@ -276,6 +272,10 @@ class DHTShortArray {
|
|||||||
// Internal representation refreshed from head record
|
// Internal representation refreshed from head record
|
||||||
final _DHTShortArrayHead _head;
|
final _DHTShortArrayHead _head;
|
||||||
|
|
||||||
|
// Openable
|
||||||
|
int _openCount;
|
||||||
|
final _mutex = Mutex();
|
||||||
|
|
||||||
// Watch mutex to ensure we keep the representation valid
|
// Watch mutex to ensure we keep the representation valid
|
||||||
final Mutex _listenMutex = Mutex();
|
final Mutex _listenMutex = Mutex();
|
||||||
// Stream of external changes
|
// Stream of external changes
|
||||||
|
@ -41,19 +41,6 @@ class DHTShortArrayCubit<T> extends Cubit<DHTShortArrayBusyState<T>>
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// DHTShortArrayCubit.value({
|
|
||||||
// required DHTShortArray shortArray,
|
|
||||||
// required T Function(List<int> data) decodeElement,
|
|
||||||
// }) : _shortArray = shortArray,
|
|
||||||
// _decodeElement = decodeElement,
|
|
||||||
// super(const BlocBusyState(AsyncValue.loading())) {
|
|
||||||
// _initFuture = Future(() async {
|
|
||||||
// // Make initial state update
|
|
||||||
// unawaited(_refreshNoWait());
|
|
||||||
// _subscription = await shortArray.listen(_update);
|
|
||||||
// });
|
|
||||||
// }
|
|
||||||
|
|
||||||
Future<void> refresh({bool forceRefresh = false}) async {
|
Future<void> refresh({bool forceRefresh = false}) async {
|
||||||
await _initWait();
|
await _initWait();
|
||||||
await _refreshNoWait(forceRefresh: forceRefresh);
|
await _refreshNoWait(forceRefresh: forceRefresh);
|
||||||
@ -67,7 +54,8 @@ class DHTShortArrayCubit<T> extends Cubit<DHTShortArrayBusyState<T>>
|
|||||||
try {
|
try {
|
||||||
final newState = await _shortArray.operate((reader) async {
|
final newState = await _shortArray.operate((reader) async {
|
||||||
final offlinePositions = await reader.getOfflinePositions();
|
final offlinePositions = await reader.getOfflinePositions();
|
||||||
final allItems = (await reader.getAllItems(forceRefresh: forceRefresh))
|
final allItems =
|
||||||
|
(await reader.getItemRange(0, forceRefresh: forceRefresh))
|
||||||
?.indexed
|
?.indexed
|
||||||
.map((x) => DHTShortArrayElementState(
|
.map((x) => DHTShortArrayElementState(
|
||||||
value: _decodeElement(x.$2),
|
value: _decodeElement(x.$2),
|
||||||
@ -103,19 +91,19 @@ class DHTShortArrayCubit<T> extends Cubit<DHTShortArrayBusyState<T>>
|
|||||||
await super.close();
|
await super.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<R?> operate<R>(Future<R?> Function(DHTShortArrayRead) closure) async {
|
Future<R> operate<R>(Future<R> Function(DHTRandomRead) closure) async {
|
||||||
await _initWait();
|
await _initWait();
|
||||||
return _shortArray.operate(closure);
|
return _shortArray.operate(closure);
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<(R?, bool)> operateWrite<R>(
|
Future<R> operateWrite<R>(
|
||||||
Future<R?> Function(DHTShortArrayWrite) closure) async {
|
Future<R> Function(DHTRandomReadWrite) closure) async {
|
||||||
await _initWait();
|
await _initWait();
|
||||||
return _shortArray.operateWrite(closure);
|
return _shortArray.operateWrite(closure);
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<void> operateWriteEventual(
|
Future<void> operateWriteEventual(
|
||||||
Future<bool> Function(DHTShortArrayWrite) closure,
|
Future<bool> Function(DHTRandomReadWrite) closure,
|
||||||
{Duration? timeout}) async {
|
{Duration? timeout}) async {
|
||||||
await _initWait();
|
await _initWait();
|
||||||
return _shortArray.operateWriteEventual(closure, timeout: timeout);
|
return _shortArray.operateWriteEventual(closure, timeout: timeout);
|
||||||
|
@ -67,12 +67,8 @@ class _DHTShortArrayHead {
|
|||||||
|
|
||||||
Future<void> delete() async {
|
Future<void> delete() async {
|
||||||
await _headMutex.protect(() async {
|
await _headMutex.protect(() async {
|
||||||
final pool = DHTRecordPool.instance;
|
// Will deep delete all linked records as they are children
|
||||||
final futures = <Future<void>>[pool.deleteRecord(_headRecord.key)];
|
await _headRecord.delete();
|
||||||
for (final lr in _linkedRecords) {
|
|
||||||
futures.add(pool.deleteRecord(lr.key));
|
|
||||||
}
|
|
||||||
await Future.wait(futures);
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -82,8 +78,8 @@ class _DHTShortArrayHead {
|
|||||||
return closure(this);
|
return closure(this);
|
||||||
});
|
});
|
||||||
|
|
||||||
Future<(T?, bool)> operateWrite<T>(
|
Future<T> operateWrite<T>(
|
||||||
Future<T?> Function(_DHTShortArrayHead) closure) async =>
|
Future<T> Function(_DHTShortArrayHead) closure) async =>
|
||||||
_headMutex.protect(() async {
|
_headMutex.protect(() async {
|
||||||
final oldLinkedRecords = List.of(_linkedRecords);
|
final oldLinkedRecords = List.of(_linkedRecords);
|
||||||
final oldIndex = List.of(_index);
|
final oldIndex = List.of(_index);
|
||||||
@ -95,11 +91,11 @@ class _DHTShortArrayHead {
|
|||||||
if (!await _writeHead()) {
|
if (!await _writeHead()) {
|
||||||
// Failed to write head means head got overwritten so write should
|
// Failed to write head means head got overwritten so write should
|
||||||
// be considered failed
|
// be considered failed
|
||||||
return (null, false);
|
throw DHTExceptionTryAgain();
|
||||||
}
|
}
|
||||||
|
|
||||||
onUpdatedHead?.call();
|
onUpdatedHead?.call();
|
||||||
return (out, true);
|
return out;
|
||||||
} on Exception {
|
} on Exception {
|
||||||
// Exception means state needs to be reverted
|
// Exception means state needs to be reverted
|
||||||
_linkedRecords = oldLinkedRecords;
|
_linkedRecords = oldLinkedRecords;
|
||||||
@ -219,7 +215,7 @@ class _DHTShortArrayHead {
|
|||||||
}
|
}
|
||||||
} on Exception catch (_) {
|
} on Exception catch (_) {
|
||||||
// On any exception close the records we have opened
|
// On any exception close the records we have opened
|
||||||
await Future.wait(newRecords.entries.map((e) => e.value.close()));
|
await newRecords.entries.map((e) => e.value.close()).wait;
|
||||||
rethrow;
|
rethrow;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -249,34 +245,36 @@ class _DHTShortArrayHead {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Pull the latest or updated copy of the head record from the network
|
// Pull the latest or updated copy of the head record from the network
|
||||||
Future<bool> _loadHead(
|
Future<void> _loadHead() async {
|
||||||
{bool forceRefresh = true, bool onlyUpdates = false}) async {
|
|
||||||
// Get an updated head record copy if one exists
|
// Get an updated head record copy if one exists
|
||||||
final head = await _headRecord.getProtobuf(proto.DHTShortArray.fromBuffer,
|
final head = await _headRecord.getProtobuf(proto.DHTShortArray.fromBuffer,
|
||||||
subkey: 0, forceRefresh: forceRefresh, onlyUpdates: onlyUpdates);
|
subkey: 0, refreshMode: DHTRecordRefreshMode.network);
|
||||||
if (head == null) {
|
if (head == null) {
|
||||||
if (onlyUpdates) {
|
throw StateError('shortarray head missing during refresh');
|
||||||
// No update
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
throw StateError('head missing during refresh');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
await _updateHead(head);
|
await _updateHead(head);
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
// Linked record management
|
// Linked record management
|
||||||
|
|
||||||
Future<DHTRecord> _getOrCreateLinkedRecord(int recordNumber) async {
|
Future<DHTRecord> _getOrCreateLinkedRecord(
|
||||||
|
int recordNumber, bool allowCreate) async {
|
||||||
if (recordNumber == 0) {
|
if (recordNumber == 0) {
|
||||||
return _headRecord;
|
return _headRecord;
|
||||||
}
|
}
|
||||||
final pool = DHTRecordPool.instance;
|
|
||||||
recordNumber--;
|
recordNumber--;
|
||||||
while (recordNumber >= _linkedRecords.length) {
|
if (recordNumber < _linkedRecords.length) {
|
||||||
|
return _linkedRecords[recordNumber];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!allowCreate) {
|
||||||
|
throw StateError("asked for non-existent record and can't create");
|
||||||
|
}
|
||||||
|
|
||||||
|
final pool = DHTRecordPool.instance;
|
||||||
|
for (var rn = _linkedRecords.length; rn <= recordNumber; rn++) {
|
||||||
// Linked records must use SMPL schema so writer can be specified
|
// Linked records must use SMPL schema so writer can be specified
|
||||||
// Use the same writer as the head record
|
// Use the same writer as the head record
|
||||||
final smplWriter = _headRecord.writer!;
|
final smplWriter = _headRecord.writer!;
|
||||||
@ -298,9 +296,6 @@ class _DHTShortArrayHead {
|
|||||||
// Add to linked records
|
// Add to linked records
|
||||||
_linkedRecords.add(dhtRecord);
|
_linkedRecords.add(dhtRecord);
|
||||||
}
|
}
|
||||||
if (!await _writeHead()) {
|
|
||||||
throw StateError('failed to add linked record');
|
|
||||||
}
|
|
||||||
return _linkedRecords[recordNumber];
|
return _linkedRecords[recordNumber];
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -324,15 +319,16 @@ class _DHTShortArrayHead {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<DHTShortArrayHeadLookup> lookupPosition(int pos) async {
|
Future<DHTShortArrayHeadLookup> lookupPosition(
|
||||||
|
int pos, bool allowCreate) async {
|
||||||
final idx = _index[pos];
|
final idx = _index[pos];
|
||||||
return lookupIndex(idx);
|
return lookupIndex(idx, allowCreate);
|
||||||
}
|
}
|
||||||
|
|
||||||
Future<DHTShortArrayHeadLookup> lookupIndex(int idx) async {
|
Future<DHTShortArrayHeadLookup> lookupIndex(int idx, bool allowCreate) async {
|
||||||
final seq = idx < _seqs.length ? _seqs[idx] : 0xFFFFFFFF;
|
final seq = idx < _seqs.length ? _seqs[idx] : 0xFFFFFFFF;
|
||||||
final recordNumber = idx ~/ _stride;
|
final recordNumber = idx ~/ _stride;
|
||||||
final record = await _getOrCreateLinkedRecord(recordNumber);
|
final record = await _getOrCreateLinkedRecord(recordNumber, allowCreate);
|
||||||
final recordSubkey = (idx % _stride) + ((recordNumber == 0) ? 1 : 0);
|
final recordSubkey = (idx % _stride) + ((recordNumber == 0) ? 1 : 0);
|
||||||
return DHTShortArrayHeadLookup(
|
return DHTShortArrayHeadLookup(
|
||||||
record: record, recordSubkey: recordSubkey, seq: seq);
|
record: record, recordSubkey: recordSubkey, seq: seq);
|
||||||
@ -389,7 +385,7 @@ class _DHTShortArrayHead {
|
|||||||
assert(
|
assert(
|
||||||
newKeys.length <=
|
newKeys.length <=
|
||||||
(DHTShortArray.maxElements + (_stride - 1)) ~/ _stride,
|
(DHTShortArray.maxElements + (_stride - 1)) ~/ _stride,
|
||||||
'too many keys');
|
'too many keys: $newKeys.length');
|
||||||
assert(newKeys.length == linkedKeys.length, 'duplicated linked keys');
|
assert(newKeys.length == linkedKeys.length, 'duplicated linked keys');
|
||||||
final newIndex = index.toSet();
|
final newIndex = index.toSet();
|
||||||
assert(newIndex.length <= DHTShortArray.maxElements, 'too many indexes');
|
assert(newIndex.length <= DHTShortArray.maxElements, 'too many indexes');
|
||||||
|
@ -1,83 +1,29 @@
|
|||||||
part of 'dht_short_array.dart';
|
part of 'dht_short_array.dart';
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Reader interface
|
|
||||||
abstract class DHTShortArrayRead {
|
|
||||||
/// Returns the number of elements in the DHTShortArray
|
|
||||||
int get length;
|
|
||||||
|
|
||||||
/// Return the item at position 'pos' in the DHTShortArray. If 'forceRefresh'
|
|
||||||
/// is specified, the network will always be checked for newer values
|
|
||||||
/// rather than returning the existing locally stored copy of the elements.
|
|
||||||
Future<Uint8List?> getItem(int pos, {bool forceRefresh = false});
|
|
||||||
|
|
||||||
/// Return a list of all of the items in the DHTShortArray. If 'forceRefresh'
|
|
||||||
/// is specified, the network will always be checked for newer values
|
|
||||||
/// rather than returning the existing locally stored copy of the elements.
|
|
||||||
Future<List<Uint8List>?> getAllItems({bool forceRefresh = false});
|
|
||||||
|
|
||||||
/// Get a list of the positions that were written offline and not flushed yet
|
|
||||||
Future<Set<int>> getOfflinePositions();
|
|
||||||
}
|
|
||||||
|
|
||||||
extension DHTShortArrayReadExt on DHTShortArrayRead {
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like getItem but also parses the returned element as JSON
|
|
||||||
Future<T?> getItemJson<T>(T Function(dynamic) fromJson, int pos,
|
|
||||||
{bool forceRefresh = false}) =>
|
|
||||||
getItem(pos, forceRefresh: forceRefresh)
|
|
||||||
.then((out) => jsonDecodeOptBytes(fromJson, out));
|
|
||||||
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like getAllItems but also parses the returned elements as JSON
|
|
||||||
Future<List<T>?> getAllItemsJson<T>(T Function(dynamic) fromJson,
|
|
||||||
{bool forceRefresh = false}) =>
|
|
||||||
getAllItems(forceRefresh: forceRefresh)
|
|
||||||
.then((out) => out?.map(fromJson).toList());
|
|
||||||
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like getItem but also parses the returned element as a protobuf object
|
|
||||||
Future<T?> getItemProtobuf<T extends GeneratedMessage>(
|
|
||||||
T Function(List<int>) fromBuffer, int pos,
|
|
||||||
{bool forceRefresh = false}) =>
|
|
||||||
getItem(pos, forceRefresh: forceRefresh)
|
|
||||||
.then((out) => (out == null) ? null : fromBuffer(out));
|
|
||||||
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like getAllItems but also parses the returned elements as protobuf objects
|
|
||||||
Future<List<T>?> getAllItemsProtobuf<T extends GeneratedMessage>(
|
|
||||||
T Function(List<int>) fromBuffer,
|
|
||||||
{bool forceRefresh = false}) =>
|
|
||||||
getAllItems(forceRefresh: forceRefresh)
|
|
||||||
.then((out) => out?.map(fromBuffer).toList());
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////
|
||||||
// Reader-only implementation
|
// Reader-only implementation
|
||||||
|
|
||||||
class _DHTShortArrayRead implements DHTShortArrayRead {
|
class _DHTShortArrayRead implements DHTRandomRead {
|
||||||
_DHTShortArrayRead._(_DHTShortArrayHead head) : _head = head;
|
_DHTShortArrayRead._(_DHTShortArrayHead head) : _head = head;
|
||||||
|
|
||||||
/// Returns the number of elements in the DHTShortArray
|
|
||||||
@override
|
@override
|
||||||
int get length => _head.length;
|
int get length => _head.length;
|
||||||
|
|
||||||
/// Return the item at position 'pos' in the DHTShortArray. If 'forceRefresh'
|
|
||||||
/// is specified, the network will always be checked for newer values
|
|
||||||
/// rather than returning the existing locally stored copy of the elements.
|
|
||||||
@override
|
@override
|
||||||
Future<Uint8List?> getItem(int pos, {bool forceRefresh = false}) async {
|
Future<Uint8List?> getItem(int pos, {bool forceRefresh = false}) async {
|
||||||
if (pos < 0 || pos >= length) {
|
if (pos < 0 || pos >= length) {
|
||||||
throw IndexError.withLength(pos, length);
|
throw IndexError.withLength(pos, length);
|
||||||
}
|
}
|
||||||
|
|
||||||
final lookup = await _head.lookupPosition(pos);
|
final lookup = await _head.lookupPosition(pos, false);
|
||||||
|
|
||||||
final refresh = forceRefresh || _head.positionNeedsRefresh(pos);
|
final refresh = forceRefresh || _head.positionNeedsRefresh(pos);
|
||||||
final outSeqNum = Output<int>();
|
final outSeqNum = Output<int>();
|
||||||
final out = lookup.record.get(
|
final out = lookup.record.get(
|
||||||
subkey: lookup.recordSubkey,
|
subkey: lookup.recordSubkey,
|
||||||
forceRefresh: refresh,
|
refreshMode: refresh
|
||||||
|
? DHTRecordRefreshMode.network
|
||||||
|
: DHTRecordRefreshMode.cached,
|
||||||
outSeqNum: outSeqNum);
|
outSeqNum: outSeqNum);
|
||||||
if (outSeqNum.value != null) {
|
if (outSeqNum.value != null) {
|
||||||
_head.updatePositionSeq(pos, false, outSeqNum.value!);
|
_head.updatePositionSeq(pos, false, outSeqNum.value!);
|
||||||
@ -86,17 +32,29 @@ class _DHTShortArrayRead implements DHTShortArrayRead {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return a list of all of the items in the DHTShortArray. If 'forceRefresh'
|
(int, int) _clampStartLen(int start, int? len) {
|
||||||
/// is specified, the network will always be checked for newer values
|
len ??= _head.length;
|
||||||
/// rather than returning the existing locally stored copy of the elements.
|
if (start < 0) {
|
||||||
@override
|
throw IndexError.withLength(start, _head.length);
|
||||||
Future<List<Uint8List>?> getAllItems({bool forceRefresh = false}) async {
|
}
|
||||||
final out = <Uint8List>[];
|
if (start > _head.length) {
|
||||||
|
throw IndexError.withLength(start, _head.length);
|
||||||
|
}
|
||||||
|
if ((len + start) > _head.length) {
|
||||||
|
len = _head.length - start;
|
||||||
|
}
|
||||||
|
return (start, len);
|
||||||
|
}
|
||||||
|
|
||||||
final chunks = Iterable<int>.generate(_head.length)
|
@override
|
||||||
.slices(maxDHTConcurrency)
|
Future<List<Uint8List>?> getItemRange(int start,
|
||||||
.map((chunk) =>
|
{int? length, bool forceRefresh = false}) async {
|
||||||
chunk.map((pos) => getItem(pos, forceRefresh: forceRefresh)));
|
final out = <Uint8List>[];
|
||||||
|
(start, length) = _clampStartLen(start, length);
|
||||||
|
|
||||||
|
final chunks = Iterable<int>.generate(length).slices(maxDHTConcurrency).map(
|
||||||
|
(chunk) => chunk
|
||||||
|
.map((pos) => getItem(pos + start, forceRefresh: forceRefresh)));
|
||||||
|
|
||||||
for (final chunk in chunks) {
|
for (final chunk in chunks) {
|
||||||
final elems = await chunk.wait;
|
final elems = await chunk.wait;
|
||||||
@ -109,9 +67,10 @@ class _DHTShortArrayRead implements DHTShortArrayRead {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get a list of the positions that were written offline and not flushed yet
|
|
||||||
@override
|
@override
|
||||||
Future<Set<int>> getOfflinePositions() async {
|
Future<Set<int>> getOfflinePositions() async {
|
||||||
|
final (start, length) = _clampStartLen(0, DHTShortArray.maxElements);
|
||||||
|
|
||||||
final indexOffline = <int>{};
|
final indexOffline = <int>{};
|
||||||
final inspects = await [
|
final inspects = await [
|
||||||
_head._headRecord.inspect(),
|
_head._headRecord.inspect(),
|
||||||
@ -134,7 +93,7 @@ class _DHTShortArrayRead implements DHTShortArrayRead {
|
|||||||
|
|
||||||
// See which positions map to offline indexes
|
// See which positions map to offline indexes
|
||||||
final positionOffline = <int>{};
|
final positionOffline = <int>{};
|
||||||
for (var i = 0; i < _head._index.length; i++) {
|
for (var i = start; i < (start + length); i++) {
|
||||||
final idx = _head._index[i];
|
final idx = _head._index[i];
|
||||||
if (indexOffline.contains(idx)) {
|
if (indexOffline.contains(idx)) {
|
||||||
positionOffline.add(i);
|
positionOffline.add(i);
|
||||||
|
@ -1,134 +1,98 @@
|
|||||||
part of 'dht_short_array.dart';
|
part of 'dht_short_array.dart';
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Writer interface
|
|
||||||
abstract class DHTShortArrayWrite implements DHTShortArrayRead {
|
|
||||||
/// Try to add an item to the end of the DHTShortArray. Return true if the
|
|
||||||
/// element was successfully added, and false if the state changed before
|
|
||||||
/// the element could be added or a newer value was found on the network.
|
|
||||||
/// This may throw an exception if the number elements added exceeds the
|
|
||||||
/// built-in limit of 'maxElements = 256' entries.
|
|
||||||
Future<bool> tryAddItem(Uint8List value);
|
|
||||||
|
|
||||||
/// Try to insert an item as position 'pos' of the DHTShortArray.
|
|
||||||
/// Return true if the element was successfully inserted, and false if the
|
|
||||||
/// state changed before the element could be inserted or a newer value was
|
|
||||||
/// found on the network.
|
|
||||||
/// This may throw an exception if the number elements added exceeds the
|
|
||||||
/// built-in limit of 'maxElements = 256' entries.
|
|
||||||
Future<bool> tryInsertItem(int pos, Uint8List value);
|
|
||||||
|
|
||||||
/// Try to swap items at position 'aPos' and 'bPos' in the DHTShortArray.
|
|
||||||
/// Return true if the elements were successfully swapped, and false if the
|
|
||||||
/// state changed before the elements could be swapped or newer values were
|
|
||||||
/// found on the network.
|
|
||||||
/// This may throw an exception if either of the positions swapped exceed
|
|
||||||
/// the length of the list
|
|
||||||
Future<bool> trySwapItem(int aPos, int bPos);
|
|
||||||
|
|
||||||
/// Try to remove an item at position 'pos' in the DHTShortArray.
|
|
||||||
/// Return the element if it was successfully removed, and null if the
|
|
||||||
/// state changed before the elements could be removed or newer values were
|
|
||||||
/// found on the network.
|
|
||||||
/// This may throw an exception if the position removed exceeeds the length of
|
|
||||||
/// the list.
|
|
||||||
Future<Uint8List?> tryRemoveItem(int pos);
|
|
||||||
|
|
||||||
/// Try to remove all items in the DHTShortArray.
|
|
||||||
/// Return true if it was successfully cleared, and false if the
|
|
||||||
/// state changed before the elements could be cleared or newer values were
|
|
||||||
/// found on the network.
|
|
||||||
Future<bool> tryClear();
|
|
||||||
|
|
||||||
/// Try to set an item at position 'pos' of the DHTShortArray.
|
|
||||||
/// If the set was successful this returns:
|
|
||||||
/// * The prior contents of the element, or null if there was no value yet
|
|
||||||
/// * A boolean true
|
|
||||||
/// If the set was found a newer value on the network:
|
|
||||||
/// * The newer value of the element, or null if the head record
|
|
||||||
/// changed.
|
|
||||||
/// * A boolean false
|
|
||||||
/// This may throw an exception if the position exceeds the built-in limit of
|
|
||||||
/// 'maxElements = 256' entries.
|
|
||||||
Future<(Uint8List?, bool)> tryWriteItem(int pos, Uint8List newValue);
|
|
||||||
}
|
|
||||||
|
|
||||||
extension DHTShortArrayWriteExt on DHTShortArrayWrite {
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like removeItem but also parses the returned element as JSON
|
|
||||||
Future<T?> tryRemoveItemJson<T>(
|
|
||||||
T Function(dynamic) fromJson,
|
|
||||||
int pos,
|
|
||||||
) =>
|
|
||||||
tryRemoveItem(pos).then((out) => jsonDecodeOptBytes(fromJson, out));
|
|
||||||
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like removeItem but also parses the returned element as JSON
|
|
||||||
Future<T?> tryRemoveItemProtobuf<T extends GeneratedMessage>(
|
|
||||||
T Function(List<int>) fromBuffer, int pos) =>
|
|
||||||
getItem(pos).then((out) => (out == null) ? null : fromBuffer(out));
|
|
||||||
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like tryWriteItem but also encodes the input value as JSON and parses the
|
|
||||||
/// returned element as JSON
|
|
||||||
Future<(T?, bool)> tryWriteItemJson<T>(
|
|
||||||
T Function(dynamic) fromJson,
|
|
||||||
int pos,
|
|
||||||
T newValue,
|
|
||||||
) =>
|
|
||||||
tryWriteItem(pos, jsonEncodeBytes(newValue))
|
|
||||||
.then((out) => (jsonDecodeOptBytes(fromJson, out.$1), out.$2));
|
|
||||||
|
|
||||||
/// Convenience function:
|
|
||||||
/// Like tryWriteItem but also encodes the input value as a protobuf object
|
|
||||||
/// and parses the returned element as a protobuf object
|
|
||||||
Future<(T?, bool)> tryWriteItemProtobuf<T extends GeneratedMessage>(
|
|
||||||
T Function(List<int>) fromBuffer,
|
|
||||||
int pos,
|
|
||||||
T newValue,
|
|
||||||
) =>
|
|
||||||
tryWriteItem(pos, newValue.writeToBuffer()).then(
|
|
||||||
(out) => ((out.$1 == null ? null : fromBuffer(out.$1!)), out.$2));
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////
|
||||||
// Writer implementation
|
// Writer implementation
|
||||||
|
|
||||||
class _DHTShortArrayWrite extends _DHTShortArrayRead
|
class _DHTShortArrayWrite extends _DHTShortArrayRead
|
||||||
implements DHTShortArrayWrite {
|
implements DHTRandomReadWrite {
|
||||||
_DHTShortArrayWrite._(super.head) : super._();
|
_DHTShortArrayWrite._(super.head) : super._();
|
||||||
|
|
||||||
@override
|
@override
|
||||||
Future<bool> tryAddItem(Uint8List value) async {
|
Future<bool> tryAddItem(Uint8List value) =>
|
||||||
// Allocate empty index at the end of the list
|
tryInsertItem(_head.length, value);
|
||||||
final pos = _head.length;
|
|
||||||
_head.allocateIndex(pos);
|
|
||||||
|
|
||||||
// Write item
|
@override
|
||||||
final (_, wasSet) = await tryWriteItem(pos, value);
|
Future<bool> tryAddItems(List<Uint8List> values) =>
|
||||||
if (!wasSet) {
|
tryInsertItems(_head.length, values);
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@override
|
@override
|
||||||
Future<bool> tryInsertItem(int pos, Uint8List value) async {
|
Future<bool> tryInsertItem(int pos, Uint8List value) async {
|
||||||
// Allocate empty index at position
|
if (pos < 0 || pos > _head.length) {
|
||||||
_head.allocateIndex(pos);
|
throw IndexError.withLength(pos, _head.length);
|
||||||
|
|
||||||
// Write item
|
|
||||||
final (_, wasSet) = await tryWriteItem(pos, value);
|
|
||||||
if (!wasSet) {
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Allocate empty index at position
|
||||||
|
_head.allocateIndex(pos);
|
||||||
|
var success = false;
|
||||||
|
try {
|
||||||
|
// Write item
|
||||||
|
success = await tryWriteItem(pos, value);
|
||||||
|
} finally {
|
||||||
|
if (!success) {
|
||||||
|
_head.freeIndex(pos);
|
||||||
|
}
|
||||||
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@override
|
@override
|
||||||
Future<bool> trySwapItem(int aPos, int bPos) async {
|
Future<bool> tryInsertItems(int pos, List<Uint8List> values) async {
|
||||||
|
if (pos < 0 || pos > _head.length) {
|
||||||
|
throw IndexError.withLength(pos, _head.length);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate empty indices
|
||||||
|
for (var i = 0; i < values.length; i++) {
|
||||||
|
_head.allocateIndex(pos + i);
|
||||||
|
}
|
||||||
|
|
||||||
|
var success = true;
|
||||||
|
final outSeqNums = List.generate(values.length, (_) => Output<int>());
|
||||||
|
final lookups = <DHTShortArrayHeadLookup>[];
|
||||||
|
try {
|
||||||
|
// do all lookups
|
||||||
|
for (var i = 0; i < values.length; i++) {
|
||||||
|
final lookup = await _head.lookupPosition(pos + i, true);
|
||||||
|
lookups.add(lookup);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write items in parallel
|
||||||
|
final dws = DelayedWaitSet<void>();
|
||||||
|
for (var i = 0; i < values.length; i++) {
|
||||||
|
final lookup = lookups[i];
|
||||||
|
final value = values[i];
|
||||||
|
final outSeqNum = outSeqNums[i];
|
||||||
|
dws.add(() async {
|
||||||
|
final outValue = await lookup.record.tryWriteBytes(value,
|
||||||
|
subkey: lookup.recordSubkey, outSeqNum: outSeqNum);
|
||||||
|
if (outValue != null) {
|
||||||
|
success = false;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
await dws(chunkSize: maxDHTConcurrency, onChunkDone: (_) => success);
|
||||||
|
} finally {
|
||||||
|
// Update sequence numbers
|
||||||
|
for (var i = 0; i < values.length; i++) {
|
||||||
|
if (outSeqNums[i].value != null) {
|
||||||
|
_head.updatePositionSeq(pos + i, true, outSeqNums[i].value!);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free indices if this was a failure
|
||||||
|
if (!success) {
|
||||||
|
for (var i = 0; i < values.length; i++) {
|
||||||
|
_head.freeIndex(pos);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return success;
|
||||||
|
}
|
||||||
|
|
||||||
|
@override
|
||||||
|
Future<void> swapItem(int aPos, int bPos) async {
|
||||||
if (aPos < 0 || aPos >= _head.length) {
|
if (aPos < 0 || aPos >= _head.length) {
|
||||||
throw IndexError.withLength(aPos, _head.length);
|
throw IndexError.withLength(aPos, _head.length);
|
||||||
}
|
}
|
||||||
@ -137,16 +101,14 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead
|
|||||||
}
|
}
|
||||||
// Swap indices
|
// Swap indices
|
||||||
_head.swapIndex(aPos, bPos);
|
_head.swapIndex(aPos, bPos);
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@override
|
@override
|
||||||
Future<Uint8List> tryRemoveItem(int pos) async {
|
Future<void> removeItem(int pos, {Output<Uint8List>? output}) async {
|
||||||
if (pos < 0 || pos >= _head.length) {
|
if (pos < 0 || pos >= _head.length) {
|
||||||
throw IndexError.withLength(pos, _head.length);
|
throw IndexError.withLength(pos, _head.length);
|
||||||
}
|
}
|
||||||
final lookup = await _head.lookupPosition(pos);
|
final lookup = await _head.lookupPosition(pos, true);
|
||||||
|
|
||||||
final outSeqNum = Output<int>();
|
final outSeqNum = Output<int>();
|
||||||
|
|
||||||
@ -162,44 +124,44 @@ class _DHTShortArrayWrite extends _DHTShortArrayRead
|
|||||||
throw StateError('Element does not exist');
|
throw StateError('Element does not exist');
|
||||||
}
|
}
|
||||||
_head.freeIndex(pos);
|
_head.freeIndex(pos);
|
||||||
return result;
|
output?.save(result);
|
||||||
}
|
}
|
||||||
|
|
||||||
@override
|
@override
|
||||||
Future<bool> tryClear() async {
|
Future<void> clear() async {
|
||||||
_head.clearIndex();
|
_head.clearIndex();
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@override
|
@override
|
||||||
Future<(Uint8List?, bool)> tryWriteItem(int pos, Uint8List newValue) async {
|
Future<bool> tryWriteItem(int pos, Uint8List newValue,
|
||||||
|
{Output<Uint8List>? output}) async {
|
||||||
if (pos < 0 || pos >= _head.length) {
|
if (pos < 0 || pos >= _head.length) {
|
||||||
throw IndexError.withLength(pos, _head.length);
|
throw IndexError.withLength(pos, _head.length);
|
||||||
}
|
}
|
||||||
final lookup = await _head.lookupPosition(pos);
|
final lookup = await _head.lookupPosition(pos, true);
|
||||||
|
|
||||||
final outSeqNum = Output<int>();
|
|
||||||
|
|
||||||
|
final outSeqNumRead = Output<int>();
|
||||||
final oldValue = lookup.seq == 0xFFFFFFFF
|
final oldValue = lookup.seq == 0xFFFFFFFF
|
||||||
? null
|
? null
|
||||||
: await lookup.record
|
: await lookup.record
|
||||||
.get(subkey: lookup.recordSubkey, outSeqNum: outSeqNum);
|
.get(subkey: lookup.recordSubkey, outSeqNum: outSeqNumRead);
|
||||||
|
if (outSeqNumRead.value != null) {
|
||||||
if (outSeqNum.value != null) {
|
_head.updatePositionSeq(pos, false, outSeqNumRead.value!);
|
||||||
_head.updatePositionSeq(pos, false, outSeqNum.value!);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
final outSeqNumWrite = Output<int>();
|
||||||
final result = await lookup.record.tryWriteBytes(newValue,
|
final result = await lookup.record.tryWriteBytes(newValue,
|
||||||
subkey: lookup.recordSubkey, outSeqNum: outSeqNum);
|
subkey: lookup.recordSubkey, outSeqNum: outSeqNumWrite);
|
||||||
|
if (outSeqNumWrite.value != null) {
|
||||||
if (outSeqNum.value != null) {
|
_head.updatePositionSeq(pos, true, outSeqNumWrite.value!);
|
||||||
_head.updatePositionSeq(pos, true, outSeqNum.value!);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (result != null) {
|
if (result != null) {
|
||||||
// A result coming back means the element was overwritten already
|
// A result coming back means the element was overwritten already
|
||||||
return (result, false);
|
output?.save(result);
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
return (oldValue, true);
|
output?.save(oldValue);
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,51 @@
|
|||||||
|
import 'dart:typed_data';
|
||||||
|
|
||||||
|
import 'package:protobuf/protobuf.dart';
|
||||||
|
|
||||||
|
import '../../../veilid_support.dart';
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Append/truncate interface
|
||||||
|
abstract class DHTAppendTruncate {
|
||||||
|
/// Try to add an item to the end of the DHT data structure.
|
||||||
|
/// Return true if the element was successfully added, and false if the state
|
||||||
|
/// changed before the element could be added or a newer value was found on
|
||||||
|
/// the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds limits.
|
||||||
|
Future<bool> tryAppendItem(Uint8List value);
|
||||||
|
|
||||||
|
/// Try to add a list of items to the end of the DHT data structure.
|
||||||
|
/// Return true if the elements were successfully added, and false if the
|
||||||
|
/// state changed before the element could be added or a newer value was found
|
||||||
|
/// on the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds limits.
|
||||||
|
Future<bool> tryAppendItems(List<Uint8List> values);
|
||||||
|
|
||||||
|
/// Try to remove a number of items from the head of the DHT data structure.
|
||||||
|
/// Throws StateError if count < 0
|
||||||
|
Future<void> truncate(int count);
|
||||||
|
|
||||||
|
/// Remove all items in the DHT data structure.
|
||||||
|
Future<void> clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract class DHTAppendTruncateRandomRead
|
||||||
|
implements DHTAppendTruncate, DHTRandomRead {}
|
||||||
|
|
||||||
|
extension DHTAppendTruncateExt on DHTAppendTruncate {
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like tryAppendItem but also encodes the input value as JSON and parses the
|
||||||
|
/// returned element as JSON
|
||||||
|
Future<bool> tryAppendItemJson<T>(
|
||||||
|
T newValue,
|
||||||
|
) =>
|
||||||
|
tryAppendItem(jsonEncodeBytes(newValue));
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like tryAppendItem but also encodes the input value as a protobuf object
|
||||||
|
/// and parses the returned element as a protobuf object
|
||||||
|
Future<bool> tryAppendItemProtobuf<T extends GeneratedMessage>(
|
||||||
|
T newValue,
|
||||||
|
) =>
|
||||||
|
tryAppendItem(newValue.writeToBuffer());
|
||||||
|
}
|
@ -0,0 +1,59 @@
|
|||||||
|
import 'dart:async';
|
||||||
|
|
||||||
|
import 'package:meta/meta.dart';
|
||||||
|
|
||||||
|
abstract class DHTCloseable<C, D> {
|
||||||
|
bool get isOpen;
|
||||||
|
@protected
|
||||||
|
FutureOr<D> scoped();
|
||||||
|
Future<C> ref();
|
||||||
|
Future<void> close();
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract class DHTDeleteable<C, D> extends DHTCloseable<C, D> {
|
||||||
|
Future<void> delete();
|
||||||
|
}
|
||||||
|
|
||||||
|
extension DHTCloseableExt<C, D> on DHTCloseable<C, D> {
|
||||||
|
/// Runs a closure that guarantees the DHTCloseable
|
||||||
|
/// will be closed upon exit, even if an uncaught exception is thrown
|
||||||
|
Future<T> scope<T>(Future<T> Function(D) scopeFunction) async {
|
||||||
|
if (!isOpen) {
|
||||||
|
throw StateError('not open in scope');
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
return await scopeFunction(await scoped());
|
||||||
|
} finally {
|
||||||
|
await close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
extension DHTDeletableExt<C, D> on DHTDeleteable<C, D> {
|
||||||
|
/// Runs a closure that guarantees the DHTCloseable
|
||||||
|
/// will be closed upon exit, and deleted if an an
|
||||||
|
/// uncaught exception is thrown
|
||||||
|
Future<T> deleteScope<T>(Future<T> Function(D) scopeFunction) async {
|
||||||
|
if (!isOpen) {
|
||||||
|
throw StateError('not open in deleteScope');
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
return await scopeFunction(await scoped());
|
||||||
|
} on Exception {
|
||||||
|
await delete();
|
||||||
|
rethrow;
|
||||||
|
} finally {
|
||||||
|
await close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Scopes a closure that conditionally deletes the DHTCloseable on exit
|
||||||
|
Future<T> maybeDeleteScope<T>(
|
||||||
|
bool delete, Future<T> Function(D) scopeFunction) async {
|
||||||
|
if (delete) {
|
||||||
|
return deleteScope(scopeFunction);
|
||||||
|
}
|
||||||
|
return scope(scopeFunction);
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,63 @@
|
|||||||
|
import 'dart:typed_data';
|
||||||
|
|
||||||
|
import 'package:protobuf/protobuf.dart';
|
||||||
|
|
||||||
|
import '../../../veilid_support.dart';
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Reader interface
|
||||||
|
abstract class DHTRandomRead {
|
||||||
|
/// Returns the number of elements in the DHTArray
|
||||||
|
/// This number will be >= 0 and <= DHTShortArray.maxElements (256)
|
||||||
|
int get length;
|
||||||
|
|
||||||
|
/// Return the item at position 'pos' in the DHTArray. If 'forceRefresh'
|
||||||
|
/// is specified, the network will always be checked for newer values
|
||||||
|
/// rather than returning the existing locally stored copy of the elements.
|
||||||
|
/// * 'pos' must be >= 0 and < 'length'
|
||||||
|
Future<Uint8List?> getItem(int pos, {bool forceRefresh = false});
|
||||||
|
|
||||||
|
/// Return a list of a range of items in the DHTArray. If 'forceRefresh'
|
||||||
|
/// is specified, the network will always be checked for newer values
|
||||||
|
/// rather than returning the existing locally stored copy of the elements.
|
||||||
|
/// * 'start' must be >= 0
|
||||||
|
/// * 'len' must be >= 0 and <= DHTShortArray.maxElements (256) and defaults
|
||||||
|
/// to the maximum length
|
||||||
|
Future<List<Uint8List>?> getItemRange(int start,
|
||||||
|
{int? length, bool forceRefresh = false});
|
||||||
|
|
||||||
|
/// Get a list of the positions that were written offline and not flushed yet
|
||||||
|
Future<Set<int>> getOfflinePositions();
|
||||||
|
}
|
||||||
|
|
||||||
|
extension DHTRandomReadExt on DHTRandomRead {
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like getItem but also parses the returned element as JSON
|
||||||
|
Future<T?> getItemJson<T>(T Function(dynamic) fromJson, int pos,
|
||||||
|
{bool forceRefresh = false}) =>
|
||||||
|
getItem(pos, forceRefresh: forceRefresh)
|
||||||
|
.then((out) => jsonDecodeOptBytes(fromJson, out));
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like getAllItems but also parses the returned elements as JSON
|
||||||
|
Future<List<T>?> getItemRangeJson<T>(T Function(dynamic) fromJson, int start,
|
||||||
|
{int? length, bool forceRefresh = false}) =>
|
||||||
|
getItemRange(start, length: length, forceRefresh: forceRefresh)
|
||||||
|
.then((out) => out?.map(fromJson).toList());
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like getItem but also parses the returned element as a protobuf object
|
||||||
|
Future<T?> getItemProtobuf<T extends GeneratedMessage>(
|
||||||
|
T Function(List<int>) fromBuffer, int pos,
|
||||||
|
{bool forceRefresh = false}) =>
|
||||||
|
getItem(pos, forceRefresh: forceRefresh)
|
||||||
|
.then((out) => (out == null) ? null : fromBuffer(out));
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like getAllItems but also parses the returned elements as protobuf objects
|
||||||
|
Future<List<T>?> getItemRangeProtobuf<T extends GeneratedMessage>(
|
||||||
|
T Function(List<int>) fromBuffer, int start,
|
||||||
|
{int? length, bool forceRefresh = false}) =>
|
||||||
|
getItemRange(start, length: length, forceRefresh: forceRefresh)
|
||||||
|
.then((out) => out?.map(fromBuffer).toList());
|
||||||
|
}
|
@ -0,0 +1,119 @@
|
|||||||
|
import 'dart:typed_data';
|
||||||
|
|
||||||
|
import 'package:protobuf/protobuf.dart';
|
||||||
|
|
||||||
|
import '../../../veilid_support.dart';
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Writer interface
|
||||||
|
abstract class DHTRandomWrite {
|
||||||
|
/// Try to set an item at position 'pos' of the DHTArray.
|
||||||
|
/// If the set was successful this returns:
|
||||||
|
/// * A boolean true
|
||||||
|
/// * outValue will return the prior contents of the element,
|
||||||
|
/// or null if there was no value yet
|
||||||
|
///
|
||||||
|
/// If the set was found a newer value on the network this returns:
|
||||||
|
/// * A boolean false
|
||||||
|
/// * outValue will return the newer value of the element,
|
||||||
|
/// or null if the head record changed.
|
||||||
|
///
|
||||||
|
/// This may throw an exception if the position exceeds the built-in limit of
|
||||||
|
/// 'maxElements = 256' entries.
|
||||||
|
Future<bool> tryWriteItem(int pos, Uint8List newValue,
|
||||||
|
{Output<Uint8List>? output});
|
||||||
|
|
||||||
|
/// Try to add an item to the end of the DHTArray. Return true if the
|
||||||
|
/// element was successfully added, and false if the state changed before
|
||||||
|
/// the element could be added or a newer value was found on the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds the
|
||||||
|
/// built-in limit of 'maxElements = 256' entries.
|
||||||
|
Future<bool> tryAddItem(Uint8List value);
|
||||||
|
|
||||||
|
/// Try to add a list of items to the end of the DHTArray. Return true if the
|
||||||
|
/// elements were successfully added, and false if the state changed before
|
||||||
|
/// the elements could be added or a newer value was found on the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds the
|
||||||
|
/// built-in limit of 'maxElements = 256' entries.
|
||||||
|
Future<bool> tryAddItems(List<Uint8List> values);
|
||||||
|
|
||||||
|
/// Try to insert an item as position 'pos' of the DHTArray.
|
||||||
|
/// Return true if the element was successfully inserted, and false if the
|
||||||
|
/// state changed before the element could be inserted or a newer value was
|
||||||
|
/// found on the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds the
|
||||||
|
/// built-in limit of 'maxElements = 256' entries.
|
||||||
|
Future<bool> tryInsertItem(int pos, Uint8List value);
|
||||||
|
|
||||||
|
/// Try to insert items at position 'pos' of the DHTArray.
|
||||||
|
/// Return true if the elements were successfully inserted, and false if the
|
||||||
|
/// state changed before the elements could be inserted or a newer value was
|
||||||
|
/// found on the network.
|
||||||
|
/// This may throw an exception if the number elements added exceeds the
|
||||||
|
/// built-in limit of 'maxElements = 256' entries.
|
||||||
|
Future<bool> tryInsertItems(int pos, List<Uint8List> values);
|
||||||
|
|
||||||
|
/// Swap items at position 'aPos' and 'bPos' in the DHTArray.
|
||||||
|
/// Throws IndexError if either of the positions swapped exceed
|
||||||
|
/// the length of the list
|
||||||
|
Future<void> swapItem(int aPos, int bPos);
|
||||||
|
|
||||||
|
/// Remove an item at position 'pos' in the DHTArray.
|
||||||
|
/// If the remove was successful this returns:
|
||||||
|
/// * outValue will return the prior contents of the element
|
||||||
|
/// Throws IndexError if the position removed exceeds the length of
|
||||||
|
/// the list.
|
||||||
|
Future<void> removeItem(int pos, {Output<Uint8List>? output});
|
||||||
|
|
||||||
|
/// Remove all items in the DHTShortArray.
|
||||||
|
Future<void> clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
extension DHTRandomWriteExt on DHTRandomWrite {
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like tryWriteItem but also encodes the input value as JSON and parses the
|
||||||
|
/// returned element as JSON
|
||||||
|
Future<bool> tryWriteItemJson<T>(
|
||||||
|
T Function(dynamic) fromJson, int pos, T newValue,
|
||||||
|
{Output<T>? output}) async {
|
||||||
|
final outValueBytes = output == null ? null : Output<Uint8List>();
|
||||||
|
final out = await tryWriteItem(pos, jsonEncodeBytes(newValue),
|
||||||
|
output: outValueBytes);
|
||||||
|
output.mapSave(outValueBytes, (b) => jsonDecodeBytes(fromJson, b));
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like tryWriteItem but also encodes the input value as a protobuf object
|
||||||
|
/// and parses the returned element as a protobuf object
|
||||||
|
Future<bool> tryWriteItemProtobuf<T extends GeneratedMessage>(
|
||||||
|
T Function(List<int>) fromBuffer, int pos, T newValue,
|
||||||
|
{Output<T>? output}) async {
|
||||||
|
final outValueBytes = output == null ? null : Output<Uint8List>();
|
||||||
|
final out = await tryWriteItem(pos, newValue.writeToBuffer(),
|
||||||
|
output: outValueBytes);
|
||||||
|
output.mapSave(outValueBytes, fromBuffer);
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like removeItem but also parses the returned element as JSON
|
||||||
|
Future<void> removeItemJson<T>(T Function(dynamic) fromJson, int pos,
|
||||||
|
{Output<T>? output}) async {
|
||||||
|
final outValueBytes = output == null ? null : Output<Uint8List>();
|
||||||
|
await removeItem(pos, output: outValueBytes);
|
||||||
|
output.mapSave(outValueBytes, (b) => jsonDecodeBytes(fromJson, b));
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience function:
|
||||||
|
/// Like removeItem but also parses the returned element as JSON
|
||||||
|
Future<void> removeItemProtobuf<T extends GeneratedMessage>(
|
||||||
|
T Function(List<int>) fromBuffer, int pos,
|
||||||
|
{Output<T>? output}) async {
|
||||||
|
final outValueBytes = output == null ? null : Output<Uint8List>();
|
||||||
|
await removeItem(pos, output: outValueBytes);
|
||||||
|
output.mapSave(outValueBytes, fromBuffer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract class DHTRandomReadWrite implements DHTRandomRead, DHTRandomWrite {}
|
@ -0,0 +1,5 @@
|
|||||||
|
class DHTExceptionTryAgain implements Exception {
|
||||||
|
DHTExceptionTryAgain(
|
||||||
|
[this.cause = 'operation failed due to newer dht value']);
|
||||||
|
String cause;
|
||||||
|
}
|
@ -0,0 +1,4 @@
|
|||||||
|
export 'dht_closeable.dart';
|
||||||
|
export 'dht_random_read.dart';
|
||||||
|
export 'dht_random_write.dart';
|
||||||
|
export 'exceptions.dart';
|
@ -83,6 +83,68 @@ class DHTData extends $pb.GeneratedMessage {
|
|||||||
void clearSize() => clearField(4);
|
void clearSize() => clearField(4);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class DHTLog extends $pb.GeneratedMessage {
|
||||||
|
factory DHTLog() => create();
|
||||||
|
DHTLog._() : super();
|
||||||
|
factory DHTLog.fromBuffer($core.List<$core.int> i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromBuffer(i, r);
|
||||||
|
factory DHTLog.fromJson($core.String i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromJson(i, r);
|
||||||
|
|
||||||
|
static final $pb.BuilderInfo _i = $pb.BuilderInfo(_omitMessageNames ? '' : 'DHTLog', package: const $pb.PackageName(_omitMessageNames ? '' : 'dht'), createEmptyInstance: create)
|
||||||
|
..a<$core.int>(1, _omitFieldNames ? '' : 'head', $pb.PbFieldType.OU3)
|
||||||
|
..a<$core.int>(2, _omitFieldNames ? '' : 'tail', $pb.PbFieldType.OU3)
|
||||||
|
..a<$core.int>(3, _omitFieldNames ? '' : 'stride', $pb.PbFieldType.OU3)
|
||||||
|
..hasRequiredFields = false
|
||||||
|
;
|
||||||
|
|
||||||
|
@$core.Deprecated(
|
||||||
|
'Using this can add significant overhead to your binary. '
|
||||||
|
'Use [GeneratedMessageGenericExtensions.deepCopy] instead. '
|
||||||
|
'Will be removed in next major version')
|
||||||
|
DHTLog clone() => DHTLog()..mergeFromMessage(this);
|
||||||
|
@$core.Deprecated(
|
||||||
|
'Using this can add significant overhead to your binary. '
|
||||||
|
'Use [GeneratedMessageGenericExtensions.rebuild] instead. '
|
||||||
|
'Will be removed in next major version')
|
||||||
|
DHTLog copyWith(void Function(DHTLog) updates) => super.copyWith((message) => updates(message as DHTLog)) as DHTLog;
|
||||||
|
|
||||||
|
$pb.BuilderInfo get info_ => _i;
|
||||||
|
|
||||||
|
@$core.pragma('dart2js:noInline')
|
||||||
|
static DHTLog create() => DHTLog._();
|
||||||
|
DHTLog createEmptyInstance() => create();
|
||||||
|
static $pb.PbList<DHTLog> createRepeated() => $pb.PbList<DHTLog>();
|
||||||
|
@$core.pragma('dart2js:noInline')
|
||||||
|
static DHTLog getDefault() => _defaultInstance ??= $pb.GeneratedMessage.$_defaultFor<DHTLog>(create);
|
||||||
|
static DHTLog? _defaultInstance;
|
||||||
|
|
||||||
|
@$pb.TagNumber(1)
|
||||||
|
$core.int get head => $_getIZ(0);
|
||||||
|
@$pb.TagNumber(1)
|
||||||
|
set head($core.int v) { $_setUnsignedInt32(0, v); }
|
||||||
|
@$pb.TagNumber(1)
|
||||||
|
$core.bool hasHead() => $_has(0);
|
||||||
|
@$pb.TagNumber(1)
|
||||||
|
void clearHead() => clearField(1);
|
||||||
|
|
||||||
|
@$pb.TagNumber(2)
|
||||||
|
$core.int get tail => $_getIZ(1);
|
||||||
|
@$pb.TagNumber(2)
|
||||||
|
set tail($core.int v) { $_setUnsignedInt32(1, v); }
|
||||||
|
@$pb.TagNumber(2)
|
||||||
|
$core.bool hasTail() => $_has(1);
|
||||||
|
@$pb.TagNumber(2)
|
||||||
|
void clearTail() => clearField(2);
|
||||||
|
|
||||||
|
@$pb.TagNumber(3)
|
||||||
|
$core.int get stride => $_getIZ(2);
|
||||||
|
@$pb.TagNumber(3)
|
||||||
|
set stride($core.int v) { $_setUnsignedInt32(2, v); }
|
||||||
|
@$pb.TagNumber(3)
|
||||||
|
$core.bool hasStride() => $_has(2);
|
||||||
|
@$pb.TagNumber(3)
|
||||||
|
void clearStride() => clearField(3);
|
||||||
|
}
|
||||||
|
|
||||||
class DHTShortArray extends $pb.GeneratedMessage {
|
class DHTShortArray extends $pb.GeneratedMessage {
|
||||||
factory DHTShortArray() => create();
|
factory DHTShortArray() => create();
|
||||||
DHTShortArray._() : super();
|
DHTShortArray._() : super();
|
||||||
@ -133,68 +195,6 @@ class DHTShortArray extends $pb.GeneratedMessage {
|
|||||||
$core.List<$core.int> get seqs => $_getList(2);
|
$core.List<$core.int> get seqs => $_getList(2);
|
||||||
}
|
}
|
||||||
|
|
||||||
class DHTLog extends $pb.GeneratedMessage {
|
|
||||||
factory DHTLog() => create();
|
|
||||||
DHTLog._() : super();
|
|
||||||
factory DHTLog.fromBuffer($core.List<$core.int> i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromBuffer(i, r);
|
|
||||||
factory DHTLog.fromJson($core.String i, [$pb.ExtensionRegistry r = $pb.ExtensionRegistry.EMPTY]) => create()..mergeFromJson(i, r);
|
|
||||||
|
|
||||||
static final $pb.BuilderInfo _i = $pb.BuilderInfo(_omitMessageNames ? '' : 'DHTLog', package: const $pb.PackageName(_omitMessageNames ? '' : 'dht'), createEmptyInstance: create)
|
|
||||||
..pc<$0.TypedKey>(1, _omitFieldNames ? '' : 'keys', $pb.PbFieldType.PM, subBuilder: $0.TypedKey.create)
|
|
||||||
..aOM<$0.TypedKey>(2, _omitFieldNames ? '' : 'back', subBuilder: $0.TypedKey.create)
|
|
||||||
..p<$core.int>(3, _omitFieldNames ? '' : 'subkeyCounts', $pb.PbFieldType.KU3)
|
|
||||||
..a<$core.int>(4, _omitFieldNames ? '' : 'totalSubkeys', $pb.PbFieldType.OU3)
|
|
||||||
..hasRequiredFields = false
|
|
||||||
;
|
|
||||||
|
|
||||||
@$core.Deprecated(
|
|
||||||
'Using this can add significant overhead to your binary. '
|
|
||||||
'Use [GeneratedMessageGenericExtensions.deepCopy] instead. '
|
|
||||||
'Will be removed in next major version')
|
|
||||||
DHTLog clone() => DHTLog()..mergeFromMessage(this);
|
|
||||||
@$core.Deprecated(
|
|
||||||
'Using this can add significant overhead to your binary. '
|
|
||||||
'Use [GeneratedMessageGenericExtensions.rebuild] instead. '
|
|
||||||
'Will be removed in next major version')
|
|
||||||
DHTLog copyWith(void Function(DHTLog) updates) => super.copyWith((message) => updates(message as DHTLog)) as DHTLog;
|
|
||||||
|
|
||||||
$pb.BuilderInfo get info_ => _i;
|
|
||||||
|
|
||||||
@$core.pragma('dart2js:noInline')
|
|
||||||
static DHTLog create() => DHTLog._();
|
|
||||||
DHTLog createEmptyInstance() => create();
|
|
||||||
static $pb.PbList<DHTLog> createRepeated() => $pb.PbList<DHTLog>();
|
|
||||||
@$core.pragma('dart2js:noInline')
|
|
||||||
static DHTLog getDefault() => _defaultInstance ??= $pb.GeneratedMessage.$_defaultFor<DHTLog>(create);
|
|
||||||
static DHTLog? _defaultInstance;
|
|
||||||
|
|
||||||
@$pb.TagNumber(1)
|
|
||||||
$core.List<$0.TypedKey> get keys => $_getList(0);
|
|
||||||
|
|
||||||
@$pb.TagNumber(2)
|
|
||||||
$0.TypedKey get back => $_getN(1);
|
|
||||||
@$pb.TagNumber(2)
|
|
||||||
set back($0.TypedKey v) { setField(2, v); }
|
|
||||||
@$pb.TagNumber(2)
|
|
||||||
$core.bool hasBack() => $_has(1);
|
|
||||||
@$pb.TagNumber(2)
|
|
||||||
void clearBack() => clearField(2);
|
|
||||||
@$pb.TagNumber(2)
|
|
||||||
$0.TypedKey ensureBack() => $_ensure(1);
|
|
||||||
|
|
||||||
@$pb.TagNumber(3)
|
|
||||||
$core.List<$core.int> get subkeyCounts => $_getList(2);
|
|
||||||
|
|
||||||
@$pb.TagNumber(4)
|
|
||||||
$core.int get totalSubkeys => $_getIZ(3);
|
|
||||||
@$pb.TagNumber(4)
|
|
||||||
set totalSubkeys($core.int v) { $_setUnsignedInt32(3, v); }
|
|
||||||
@$pb.TagNumber(4)
|
|
||||||
$core.bool hasTotalSubkeys() => $_has(3);
|
|
||||||
@$pb.TagNumber(4)
|
|
||||||
void clearTotalSubkeys() => clearField(4);
|
|
||||||
}
|
|
||||||
|
|
||||||
enum DataReference_Kind {
|
enum DataReference_Kind {
|
||||||
dhtData,
|
dhtData,
|
||||||
notSet
|
notSet
|
||||||
|
@ -30,6 +30,21 @@ final $typed_data.Uint8List dHTDataDescriptor = $convert.base64Decode(
|
|||||||
'gCIAEoCzIQLnZlaWxpZC5UeXBlZEtleVIEaGFzaBIUCgVjaHVuaxgDIAEoDVIFY2h1bmsSEgoE'
|
'gCIAEoCzIQLnZlaWxpZC5UeXBlZEtleVIEaGFzaBIUCgVjaHVuaxgDIAEoDVIFY2h1bmsSEgoE'
|
||||||
'c2l6ZRgEIAEoDVIEc2l6ZQ==');
|
'c2l6ZRgEIAEoDVIEc2l6ZQ==');
|
||||||
|
|
||||||
|
@$core.Deprecated('Use dHTLogDescriptor instead')
|
||||||
|
const DHTLog$json = {
|
||||||
|
'1': 'DHTLog',
|
||||||
|
'2': [
|
||||||
|
{'1': 'head', '3': 1, '4': 1, '5': 13, '10': 'head'},
|
||||||
|
{'1': 'tail', '3': 2, '4': 1, '5': 13, '10': 'tail'},
|
||||||
|
{'1': 'stride', '3': 3, '4': 1, '5': 13, '10': 'stride'},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Descriptor for `DHTLog`. Decode as a `google.protobuf.DescriptorProto`.
|
||||||
|
final $typed_data.Uint8List dHTLogDescriptor = $convert.base64Decode(
|
||||||
|
'CgZESFRMb2cSEgoEaGVhZBgBIAEoDVIEaGVhZBISCgR0YWlsGAIgASgNUgR0YWlsEhYKBnN0cm'
|
||||||
|
'lkZRgDIAEoDVIGc3RyaWRl');
|
||||||
|
|
||||||
@$core.Deprecated('Use dHTShortArrayDescriptor instead')
|
@$core.Deprecated('Use dHTShortArrayDescriptor instead')
|
||||||
const DHTShortArray$json = {
|
const DHTShortArray$json = {
|
||||||
'1': 'DHTShortArray',
|
'1': 'DHTShortArray',
|
||||||
@ -45,23 +60,6 @@ final $typed_data.Uint8List dHTShortArrayDescriptor = $convert.base64Decode(
|
|||||||
'Cg1ESFRTaG9ydEFycmF5EiQKBGtleXMYASADKAsyEC52ZWlsaWQuVHlwZWRLZXlSBGtleXMSFA'
|
'Cg1ESFRTaG9ydEFycmF5EiQKBGtleXMYASADKAsyEC52ZWlsaWQuVHlwZWRLZXlSBGtleXMSFA'
|
||||||
'oFaW5kZXgYAiABKAxSBWluZGV4EhIKBHNlcXMYAyADKA1SBHNlcXM=');
|
'oFaW5kZXgYAiABKAxSBWluZGV4EhIKBHNlcXMYAyADKA1SBHNlcXM=');
|
||||||
|
|
||||||
@$core.Deprecated('Use dHTLogDescriptor instead')
|
|
||||||
const DHTLog$json = {
|
|
||||||
'1': 'DHTLog',
|
|
||||||
'2': [
|
|
||||||
{'1': 'keys', '3': 1, '4': 3, '5': 11, '6': '.veilid.TypedKey', '10': 'keys'},
|
|
||||||
{'1': 'back', '3': 2, '4': 1, '5': 11, '6': '.veilid.TypedKey', '10': 'back'},
|
|
||||||
{'1': 'subkey_counts', '3': 3, '4': 3, '5': 13, '10': 'subkeyCounts'},
|
|
||||||
{'1': 'total_subkeys', '3': 4, '4': 1, '5': 13, '10': 'totalSubkeys'},
|
|
||||||
],
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Descriptor for `DHTLog`. Decode as a `google.protobuf.DescriptorProto`.
|
|
||||||
final $typed_data.Uint8List dHTLogDescriptor = $convert.base64Decode(
|
|
||||||
'CgZESFRMb2cSJAoEa2V5cxgBIAMoCzIQLnZlaWxpZC5UeXBlZEtleVIEa2V5cxIkCgRiYWNrGA'
|
|
||||||
'IgASgLMhAudmVpbGlkLlR5cGVkS2V5UgRiYWNrEiMKDXN1YmtleV9jb3VudHMYAyADKA1SDHN1'
|
|
||||||
'YmtleUNvdW50cxIjCg10b3RhbF9zdWJrZXlzGAQgASgNUgx0b3RhbFN1YmtleXM=');
|
|
||||||
|
|
||||||
@$core.Deprecated('Use dataReferenceDescriptor instead')
|
@$core.Deprecated('Use dataReferenceDescriptor instead')
|
||||||
const DataReference$json = {
|
const DataReference$json = {
|
||||||
'1': 'DataReference',
|
'1': 'DataReference',
|
||||||
|
@ -300,8 +300,8 @@ Future<IdentityMaster> openIdentityMaster(
|
|||||||
debugName:
|
debugName:
|
||||||
'IdentityMaster::openIdentityMaster::IdentityMasterRecord'))
|
'IdentityMaster::openIdentityMaster::IdentityMasterRecord'))
|
||||||
.deleteScope((masterRec) async {
|
.deleteScope((masterRec) async {
|
||||||
final identityMaster =
|
final identityMaster = (await masterRec.getJson(IdentityMaster.fromJson,
|
||||||
(await masterRec.getJson(IdentityMaster.fromJson, forceRefresh: true))!;
|
refreshMode: DHTRecordRefreshMode.network))!;
|
||||||
|
|
||||||
// Validate IdentityMaster
|
// Validate IdentityMaster
|
||||||
final masterRecordKey = masterRec.key;
|
final masterRecordKey = masterRec.key;
|
||||||
|
33
packages/veilid_support/lib/src/output.dart
Normal file
33
packages/veilid_support/lib/src/output.dart
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
import 'package:fast_immutable_collections/fast_immutable_collections.dart';
|
||||||
|
|
||||||
|
export 'package:fast_immutable_collections/fast_immutable_collections.dart'
|
||||||
|
show Output;
|
||||||
|
|
||||||
|
extension OutputNullExt<T> on Output<T>? {
|
||||||
|
void mapSave<S>(Output<S>? other, T Function(S output) closure) {
|
||||||
|
if (this == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (other == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
final v = other.value;
|
||||||
|
if (v == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
return this!.save(closure(v));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
extension OutputExt<T> on Output<T> {
|
||||||
|
void mapSave<S>(Output<S>? other, T Function(S output) closure) {
|
||||||
|
if (other == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
final v = other.value;
|
||||||
|
if (v == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
return save(closure(v));
|
||||||
|
}
|
||||||
|
}
|
@ -10,6 +10,7 @@ export 'src/config.dart';
|
|||||||
export 'src/identity.dart';
|
export 'src/identity.dart';
|
||||||
export 'src/json_tools.dart';
|
export 'src/json_tools.dart';
|
||||||
export 'src/memory_tools.dart';
|
export 'src/memory_tools.dart';
|
||||||
|
export 'src/output.dart';
|
||||||
export 'src/persistent_queue.dart';
|
export 'src/persistent_queue.dart';
|
||||||
export 'src/protobuf_tools.dart';
|
export 'src/protobuf_tools.dart';
|
||||||
export 'src/table_db.dart';
|
export 'src/table_db.dart';
|
||||||
|
@ -85,10 +85,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: build_daemon
|
name: build_daemon
|
||||||
sha256: "0343061a33da9c5810b2d6cee51945127d8f4c060b7fbdd9d54917f0a3feaaa1"
|
sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "4.0.1"
|
version: "4.0.2"
|
||||||
build_resolvers:
|
build_resolvers:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -101,10 +101,10 @@ packages:
|
|||||||
dependency: "direct dev"
|
dependency: "direct dev"
|
||||||
description:
|
description:
|
||||||
name: build_runner
|
name: build_runner
|
||||||
sha256: "3ac61a79bfb6f6cc11f693591063a7f19a7af628dc52f141743edac5c16e8c22"
|
sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.4.9"
|
version: "2.4.10"
|
||||||
build_runner_core:
|
build_runner_core:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -221,10 +221,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: fast_immutable_collections
|
name: fast_immutable_collections
|
||||||
sha256: "38fbc50df5b219dcfb83ebbc3275ec09872530ca1153858fc56fceadb310d037"
|
sha256: "533806a7f0c624c2e479d05d3fdce4c87109a7cd0db39b8cc3830d3a2e8dedc7"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "10.2.2"
|
version: "10.2.3"
|
||||||
ffi:
|
ffi:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -399,10 +399,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: meta
|
name: meta
|
||||||
sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04
|
sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.11.0"
|
version: "1.12.0"
|
||||||
mime:
|
mime:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -763,10 +763,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: win32
|
name: win32
|
||||||
sha256: "0eaf06e3446824099858367950a813472af675116bf63f008a4c2a75ae13e9cb"
|
sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "5.5.0"
|
version: "5.5.1"
|
||||||
xdg_directories:
|
xdg_directories:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -784,5 +784,5 @@ packages:
|
|||||||
source: hosted
|
source: hosted
|
||||||
version: "3.1.2"
|
version: "3.1.2"
|
||||||
sdks:
|
sdks:
|
||||||
dart: ">=3.3.0 <4.0.0"
|
dart: ">=3.4.0 <4.0.0"
|
||||||
flutter: ">=3.19.1"
|
flutter: ">=3.19.1"
|
||||||
|
@ -12,11 +12,11 @@ dependencies:
|
|||||||
bloc_advanced_tools: ^0.1.1
|
bloc_advanced_tools: ^0.1.1
|
||||||
collection: ^1.18.0
|
collection: ^1.18.0
|
||||||
equatable: ^2.0.5
|
equatable: ^2.0.5
|
||||||
fast_immutable_collections: ^10.2.2
|
fast_immutable_collections: ^10.2.3
|
||||||
freezed_annotation: ^2.4.1
|
freezed_annotation: ^2.4.1
|
||||||
json_annotation: ^4.9.0
|
json_annotation: ^4.9.0
|
||||||
loggy: ^2.0.3
|
loggy: ^2.0.3
|
||||||
meta: ^1.11.0
|
meta: ^1.12.0
|
||||||
|
|
||||||
protobuf: ^3.1.0
|
protobuf: ^3.1.0
|
||||||
veilid:
|
veilid:
|
||||||
@ -24,7 +24,7 @@ dependencies:
|
|||||||
path: ../../../veilid/veilid-flutter
|
path: ../../../veilid/veilid-flutter
|
||||||
|
|
||||||
dev_dependencies:
|
dev_dependencies:
|
||||||
build_runner: ^2.4.9
|
build_runner: ^2.4.10
|
||||||
freezed: ^2.5.2
|
freezed: ^2.5.2
|
||||||
json_serializable: ^6.8.0
|
json_serializable: ^6.8.0
|
||||||
lint_hard: ^4.0.0
|
lint_hard: ^4.0.0
|
||||||
|
134
pubspec.lock
134
pubspec.lock
@ -37,10 +37,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: archive
|
name: archive
|
||||||
sha256: "22600aa1e926be775fa5fe7e6894e7fb3df9efda8891c73f70fb3262399a432d"
|
sha256: ecf4273855368121b1caed0d10d4513c7241dfc813f7d3c8933b36622ae9b265
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.4.10"
|
version: "3.5.1"
|
||||||
args:
|
args:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -68,10 +68,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: awesome_extensions
|
name: awesome_extensions
|
||||||
sha256: c3bf11d07a69fe10ff5541717b920661c7a87a791ee182851f1c92a2d15b95a2
|
sha256: "07e52221467e651cab9219a26286245760831c3852ea2c54883a48a54f120d7c"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.0.14"
|
version: "2.0.16"
|
||||||
badges:
|
badges:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -139,10 +139,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: build_daemon
|
name: build_daemon
|
||||||
sha256: "0343061a33da9c5810b2d6cee51945127d8f4c060b7fbdd9d54917f0a3feaaa1"
|
sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "4.0.1"
|
version: "4.0.2"
|
||||||
build_resolvers:
|
build_resolvers:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -155,10 +155,10 @@ packages:
|
|||||||
dependency: "direct dev"
|
dependency: "direct dev"
|
||||||
description:
|
description:
|
||||||
name: build_runner
|
name: build_runner
|
||||||
sha256: "3ac61a79bfb6f6cc11f693591063a7f19a7af628dc52f141743edac5c16e8c22"
|
sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.4.9"
|
version: "2.4.10"
|
||||||
build_runner_core:
|
build_runner_core:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -203,34 +203,34 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: cached_network_image_web
|
name: cached_network_image_web
|
||||||
sha256: "42a835caa27c220d1294311ac409a43361088625a4f23c820b006dd9bffb3316"
|
sha256: "205d6a9f1862de34b93184f22b9d2d94586b2f05c581d546695e3d8f6a805cd7"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.1.1"
|
version: "1.2.0"
|
||||||
camera:
|
camera:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: camera
|
name: camera
|
||||||
sha256: "9499cbc2e51d8eb0beadc158b288380037618ce4e30c9acbc4fae1ac3ecb5797"
|
sha256: dfa8fc5a1adaeb95e7a54d86a5bd56f4bb0e035515354c8ac6d262e35cec2ec8
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.10.5+9"
|
version: "0.10.6"
|
||||||
camera_android:
|
camera_android:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: camera_android
|
name: camera_android
|
||||||
sha256: "7b0aba6398afa8475e2bc9115d976efb49cf8db781e922572d443795c04a4f4f"
|
sha256: b350ac087f111467e705b2b76cc1322f7f5bdc122aa83b4b243b0872f390d229
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.10.9+1"
|
version: "0.10.9+2"
|
||||||
camera_avfoundation:
|
camera_avfoundation:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: camera_avfoundation
|
name: camera_avfoundation
|
||||||
sha256: "9dbbb253aaf201a69c40cf95571f366ca936305d2de012684e21f6f1b1433d31"
|
sha256: "7d021e8cd30d9b71b8b92b4ad669e80af432d722d18d6aac338572754a786c15"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.9.15+4"
|
version: "0.9.16"
|
||||||
camera_platform_interface:
|
camera_platform_interface:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -387,10 +387,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: diffutil_dart
|
name: diffutil_dart
|
||||||
sha256: e0297e4600b9797edff228ed60f4169a778ea357691ec98408fa3b72994c7d06
|
sha256: "5e74883aedf87f3b703cb85e815bdc1ed9208b33501556e4a8a5572af9845c81"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.0.0"
|
version: "4.0.1"
|
||||||
equatable:
|
equatable:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -403,10 +403,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: fast_immutable_collections
|
name: fast_immutable_collections
|
||||||
sha256: "38fbc50df5b219dcfb83ebbc3275ec09872530ca1153858fc56fceadb310d037"
|
sha256: "533806a7f0c624c2e479d05d3fdce4c87109a7cd0db39b8cc3830d3a2e8dedc7"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "10.2.2"
|
version: "10.2.3"
|
||||||
ffi:
|
ffi:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -456,10 +456,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: flutter_cache_manager
|
name: flutter_cache_manager
|
||||||
sha256: "8207f27539deb83732fdda03e259349046a39a4c767269285f449ade355d54ba"
|
sha256: "395d6b7831f21f3b989ebedbb785545932adb9afe2622c1ffacf7f4b53a7e544"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.3.1"
|
version: "3.3.2"
|
||||||
flutter_chat_types:
|
flutter_chat_types:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -472,10 +472,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: flutter_chat_ui
|
name: flutter_chat_ui
|
||||||
sha256: c8580c85e2d29359ffc84147e643d08d883eb6e757208652377f0105ef58807f
|
sha256: "40fb37acc328dd179eadc3d67bf8bd2d950dc0da34464aa8d48e8707e0234c09"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.6.12"
|
version: "1.6.13"
|
||||||
flutter_form_builder:
|
flutter_form_builder:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -634,10 +634,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: go_router
|
name: go_router
|
||||||
sha256: "771c8feb40ad0ef639973d7ecf1b43d55ffcedb2207fd43fab030f5639e40446"
|
sha256: aa073287b8f43553678e6fa9e8bb9c83212ff76e09542129a8099bbc8db4df65
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "13.2.4"
|
version: "14.1.2"
|
||||||
graphs:
|
graphs:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -714,10 +714,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: intl
|
name: intl
|
||||||
sha256: "3bc132a9dbce73a7e4a21a17d06e1878839ffbf975568bc875c60537824b0c4d"
|
sha256: d6f56758b7d3014a48af9701c085700aac781a92a87a62b1333b46d8879661cf
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.18.1"
|
version: "0.19.0"
|
||||||
io:
|
io:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -802,10 +802,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: meta
|
name: meta
|
||||||
sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04
|
sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.11.0"
|
version: "1.12.0"
|
||||||
mime:
|
mime:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -818,10 +818,10 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: mobile_scanner
|
name: mobile_scanner
|
||||||
sha256: "827765afbd4792ff3fd105ad593821ac0f6d8a7d352689013b07ee85be336312"
|
sha256: b8c0e9afcfd52534f85ec666f3d52156f560b5e6c25b1e3d4fe2087763607926
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "4.0.1"
|
version: "5.1.1"
|
||||||
motion_toast:
|
motion_toast:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -898,10 +898,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: path_provider_foundation
|
name: path_provider_foundation
|
||||||
sha256: "5a7999be66e000916500be4f15a3633ebceb8302719b47b9cc49ce924125350f"
|
sha256: f234384a3fdd67f989b4d54a5d73ca2a6c422fa55ae694381ae0f4375cd1ea16
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.3.2"
|
version: "2.4.0"
|
||||||
path_provider_linux:
|
path_provider_linux:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -938,10 +938,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: photo_view
|
name: photo_view
|
||||||
sha256: "8036802a00bae2a78fc197af8a158e3e2f7b500561ed23b4c458107685e645bb"
|
sha256: "1fc3d970a91295fbd1364296575f854c9863f225505c28c46e0a03e48960c75e"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.14.0"
|
version: "0.15.0"
|
||||||
pinput:
|
pinput:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -970,10 +970,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: pointycastle
|
name: pointycastle
|
||||||
sha256: "79fbafed02cfdbe85ef3fd06c7f4bc2cbcba0177e61b765264853d4253b21744"
|
sha256: "4be0097fcf3fd3e8449e53730c631200ebc7b88016acecab2b0da2f0149222fe"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.9.0"
|
version: "3.9.1"
|
||||||
pool:
|
pool:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -1106,26 +1106,26 @@ packages:
|
|||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: searchable_listview
|
name: searchable_listview
|
||||||
sha256: d8513a968bdd540cb011220a5670b23b346e04a7bcb99690a859ed58092f72a4
|
sha256: dfa6358f5e097f45b5b51a160cb6189e112e3abe0f728f4740349cd3b6575617
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.11.2"
|
version: "2.13.0"
|
||||||
share_plus:
|
share_plus:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: share_plus
|
name: share_plus
|
||||||
sha256: fb5319f3aab4c5dda5ebb92dca978179ba21f8c783ee4380910ef4c1c6824f51
|
sha256: ef3489a969683c4f3d0239010cc8b7a2a46543a8d139e111c06c558875083544
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "8.0.3"
|
version: "9.0.0"
|
||||||
share_plus_platform_interface:
|
share_plus_platform_interface:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: share_plus_platform_interface
|
name: share_plus_platform_interface
|
||||||
sha256: "251eb156a8b5fa9ce033747d73535bf53911071f8d3b6f4f0b578505ce0d4496"
|
sha256: "0f9e4418835d1b2c3ae78fdb918251959106cefdbc4dd43526e182f80e82f6d4"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.4.0"
|
version: "4.0.0"
|
||||||
shared_preferences:
|
shared_preferences:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -1146,10 +1146,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: shared_preferences_foundation
|
name: shared_preferences_foundation
|
||||||
sha256: "7708d83064f38060c7b39db12aefe449cb8cdc031d6062280087bc4cdb988f5c"
|
sha256: "0a8a893bf4fd1152f93fec03a415d11c27c74454d96e2318a7ac38dd18683ab7"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.3.5"
|
version: "2.4.0"
|
||||||
shared_preferences_linux:
|
shared_preferences_linux:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -1194,10 +1194,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: shelf_web_socket
|
name: shelf_web_socket
|
||||||
sha256: "9ca081be41c60190ebcb4766b2486a7d50261db7bd0f5d9615f2d653637a84c1"
|
sha256: "073c147238594ecd0d193f3456a5fe91c4b0abbcc68bf5cd95b36c4e194ac611"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.0.4"
|
version: "2.0.0"
|
||||||
signal_strength_indicator:
|
signal_strength_indicator:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
@ -1263,10 +1263,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: sqflite
|
name: sqflite
|
||||||
sha256: "5ce2e1a15e822c3b4bfb5400455775e421da7098eed8adc8f26298ada7c9308c"
|
sha256: a43e5a27235518c03ca238e7b4732cf35eabe863a369ceba6cbefa537a66f16d
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.3.3"
|
version: "2.3.3+1"
|
||||||
sqflite_common:
|
sqflite_common:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -1399,18 +1399,18 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: url_launcher_android
|
name: url_launcher_android
|
||||||
sha256: "360a6ed2027f18b73c8d98e159dda67a61b7f2e0f6ec26e86c3ada33b0621775"
|
sha256: "17cd5e205ea615e2c6ea7a77323a11712dffa0720a8a90540db57a01347f9ad9"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "6.3.1"
|
version: "6.3.2"
|
||||||
url_launcher_ios:
|
url_launcher_ios:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: url_launcher_ios
|
name: url_launcher_ios
|
||||||
sha256: "9149d493b075ed740901f3ee844a38a00b33116c7c5c10d7fb27df8987fb51d5"
|
sha256: "7068716403343f6ba4969b4173cbf3b84fc768042124bc2c011e5d782b24fe89"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "6.2.5"
|
version: "6.3.0"
|
||||||
url_launcher_linux:
|
url_launcher_linux:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -1423,10 +1423,10 @@ packages:
|
|||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: url_launcher_macos
|
name: url_launcher_macos
|
||||||
sha256: b7244901ea3cf489c5335bdacda07264a6e960b1c1b1a9f91e4bc371d9e68234
|
sha256: "9a1a42d5d2d95400c795b2914c36fdcb525870c752569438e4ebb09a2b5d90de"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "3.1.0"
|
version: "3.2.0"
|
||||||
url_launcher_platform_interface:
|
url_launcher_platform_interface:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -1529,30 +1529,38 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.5.1"
|
version: "0.5.1"
|
||||||
|
web_socket:
|
||||||
|
dependency: transitive
|
||||||
|
description:
|
||||||
|
name: web_socket
|
||||||
|
sha256: "217f49b5213796cb508d6a942a5dc604ce1cb6a0a6b3d8cb3f0c314f0ecea712"
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "0.1.4"
|
||||||
web_socket_channel:
|
web_socket_channel:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: web_socket_channel
|
name: web_socket_channel
|
||||||
sha256: "58c6666b342a38816b2e7e50ed0f1e261959630becd4c879c4f26bfa14aa5a42"
|
sha256: a2d56211ee4d35d9b344d9d4ce60f362e4f5d1aafb988302906bd732bc731276
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "2.4.5"
|
version: "3.0.0"
|
||||||
win32:
|
win32:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
name: win32
|
name: win32
|
||||||
sha256: "0a989dc7ca2bb51eac91e8fd00851297cfffd641aa7538b165c62637ca0eaa4a"
|
sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "5.4.0"
|
version: "5.5.1"
|
||||||
window_manager:
|
window_manager:
|
||||||
dependency: "direct main"
|
dependency: "direct main"
|
||||||
description:
|
description:
|
||||||
name: window_manager
|
name: window_manager
|
||||||
sha256: b3c895bdf936c77b83c5254bec2e6b3f066710c1f89c38b20b8acc382b525494
|
sha256: "8699323b30da4cdbe2aa2e7c9de567a6abd8a97d9a5c850a3c86dcd0b34bbfbf"
|
||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "0.3.8"
|
version: "0.3.9"
|
||||||
xdg_directories:
|
xdg_directories:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
@ -1610,5 +1618,5 @@ packages:
|
|||||||
source: hosted
|
source: hosted
|
||||||
version: "1.1.2"
|
version: "1.1.2"
|
||||||
sdks:
|
sdks:
|
||||||
dart: ">=3.3.0 <4.0.0"
|
dart: ">=3.4.0 <4.0.0"
|
||||||
flutter: ">=3.19.1"
|
flutter: ">=3.19.1"
|
||||||
|
17
pubspec.yaml
17
pubspec.yaml
@ -10,7 +10,7 @@ environment:
|
|||||||
dependencies:
|
dependencies:
|
||||||
animated_theme_switcher: ^2.0.10
|
animated_theme_switcher: ^2.0.10
|
||||||
ansicolor: ^2.0.2
|
ansicolor: ^2.0.2
|
||||||
archive: ^3.4.10
|
archive: ^3.5.1
|
||||||
async_tools: ^0.1.1
|
async_tools: ^0.1.1
|
||||||
awesome_extensions: ^2.0.14
|
awesome_extensions: ^2.0.14
|
||||||
badges: ^3.1.2
|
badges: ^3.1.2
|
||||||
@ -44,14 +44,14 @@ dependencies:
|
|||||||
flutter_translate: ^4.0.4
|
flutter_translate: ^4.0.4
|
||||||
form_builder_validators: ^9.1.0
|
form_builder_validators: ^9.1.0
|
||||||
freezed_annotation: ^2.4.1
|
freezed_annotation: ^2.4.1
|
||||||
go_router: ^13.2.4
|
go_router: ^14.1.2
|
||||||
hydrated_bloc: ^9.1.5
|
hydrated_bloc: ^9.1.5
|
||||||
image: ^4.1.7
|
image: ^4.1.7
|
||||||
intl: ^0.18.1
|
intl: ^0.18.1
|
||||||
json_annotation: ^4.8.1
|
json_annotation: ^4.9.0
|
||||||
loggy: ^2.0.3
|
loggy: ^2.0.3
|
||||||
meta: ^1.11.0
|
meta: ^1.11.0
|
||||||
mobile_scanner: ^4.0.1
|
mobile_scanner: ^5.1.1
|
||||||
motion_toast: ^2.9.1
|
motion_toast: ^2.9.1
|
||||||
pasteboard: ^0.2.0
|
pasteboard: ^0.2.0
|
||||||
path: ^1.9.0
|
path: ^1.9.0
|
||||||
@ -65,8 +65,8 @@ dependencies:
|
|||||||
quickalert: ^1.1.0
|
quickalert: ^1.1.0
|
||||||
radix_colors: ^1.0.4
|
radix_colors: ^1.0.4
|
||||||
reorderable_grid: ^1.0.10
|
reorderable_grid: ^1.0.10
|
||||||
searchable_listview: ^2.11.2
|
searchable_listview: ^2.12.0
|
||||||
share_plus: ^8.0.3
|
share_plus: ^9.0.0
|
||||||
shared_preferences: ^2.2.3
|
shared_preferences: ^2.2.3
|
||||||
signal_strength_indicator: ^0.4.1
|
signal_strength_indicator: ^0.4.1
|
||||||
split_view: ^3.2.1
|
split_view: ^3.2.1
|
||||||
@ -88,12 +88,15 @@ dependency_overrides:
|
|||||||
path: ../dart_async_tools
|
path: ../dart_async_tools
|
||||||
bloc_advanced_tools:
|
bloc_advanced_tools:
|
||||||
path: ../bloc_advanced_tools
|
path: ../bloc_advanced_tools
|
||||||
|
# REMOVE ONCE form_builder_validators HAS A FIX UPSTREAM
|
||||||
|
intl: 0.19.0
|
||||||
|
|
||||||
|
|
||||||
dev_dependencies:
|
dev_dependencies:
|
||||||
build_runner: ^2.4.9
|
build_runner: ^2.4.9
|
||||||
freezed: ^2.5.2
|
freezed: ^2.5.2
|
||||||
icons_launcher: ^2.1.7
|
icons_launcher: ^2.1.7
|
||||||
json_serializable: ^6.7.1
|
json_serializable: ^6.8.0
|
||||||
lint_hard: ^4.0.0
|
lint_hard: ^4.0.0
|
||||||
|
|
||||||
flutter_native_splash:
|
flutter_native_splash:
|
||||||
|
Loading…
Reference in New Issue
Block a user