Mirror of https://gitlab.com/veilid/veilidchat.git (synced 2024-10-01 06:55:46 -04:00)

Commit 3315644ba8: dht log implementation
Parent: c4d25fecb0
@@ -1,2 +1,2 @@
-export 'dht_array.dart';
-export 'dht_array_cubit.dart';
+export 'dht_log.dart';
+export 'dht_log_cubit.dart';
@@ -4,6 +4,7 @@ import 'dart:typed_data';
 import 'package:async_tools/async_tools.dart';
 import 'package:collection/collection.dart';
 import 'package:equatable/equatable.dart';
+import 'package:meta/meta.dart';

 import '../../../veilid_support.dart';
 import '../../proto/proto.dart' as proto;
@@ -15,6 +16,21 @@ part 'dht_log_append.dart';

 ///////////////////////////////////////////////////////////////////////

+@immutable
+class DHTLogUpdate extends Equatable {
+  const DHTLogUpdate(
+      {required this.headDelta, required this.tailDelta, required this.length})
+      : assert(headDelta >= 0, 'should never have negative head delta'),
+        assert(tailDelta >= 0, 'should never have negative tail delta'),
+        assert(length >= 0, 'should never have negative length');
+  final int headDelta;
+  final int tailDelta;
+  final int length;
+
+  @override
+  List<Object?> get props => [headDelta, tailDelta, length];
+}
+
 /// DHTLog is a ring-buffer queue like data structure with the following
 /// operations:
 /// * Add elements to the tail
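The DHTLogUpdate value added above carries ring-buffer deltas rather than a bare notification: headDelta and tailDelta appear to describe how far the head and tail advanced since the last callback, and length is the resulting element count. A minimal sketch of how a consumer might read one (the handler is an illustrative assumption, not part of this commit):

// Sketch only: interpreting a DHTLogUpdate delivered through DHTLog.listen.
void describeUpdate(DHTLogUpdate upd) {
  // headDelta: elements consumed from the front since the last update
  // tailDelta: elements appended at the back since the last update
  // length:    total elements now in the log
  print('head advanced ${upd.headDelta}, tail advanced ${upd.tailDelta}, '
      'length is now ${upd.length}');
}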
@@ -30,8 +46,8 @@ class DHTLog implements DHTOpenable {
   // Constructors

   DHTLog._({required _DHTLogSpine spine}) : _spine = spine {
-    _spine.onUpdatedSpine = () {
-      _watchController?.sink.add(null);
+    _spine.onUpdatedSpine = (update) {
+      _watchController?.sink.add(update);
     };
   }

@@ -225,7 +241,7 @@ class DHTLog implements DHTOpenable {
   /// Listen to and any all changes to the structure of this log
   /// regardless of where the changes are coming from
   Future<StreamSubscription<void>> listen(
-    void Function() onChanged,
+    void Function(DHTLogUpdate) onChanged,
   ) {
     if (!isOpen) {
       throw StateError('log is not open"');
@@ -235,7 +251,8 @@ class DHTLog implements DHTOpenable {
     // If don't have a controller yet, set it up
     if (_watchController == null) {
       // Set up watch requirements
-      _watchController = StreamController<void>.broadcast(onCancel: () {
+      _watchController =
+          StreamController<DHTLogUpdate>.broadcast(onCancel: () {
         // If there are no more listeners then we can get
         // rid of the controller and drop our subscriptions
         unawaited(_listenMutex.protect(() async {
@@ -249,7 +266,7 @@ class DHTLog implements DHTOpenable {
         await _spine.watch();
       }
       // Return subscription
-      return _watchController!.stream.listen((_) => onChanged());
+      return _watchController!.stream.listen((upd) => onChanged(upd));
     });
   }

@@ -269,5 +286,5 @@ class DHTLog implements DHTOpenable {
   // Watch mutex to ensure we keep the representation valid
   final Mutex _listenMutex = Mutex();
   // Stream of external changes
-  StreamController<void>? _watchController;
+  StreamController<DHTLogUpdate>? _watchController;
 }
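With the watch controller now typed as StreamController<DHTLogUpdate>, listeners receive the structured update instead of a void event. A hedged usage sketch, assuming an already-opened DHTLog named log:

// Sketch, not from this commit: subscribe to structural changes on a DHTLog.
final sub = await log.listen((upd) {
  if (upd.tailDelta > 0) {
    // New elements were appended; refresh whatever view is showing the tail.
  }
});
// Later, when the view goes away:
await sub.cancel();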
@@ -8,112 +8,213 @@ import 'package:fast_immutable_collections/fast_immutable_collections.dart';
 import 'package:meta/meta.dart';

 import '../../../veilid_support.dart';
+import '../interfaces/dht_append_truncate.dart';

-// xxx paginate and remember to paginate watches (could use short array cubit as a subcubit here?)
-// @immutable
-// class DHTArrayElementState<T> extends Equatable {
-//   const DHTArrayElementState(
-//       {required this.value, required this.isOffline});
-//   final T value;
-//   final bool isOffline;
-
-//   @override
-//   List<Object?> get props => [value, isOffline];
-// }
-
-// typedef DHTArrayState<T> = AsyncValue<IList<DHTArrayElementState<T>>>;
-// typedef DHTArrayBusyState<T> = BlocBusyState<DHTArrayState<T>>;
-
-// class DHTArrayCubit<T> extends Cubit<DHTArrayBusyState<T>>
-//     with BlocBusyWrapper<DHTArrayState<T>> {
-//   DHTArrayCubit({
-//     required Future<DHTArray> Function() open,
-//     required T Function(List<int> data) decodeElement,
-//   }) : _decodeElement = decodeElement,
-//        super(const BlocBusyState(AsyncValue.loading())) {
-//     _initWait.add(() async {
-//       // Open DHT record
-//       _array = await open();
-//       _wantsCloseRecord = true;
-
-//       // Make initial state update
-//       await _refreshNoWait();
-//       _subscription = await _array.listen(_update);
-//     });
-//   }
-
-//   Future<void> refresh({bool forceRefresh = false}) async {
-//     await _initWait();
-//     await _refreshNoWait(forceRefresh: forceRefresh);
-//   }
-
-//   Future<void> _refreshNoWait({bool forceRefresh = false}) async =>
-//       busy((emit) async => _refreshInner(emit, forceRefresh: forceRefresh));
-
-//   Future<void> _refreshInner(void Function(DHTShortArrayState<T>) emit,
-//       {bool forceRefresh = false}) async {
-//     try {
-//       final newState = await _shortArray.operate((reader) async {
-//         final offlinePositions = await reader.getOfflinePositions();
-//         final allItems = (await reader.getAllItems(forceRefresh: forceRefresh))
-//             ?.indexed
-//             .map((x) => DHTShortArrayElementState(
-//                 value: _decodeElement(x.$2),
-//                 isOffline: offlinePositions.contains(x.$1)))
-//             .toIList();
-//         return allItems;
-//       });
-//       if (newState != null) {
-//         emit(AsyncValue.data(newState));
-//       }
-//     } on Exception catch (e) {
-//       emit(AsyncValue.error(e));
-//     }
-//   }
-
-//   void _update() {
-//     // Run at most one background update process
-//     // Because this is async, we could get an update while we're
-//     // still processing the last one. Only called after init future has run
-//     // so we dont have to wait for that here.
-//     _sspUpdate.busyUpdate<T, DHTShortArrayState<T>>(
-//         busy, (emit) async => _refreshInner(emit));
-//   }
-
-//   @override
-//   Future<void> close() async {
-//     await _initWait();
-//     await _subscription?.cancel();
-//     _subscription = null;
-//     if (_wantsCloseRecord) {
-//       await _shortArray.close();
-//     }
-//     await super.close();
-//   }
-
-//   Future<R?> operate<R>(Future<R?> Function(DHTShortArrayRead) closure) async {
-//     await _initWait();
-//     return _shortArray.operate(closure);
-//   }
-
-//   Future<(R?, bool)> operateWrite<R>(
-//       Future<R?> Function(DHTShortArrayWrite) closure) async {
-//     await _initWait();
-//     return _shortArray.operateWrite(closure);
-//   }
-
-//   Future<void> operateWriteEventual(
-//       Future<bool> Function(DHTShortArrayWrite) closure,
-//       {Duration? timeout}) async {
-//     await _initWait();
-//     return _shortArray.operateWriteEventual(closure, timeout: timeout);
-//   }
-
-//   final WaitSet<void> _initWait = WaitSet();
-//   late final DHTShortArray _shortArray;
-//   final T Function(List<int> data) _decodeElement;
-//   StreamSubscription<void>? _subscription;
-//   bool _wantsCloseRecord = false;
-//   final _sspUpdate = SingleStatelessProcessor();
-// }
+@immutable
+class DHTLogElementState<T> extends Equatable {
+  const DHTLogElementState({required this.value, required this.isOffline});
+  final T value;
+  final bool isOffline;
+
+  @override
+  List<Object?> get props => [value, isOffline];
+}
+
+@immutable
+class DHTLogStateData<T> extends Equatable {
+  const DHTLogStateData(
+      {required this.elements,
+      required this.tail,
+      required this.count,
+      required this.follow});
+  // The view of the elements in the dhtlog
+  // Span is from [tail-length, tail)
+  final IList<DHTLogElementState<T>> elements;
+  // One past the end of the last element
+  final int tail;
+  // The total number of elements to try to keep in 'elements'
+  final int count;
+  // If we should have the tail following the log
+  final bool follow;
+
+  @override
+  List<Object?> get props => [elements, tail, count, follow];
+}
+
+typedef DHTLogState<T> = AsyncValue<DHTLogStateData<T>>;
+typedef DHTLogBusyState<T> = BlocBusyState<DHTLogState<T>>;
+
+class DHTLogCubit<T> extends Cubit<DHTLogBusyState<T>>
+    with BlocBusyWrapper<DHTLogState<T>> {
+  DHTLogCubit({
+    required Future<DHTLog> Function() open,
+    required T Function(List<int> data) decodeElement,
+  })  : _decodeElement = decodeElement,
+        super(const BlocBusyState(AsyncValue.loading())) {
+    _initWait.add(() async {
+      // Open DHT record
+      _log = await open();
+      _wantsCloseRecord = true;
+
+      // Make initial state update
+      await _refreshNoWait();
+      _subscription = await _log.listen(_update);
+    });
+  }
+
+  // Set the tail position of the log for pagination.
+  // If tail is 0, the end of the log is used.
+  // If tail is negative, the position is subtracted from the current log
+  // length.
+  // If tail is positive, the position is absolute from the head of the log
+  // If follow is enabled, the tail offset will update when the log changes
+  Future<void> setWindow(
+      {int? tail, int? count, bool? follow, bool forceRefresh = false}) async {
+    await _initWait();
+    if (tail != null) {
+      _tail = tail;
+    }
+    if (count != null) {
+      _count = count;
+    }
+    if (follow != null) {
+      _follow = follow;
+    }
+    await _refreshNoWait(forceRefresh: forceRefresh);
+  }
+
+  Future<void> refresh({bool forceRefresh = false}) async {
+    await _initWait();
+    await _refreshNoWait(forceRefresh: forceRefresh);
+  }
+
+  Future<void> _refreshNoWait({bool forceRefresh = false}) async =>
+      busy((emit) async => _refreshInner(emit, forceRefresh: forceRefresh));
+
+  Future<void> _refreshInner(void Function(AsyncValue<DHTLogStateData<T>>) emit,
+      {bool forceRefresh = false}) async {
+    final avElements = await _loadElements(_tail, _count);
+    final err = avElements.asError;
+    if (err != null) {
+      emit(AsyncValue.error(err.error, err.stackTrace));
+      return;
+    }
+    final loading = avElements.asLoading;
+    if (loading != null) {
+      emit(const AsyncValue.loading());
+      return;
+    }
+    final elements = avElements.asData!.value;
+    emit(AsyncValue.data(DHTLogStateData(
+        elements: elements, tail: _tail, count: _count, follow: _follow)));
+  }
+
+  Future<AsyncValue<IList<DHTLogElementState<T>>>> _loadElements(
+      int tail, int count,
+      {bool forceRefresh = false}) async {
+    try {
+      final allItems = await _log.operate((reader) async {
+        final length = reader.length;
+        final end = ((tail - 1) % length) + 1;
+        final start = (count < end) ? end - count : 0;
+
+        final offlinePositions = await reader.getOfflinePositions();
+        final allItems = (await reader.getItemRange(start,
+                length: end - start, forceRefresh: forceRefresh))
+            ?.indexed
+            .map((x) => DHTLogElementState(
+                value: _decodeElement(x.$2),
+                isOffline: offlinePositions.contains(x.$1)))
+            .toIList();
+        return allItems;
+      });
+      if (allItems == null) {
+        return const AsyncValue.loading();
+      }
+      return AsyncValue.data(allItems);
+    } on Exception catch (e, st) {
+      return AsyncValue.error(e, st);
+    }
+  }
+
+  void _update(DHTLogUpdate upd) {
+    // Run at most one background update process
+    // Because this is async, we could get an update while we're
+    // still processing the last one. Only called after init future has run
+    // so we dont have to wait for that here.
+
+    // Accumulate head and tail deltas
+    _headDelta += upd.headDelta;
+    _tailDelta += upd.tailDelta;
+
+    _sspUpdate.busyUpdate<T, DHTLogState<T>>(busy, (emit) async {
+      // apply follow
+      if (_follow) {
+        if (_tail <= 0) {
+          // Negative tail is already following tail changes
+        } else {
+          // Positive tail is measured from the head, so apply deltas
+          _tail = (_tail + _tailDelta - _headDelta) % upd.length;
+        }
+      } else {
+        if (_tail <= 0) {
+          // Negative tail is following tail changes so apply deltas
+          var posTail = _tail + upd.length;
+          posTail = (posTail + _tailDelta - _headDelta) % upd.length;
+          _tail = posTail - upd.length;
+        } else {
+          // Positive tail is measured from head so not following tail
+        }
+      }
+      _headDelta = 0;
+      _tailDelta = 0;
+
+      await _refreshInner(emit);
+    });
+  }
+
+  @override
+  Future<void> close() async {
+    await _initWait();
+    await _subscription?.cancel();
+    _subscription = null;
+    if (_wantsCloseRecord) {
+      await _log.close();
+    }
+    await super.close();
+  }
+
+  Future<R?> operate<R>(Future<R?> Function(DHTRandomRead) closure) async {
+    await _initWait();
+    return _log.operate(closure);
+  }
+
+  Future<R> operateAppend<R>(
+      Future<R> Function(DHTAppendTruncateRandomRead) closure) async {
+    await _initWait();
+    return _log.operateAppend(closure);
+  }
+
+  Future<void> operateAppendEventual(
+      Future<bool> Function(DHTAppendTruncateRandomRead) closure,
+      {Duration? timeout}) async {
+    await _initWait();
+    return _log.operateAppendEventual(closure, timeout: timeout);
+  }
+
+  final WaitSet<void> _initWait = WaitSet();
+  late final DHTLog _log;
+  final T Function(List<int> data) _decodeElement;
+  StreamSubscription<void>? _subscription;
+  bool _wantsCloseRecord = false;
+  final _sspUpdate = SingleStatelessProcessor();
+
+  // Accumulated deltas since last update
+  var _headDelta = 0;
+  var _tailDelta = 0;
+
+  // Cubit window into the DHTLog
+  var _tail = 0;
+  var _count = DHTShortArray.maxElements;
+  var _follow = true;
+}
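The setWindow comments above define the pagination contract: tail == 0 tracks the end of the log, a negative tail is an offset back from the current end, a positive tail is an absolute position from the head, and follow keeps the window pinned to the tail as the log grows. A usage sketch under those rules, assuming a DHTLogCubit<T> named cubit built elsewhere:

// Sketch only: driving the cubit's window from a paging UI.
// Follow the live end of the log, keeping the most recent 50 elements.
await cubit.setWindow(tail: 0, count: 50, follow: true);

// Show the 50 elements that end 50 entries before the current end,
// without following new appends.
await cubit.setWindow(tail: -50, count: 50, follow: false);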
@@ -105,13 +105,11 @@ class _DHTLogSpine {
     try {
       final out = await closure(this);
       // Write head assuming it has been changed
-      if (!await writeSpineHead()) {
+      if (!await writeSpineHead(old: (oldHead, oldTail))) {
         // Failed to write head means head got overwritten so write should
         // be considered failed
         throw DHTExceptionTryAgain();
       }
-
-      onUpdatedSpine?.call();
       return out;
     } on Exception {
       // Exception means state needs to be reverted
@@ -134,7 +132,6 @@ class _DHTLogSpine {

     try {
       // Iterate until we have a successful element and head write
-
       do {
         // Save off old values each pass of writeSpineHead because the head
         // will have changed
@@ -158,9 +155,7 @@ class _DHTLogSpine {
        }

        // Try to do the head write
-      } while (!await writeSpineHead());
-
-      onUpdatedSpine?.call();
+      } while (!await writeSpineHead(old: (oldHead, oldTail)));
     } on Exception {
       // Exception means state needs to be reverted
       _head = oldHead;
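Both call sites above follow the same shape: snapshot the spine's head and tail before mutating, then hand the snapshot to writeSpineHead so it can report deltas (via sendUpdate, defined below) only when the spine actually moved. A condensed sketch of that flow; the wrapper method is illustrative, the member names are the spine's own private fields:

// Illustrative sketch of the snapshot-then-write pattern used above.
Future<T> _operateSketch<T>(Future<T> Function() mutate) async {
  final oldHead = _head;
  final oldTail = _tail;
  try {
    final out = await mutate();
    if (!await writeSpineHead(old: (oldHead, oldTail))) {
      // Head was overwritten remotely, so this write is considered failed.
      throw DHTExceptionTryAgain();
    }
    return out;
  } on Exception {
    // Revert local state before propagating the error.
    _head = oldHead;
    _tail = oldTail;
    rethrow;
  }
}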
@@ -173,7 +168,7 @@ class _DHTLogSpine {
   /// Serialize and write out the current spine head subkey, possibly updating
   /// it if a newer copy is available online. Returns true if the write was
   /// successful
-  Future<bool> writeSpineHead() async {
+  Future<bool> writeSpineHead({(int, int)? old}) async {
     assert(_spineMutex.isLocked, 'should be in mutex here');

     final headBuffer = _toProto().writeToBuffer();
@@ -182,12 +177,28 @@ class _DHTLogSpine {
     if (existingData != null) {
       // Head write failed, incorporate update
       await _updateHead(proto.DHTLog.fromBuffer(existingData));
+      if (old != null) {
+        sendUpdate(old.$1, old.$2);
+      }
       return false;
     }
+    if (old != null) {
+      sendUpdate(old.$1, old.$2);
+    }
     return true;
   }

+  /// Send a spine update callback
+  void sendUpdate(int oldHead, int oldTail) {
+    final oldLength = _ringDistance(oldTail, oldHead);
+    if (oldHead != _head || oldTail != _tail || oldLength != length) {
+      onUpdatedSpine?.call(DHTLogUpdate(
+          headDelta: _ringDistance(_head, oldHead),
+          tailDelta: _ringDistance(_tail, oldTail),
+          length: length));
+    }
+  }
+
   /// Validate a new spine head subkey that has come in from the network
   Future<void> _updateHead(proto.DHTLog spineHead) async {
     assert(_spineMutex.isLocked, 'should be in mutex here');
@@ -486,8 +497,10 @@ class _DHTLogSpine {

     // Then update the head record
     await _spineMutex.protect(() async {
+      final oldHead = _head;
+      final oldTail = _tail;
       await _updateHead(headData);
-      onUpdatedSpine?.call();
+      sendUpdate(oldHead, oldTail);
     });
   }

@@ -495,10 +508,14 @@ class _DHTLogSpine {

   TypedKey get recordKey => _spineRecord.key;
   OwnedDHTRecordPointer get recordPointer => _spineRecord.ownedDHTRecordPointer;
-  int get length =>
-      (_tail < _head) ? (_positionLimit - _head) + _tail : _tail - _head;
+  int get length => _ringDistance(_tail, _head);
   bool get isOpen => _spineRecord.isOpen;

+  // Ring buffer distance from old to new
+  static int _ringDistance(int n, int o) =>
+      (n < o) ? (_positionLimit - o) + n : n - o;
+
   static const _positionLimit = DHTLog.segmentsPerSubkey *
       DHTLog.spineSubkeys *
       DHTShortArray.maxElements;
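The length getter is now expressed through _ringDistance, which measures forward travel around a ring of _positionLimit slots and wraps when the newer position has passed zero. A small standalone sketch of the same arithmetic with an illustrative limit of 10 (not a value from the code):

// Illustrative only: the wraparound arithmetic behind _ringDistance.
int ringDistance(int n, int o, int limit) => (n < o) ? (limit - o) + n : n - o;

void main() {
  assert(ringDistance(7, 3, 10) == 4); // no wrap: 7 - 3
  assert(ringDistance(2, 8, 10) == 4); // wrapped: (10 - 8) + 2
}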
@@ -508,7 +525,7 @@ class _DHTLogSpine {
   // Subscription to head record internal changes
   StreamSubscription<DHTRecordWatchChange>? _subscription;
   // Notify closure for external spine head changes
-  void Function()? onUpdatedSpine;
+  void Function(DHTLogUpdate)? onUpdatedSpine;

   // Spine DHT record
   final DHTRecord _spineRecord;
@@ -183,7 +183,8 @@ class DHTShortArray implements DHTOpenable {
   /// Runs a closure allowing read-write access to the shortarray
   /// Makes only one attempt to consistently write the changes to the DHT
   /// Returns result of the closure if the write could be performed
-  /// Throws DHTOperateException if the write could not be performed at this time
+  /// Throws DHTOperateException if the write could not be performed
+  /// at this time
   Future<T> operateWrite<T>(
       Future<T> Function(DHTRandomReadWrite) closure) async {
     if (!isOpen) {
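Per the rewrapped doc comment, operateWrite makes a single consistency attempt and throws if the write cannot be performed right now, so callers are expected to handle that case themselves. A hedged calling sketch, assuming an opened DHTShortArray named shortArray and leaving the writer body abstract:

// Sketch only: a single-attempt write with explicit failure handling.
try {
  await shortArray.operateWrite((writer) async {
    // ... write or update items through the DHTRandomReadWrite interface ...
  });
} on Exception {
  // The write could not be performed consistently at this time;
  // back off and retry later, or surface the error to the caller.
}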