Local Rehydration

This commit is contained in:
Christien Rioux 2025-04-25 17:18:39 -04:00
parent b964d0db40
commit c194f61644
48 changed files with 10889 additions and 11940 deletions

View file

@ -1,5 +1,7 @@
use super::*;
impl_veilid_log_facility!("net");
impl NetworkManager {
// Direct bootstrap request handler (separate fallback from the cheaper TXT bootstrap mechanism)
#[instrument(level = "trace", target = "net", skip(self), ret, err)]
@ -16,6 +18,8 @@ impl NetworkManager {
.collect();
let json_bytes = serialize_json(bootstrap_peerinfo).as_bytes().to_vec();
veilid_log!(self trace "BOOT response: {}", String::from_utf8_lossy(&json_bytes));
// Reply with a chunk of signed routing table
let net = self.net();
match pin_future_closure!(net.send_data_to_existing_flow(flow, json_bytes)).await? {

View file

@ -899,44 +899,49 @@ impl RoutingTable {
return false;
}
// does it have some dial info we need?
let filter = |n: &NodeInfo| {
let mut keep = false;
// Bootstraps must have -only- inbound capable network class
if !matches!(n.network_class(), NetworkClass::InboundCapable) {
// Only nodes with direct publicinternet node info
let Some(signed_node_info) = e.signed_node_info(RoutingDomain::PublicInternet)
else {
return false;
};
let SignedNodeInfo::Direct(signed_direct_node_info) = signed_node_info else {
return false;
};
let node_info = signed_direct_node_info.node_info();
// Bootstraps must have -only- inbound capable network class
if !matches!(node_info.network_class(), NetworkClass::InboundCapable) {
return false;
}
// Check for direct dialinfo and a good mix of protocol and address types
let mut keep = false;
for did in node_info.dial_info_detail_list() {
// Bootstraps must have -only- direct dial info
if !matches!(did.class, DialInfoClass::Direct) {
return false;
}
for did in n.dial_info_detail_list() {
// Bootstraps must have -only- direct dial info
if !matches!(did.class, DialInfoClass::Direct) {
return false;
}
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v4[n] < max_per_type
&& did.dial_info.protocol_type() == *protocol_type
{
nodes_proto_v4[n] += 1;
keep = true;
}
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v4[n] < max_per_type
&& did.dial_info.protocol_type() == *protocol_type
{
nodes_proto_v4[n] += 1;
keep = true;
}
} else if matches!(did.dial_info.address_type(), AddressType::IPV6) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v6[n] < max_per_type
&& did.dial_info.protocol_type() == *protocol_type
{
nodes_proto_v6[n] += 1;
keep = true;
}
}
} else if matches!(did.dial_info.address_type(), AddressType::IPV6) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v6[n] < max_per_type
&& did.dial_info.protocol_type() == *protocol_type
{
nodes_proto_v6[n] += 1;
keep = true;
}
}
}
keep
};
e.node_info(RoutingDomain::PublicInternet)
.map(filter)
.unwrap_or(false)
}
keep
})
},
) as RoutingTableEntryFilter;
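The filter above admits at most max_per_type bootstrap nodes per protocol within each address family, so the selected set covers a mix of transports. The counting core in isolation, a hedged sketch with protocol types reduced to plain indices:

// Returns true (and consumes quota) if a dial info with this protocol
// index still fits under the per-protocol cap for its address family.
fn try_count(counters: &mut [usize], proto_idx: usize, max_per_type: usize) -> bool {
    if counters[proto_idx] < max_per_type {
        counters[proto_idx] += 1;
        true
    } else {
        false
    }
}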

View file

@ -5,22 +5,13 @@ const MAX_INSPECT_VALUE_Q_SUBKEY_RANGES_LEN: usize = 512;
pub const MAX_INSPECT_VALUE_A_SEQS_LEN: usize = 512;
const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Clone)]
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct ValidateInspectValueContext {
pub last_descriptor: Option<SignedValueDescriptor>,
pub subkeys: ValueSubkeyRangeSet,
pub crypto_kind: CryptoKind,
}
impl fmt::Debug for ValidateInspectValueContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ValidateInspectValueContext")
.field("last_descriptor", &self.last_descriptor)
.field("crypto_kind", &self.crypto_kind)
.finish()
}
}
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationInspectValueQ {
key: TypedKey,
@ -161,12 +152,20 @@ impl RPCOperationInspectValueA {
};
// Ensure the seqs returned do not exceed the subkeys requested
#[allow(clippy::unnecessary_cast)]
if self.seqs.len() as u64 > inspect_value_context.subkeys.len() as u64 {
let subkey_count = if inspect_value_context.subkeys.is_empty()
|| inspect_value_context.subkeys.is_full()
|| inspect_value_context.subkeys.len() > MAX_INSPECT_VALUE_A_SEQS_LEN as u64
{
MAX_INSPECT_VALUE_A_SEQS_LEN as u64
} else {
inspect_value_context.subkeys.len()
};
if self.seqs.len() as u64 > subkey_count {
return Err(RPCError::protocol(format!(
"InspectValue seqs length is greater than subkeys requested: {} > {}",
"InspectValue seqs length is greater than subkeys requested: {} > {}: {:#?}",
self.seqs.len(),
inspect_value_context.subkeys.len()
subkey_count,
inspect_value_context
)));
}
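The clamp above bounds how many sequence numbers a peer may legitimately return: empty, full, or oversized requested ranges all cap at MAX_INSPECT_VALUE_A_SEQS_LEN. A minimal standalone sketch of the same bound, with plain u64 arguments standing in for the real ValueSubkeyRangeSet:

fn expected_seq_count(subkeys_len: u64, is_empty: bool, is_full: bool) -> u64 {
    const MAX_INSPECT_VALUE_A_SEQS_LEN: u64 = 512;
    // Degenerate or unbounded ranges cap at the wire maximum
    if is_empty || is_full || subkeys_len > MAX_INSPECT_VALUE_A_SEQS_LEN {
        MAX_INSPECT_VALUE_A_SEQS_LEN
    } else {
        subkeys_len
    }
}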

View file

@ -5,7 +5,7 @@ impl_veilid_log_facility!("rpc");
#[derive(Clone, Debug)]
pub struct InspectValueAnswer {
pub seqs: Vec<ValueSeqNum>,
pub seqs: Vec<Option<ValueSeqNum>>,
pub peers: Vec<Arc<PeerInfo>>,
pub descriptor: Option<SignedValueDescriptor>,
}
@ -110,6 +110,11 @@ impl RPCProcessor {
};
let (seqs, peers, descriptor) = inspect_value_a.destructure();
let seqs = seqs
.into_iter()
.map(|x| if x == ValueSeqNum::MAX { None } else { Some(x) })
.collect::<Vec<_>>();
if debug_target_enabled!("dht") {
let debug_string_answer = format!(
"OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}",
@ -232,8 +237,15 @@ impl RPCProcessor {
.inbound_inspect_value(key, subkeys, want_descriptor)
.await
.map_err(RPCError::internal)?);
(inspect_result.seqs, inspect_result.opt_descriptor)
(
inspect_result.seqs().to_vec(),
inspect_result.opt_descriptor(),
)
};
let inspect_result_seqs = inspect_result_seqs
.into_iter()
.map(|x| if let Some(s) = x { s } else { ValueSeqNum::MAX })
.collect::<Vec<_>>();
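On the wire an unwritten subkey is encoded as ValueSeqNum::MAX; at the API boundary it becomes None. A minimal sketch of the two conversions performed above, assuming only that ValueSeqNum is a u32 alias:

type ValueSeqNum = u32;

// Wire -> API: MAX is the sentinel for "subkey never written"
fn decode_seq(raw: ValueSeqNum) -> Option<ValueSeqNum> {
    if raw == ValueSeqNum::MAX { None } else { Some(raw) }
}

// API -> wire: None maps back to the MAX sentinel
fn encode_seq(seq: Option<ValueSeqNum>) -> ValueSeqNum {
    seq.unwrap_or(ValueSeqNum::MAX)
}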
if debug_target_enabled!("dht") {
let debug_string_answer = format!(

View file

@ -79,7 +79,7 @@ impl StorageManager {
pub async fn debug_local_record_subkey_info(
&self,
key: TypedKey,
record_key: TypedKey,
subkey: ValueSubkey,
) -> String {
let inner = self.inner.lock().await;
@ -87,12 +87,12 @@ impl StorageManager {
return "not initialized".to_owned();
};
local_record_store
.debug_record_subkey_info(key, subkey)
.debug_record_subkey_info(record_key, subkey)
.await
}
pub async fn debug_remote_record_subkey_info(
&self,
key: TypedKey,
record_key: TypedKey,
subkey: ValueSubkey,
) -> String {
let inner = self.inner.lock().await;
@ -100,17 +100,17 @@ impl StorageManager {
return "not initialized".to_owned();
};
remote_record_store
.debug_record_subkey_info(key, subkey)
.debug_record_subkey_info(record_key, subkey)
.await
}
pub async fn debug_local_record_info(&self, key: TypedKey) -> String {
pub async fn debug_local_record_info(&self, record_key: TypedKey) -> String {
let inner = self.inner.lock().await;
let Some(local_record_store) = &inner.local_record_store else {
return "not initialized".to_owned();
};
let local_debug = local_record_store.debug_record_info(key);
let local_debug = local_record_store.debug_record_info(record_key);
let opened_debug = if let Some(o) = inner.opened_records.get(&key) {
let opened_debug = if let Some(o) = inner.opened_records.get(&record_key) {
format!("Opened Record: {:#?}\n", o)
} else {
"".to_owned()
@ -119,11 +119,11 @@ impl StorageManager {
format!("{}\n{}", local_debug, opened_debug)
}
pub async fn debug_remote_record_info(&self, key: TypedKey) -> String {
pub async fn debug_remote_record_info(&self, record_key: TypedKey) -> String {
let inner = self.inner.lock().await;
let Some(remote_record_store) = &inner.remote_record_store else {
return "not initialized".to_owned();
};
remote_record_store.debug_record_info(key)
remote_record_store.debug_record_info(record_key)
}
}

View file

@ -28,7 +28,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_get_value(
&self,
key: TypedKey,
record_key: TypedKey,
subkey: ValueSubkey,
safety_selection: SafetySelection,
last_get_result: GetResult,
@ -47,7 +47,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(key)
self.get_value_nodes(record_key)
.await?
.unwrap_or_default()
.into_iter()
@ -93,7 +93,7 @@ impl StorageManager {
.rpc_call_get_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
key,
record_key,
subkey,
last_descriptor.map(|x| (*x).clone()),
)
@ -255,7 +255,7 @@ impl StorageManager {
let routing_table = registry.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
key,
record_key,
key_count,
fanout,
consensus_count,

View file

@ -28,7 +28,7 @@ impl DescriptorInfo {
/// Info tracked per subkey
struct SubkeySeqCount {
/// The newest sequence number found for a subkey
pub seq: ValueSeqNum,
pub seq: Option<ValueSeqNum>,
/// The set of nodes that had the most recent value for this subkey
pub consensus_nodes: Vec<NodeRef>,
/// The set of nodes that had any value for this subkey
@ -44,6 +44,7 @@ struct OutboundInspectValueContext {
}
/// The result of the outbound_get_value operation
#[derive(Debug, Clone)]
pub(super) struct OutboundInspectValueResult {
/// Fanout results for each subkey
pub subkey_fanout_results: Vec<FanoutResult>,
@ -56,13 +57,14 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_inspect_value(
&self,
key: TypedKey,
record_key: TypedKey,
subkeys: ValueSubkeyRangeSet,
safety_selection: SafetySelection,
local_inspect_result: InspectResult,
use_set_scope: bool,
) -> VeilidAPIResult<OutboundInspectValueResult> {
let routing_domain = RoutingDomain::PublicInternet;
let requested_subkeys = subkeys.clone();
// Get the DHT parameters for 'InspectValue'
// Can use either 'get scope' or 'set scope' depending on the purpose of the inspection
@ -86,7 +88,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(key)
self.get_value_nodes(record_key)
.await?
.unwrap_or_default()
.into_iter()
@ -99,16 +101,16 @@ impl StorageManager {
};
// Make do-inspect-value answer context
let opt_descriptor_info = if let Some(descriptor) = &local_inspect_result.opt_descriptor {
let opt_descriptor_info = if let Some(descriptor) = local_inspect_result.opt_descriptor() {
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
Some(DescriptorInfo::new(descriptor.clone(), &subkeys)?)
Some(DescriptorInfo::new(descriptor, &subkeys)?)
} else {
None
};
let context = Arc::new(Mutex::new(OutboundInspectValueContext {
seqcounts: local_inspect_result
.seqs
.seqs()
.iter()
.map(|s| SubkeySeqCount {
seq: *s,
@ -127,7 +129,7 @@ impl StorageManager {
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {
let context = context.clone();
let registry = registry.clone();
let opt_descriptor = local_inspect_result.opt_descriptor.clone();
let opt_descriptor = local_inspect_result.opt_descriptor();
let subkeys = subkeys.clone();
Box::pin(async move {
let rpc_processor = registry.rpc_processor();
@ -136,7 +138,7 @@ impl StorageManager {
rpc_processor
.rpc_call_inspect_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
key,
record_key,
subkeys.clone(),
opt_descriptor.map(|x| (*x).clone()),
)
@ -237,13 +239,13 @@ impl StorageManager {
// Then take that sequence number and note that we have gotten newer sequence numbers so we keep
// looking for consensus
// If the sequence number matches the old sequence number, then we keep the value node for reference later
if answer_seq != ValueSeqNum::MAX {
if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq
if let Some(answer_seq) = answer_seq {
if ctx_seqcnt.seq.is_none() || answer_seq > ctx_seqcnt.seq.unwrap()
{
// One node has shown us the latest sequence numbers so far
ctx_seqcnt.seq = answer_seq;
ctx_seqcnt.seq = Some(answer_seq);
ctx_seqcnt.consensus_nodes = vec![next_node.clone()];
} else if answer_seq == ctx_seqcnt.seq {
} else if answer_seq == ctx_seqcnt.seq.unwrap() {
// Keep the nodes that showed us the latest values
ctx_seqcnt.consensus_nodes.push(next_node.clone());
}
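In isolation, the consensus rule above: a strictly newer answer resets the consensus node set to the answering node, an equal answer joins it, and a None answer (unwritten subkey) changes nothing. A hedged sketch with a plain integer standing in for NodeRef:

struct SeqCount {
    seq: Option<u32>,
    consensus_nodes: Vec<u64>, // stand-in for Vec<NodeRef>
}

fn update_consensus(cnt: &mut SeqCount, answer_seq: Option<u32>, node: u64) {
    if let Some(answer_seq) = answer_seq {
        if cnt.seq.is_none() || answer_seq > cnt.seq.unwrap() {
            // This node has shown us the newest sequence number so far
            cnt.seq = Some(answer_seq);
            cnt.consensus_nodes = vec![node];
        } else if answer_seq == cnt.seq.unwrap() {
            // Keep the nodes that agree with the newest value
            cnt.consensus_nodes.push(node);
        }
    }
}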
@ -288,7 +290,7 @@ impl StorageManager {
let routing_table = self.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
key,
record_key,
key_count,
fanout,
consensus_count,
@ -322,28 +324,41 @@ impl StorageManager {
veilid_log!(self debug "InspectValue Fanout: {:#}:\n{}", fanout_result, debug_fanout_results(&subkey_fanout_results));
}
Ok(OutboundInspectValueResult {
let result = OutboundInspectValueResult {
subkey_fanout_results,
inspect_result: InspectResult {
subkeys: ctx
.opt_descriptor_info
inspect_result: InspectResult::new(
self,
requested_subkeys,
"outbound_inspect_value",
ctx.opt_descriptor_info
.as_ref()
.map(|d| d.subkeys.clone())
.unwrap_or_default(),
seqs: ctx.seqcounts.iter().map(|cs| cs.seq).collect(),
opt_descriptor: ctx
.opt_descriptor_info
ctx.seqcounts.iter().map(|cs| cs.seq).collect(),
ctx.opt_descriptor_info
.as_ref()
.map(|d| d.descriptor.clone()),
},
})
)?,
};
#[allow(clippy::unnecessary_cast)]
{
if result.inspect_result.subkeys().len() as u64
!= result.subkey_fanout_results.len() as u64
{
veilid_log!(self error "mismatch between subkeys returned and fanout results returned: {}!={}", result.inspect_result.subkeys().len(), result.subkey_fanout_results.len());
apibail_internal!("subkey and fanout list length mismatched");
}
}
Ok(result)
}
/// Handle a received 'Inspect Value' query
#[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_inspect_value(
&self,
key: TypedKey,
record_key: TypedKey,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<InspectResult>> {
@ -352,24 +367,25 @@ impl StorageManager {
// See if this is a remote or local value
let (_is_local, inspect_result) = {
// See if the subkey we are getting has a last known local value
let mut local_inspect_result =
Self::handle_inspect_local_value_inner(&mut inner, key, subkeys.clone(), true)
.await?;
let mut local_inspect_result = self
.handle_inspect_local_value_inner(&mut inner, record_key, subkeys.clone(), true)
.await?;
// If this is local, it must have a descriptor already
if local_inspect_result.opt_descriptor.is_some() {
if local_inspect_result.opt_descriptor().is_some() {
if !want_descriptor {
local_inspect_result.opt_descriptor = None;
local_inspect_result.drop_descriptor();
}
(true, local_inspect_result)
} else {
// See if the subkey we are getting has a last known remote value
let remote_inspect_result = Self::handle_inspect_remote_value_inner(
&mut inner,
key,
subkeys,
want_descriptor,
)
.await?;
let remote_inspect_result = self
.handle_inspect_remote_value_inner(
&mut inner,
record_key,
subkeys,
want_descriptor,
)
.await?;
(false, remote_inspect_result)
}
};

File diff suppressed because it is too large

View file

@ -133,7 +133,7 @@ impl OutboundWatchManager {
// Watch does not exist, add one if that's what is desired
if let Some(desired) = desired_watch {
self.outbound_watches
.insert(record_key, OutboundWatch::new(desired));
.insert(record_key, OutboundWatch::new(record_key, desired));
}
}
}

View file

@ -4,6 +4,9 @@ impl_veilid_log_facility!("stor");
#[derive(Clone, Debug, Serialize, Deserialize)]
pub(in crate::storage_manager) struct OutboundWatch {
/// Record key being watched
record_key: TypedKey,
/// Current state
/// None means inactive/cancelled
state: Option<OutboundWatchState>,
@ -34,12 +37,14 @@ impl fmt::Display for OutboundWatch {
impl OutboundWatch {
/// Create new outbound watch with desired parameters
pub fn new(desired: OutboundWatchParameters) -> Self {
pub fn new(record_key: TypedKey, desired: OutboundWatchParameters) -> Self {
Self {
record_key,
state: None,
desired: Some(desired),
}
}
/// Get current watch state if it exists
pub fn state(&self) -> Option<&OutboundWatchState> {
self.state.as_ref()
@ -107,7 +112,7 @@ impl OutboundWatch {
/// Returns true if this outbound watch needs to be cancelled
pub fn needs_cancel(&self, registry: &VeilidComponentRegistry) -> bool {
if self.is_dead() {
veilid_log!(registry warn "should have checked for is_dead first");
veilid_log!(registry warn "Should have checked for is_dead first");
return false;
}
@ -118,6 +123,7 @@ impl OutboundWatch {
// If the desired parameters is None then cancel
let Some(_desired) = self.desired.as_ref() else {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_cancel because desired is None", self.record_key);
return true;
};
@ -132,7 +138,7 @@ impl OutboundWatch {
cur_ts: Timestamp,
) -> bool {
if self.is_dead() || self.needs_cancel(registry) {
veilid_log!(registry warn "should have checked for is_dead and needs_cancel first");
veilid_log!(registry warn "Should have checked for is_dead and needs_cancel first");
return false;
}
@ -156,11 +162,17 @@ impl OutboundWatch {
// If we have a consensus but need to renew because some per-node watches
// either expired or had their routes die, do it
if self.wants_per_node_watch_update(registry, state, cur_ts) {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_renew because per_node_watch wants update", self.record_key);
return true;
}
// If the desired parameters have changed, then we should renew with them
state.params() != desired
if state.params() != desired {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_renew because desired params have changed: {} != {}", self.record_key, state.params(), desired);
return true;
}
false
}
/// Returns true if there is work to be done on getting the outbound
@ -175,7 +187,7 @@ impl OutboundWatch {
|| self.needs_cancel(registry)
|| self.needs_renew(registry, consensus_count, cur_ts)
{
veilid_log!(registry warn "should have checked for is_dead, needs_cancel, needs_renew first");
veilid_log!(registry warn "Should have checked for is_dead, needs_cancel, needs_renew first");
return false;
}
@ -187,6 +199,7 @@ impl OutboundWatch {
// If there is a desired watch but no current state, then reconcile
let Some(state) = self.state() else {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because state is empty", self.record_key);
return true;
};
@ -195,13 +208,17 @@ impl OutboundWatch {
if state.nodes().len() < consensus_count
&& cur_ts >= state.next_reconcile_ts().unwrap_or_default()
{
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because consensus count is too low {} < {}", self.record_key, state.nodes().len(), consensus_count);
return true;
}
// Try to reconcile if our number of nodes currently is less than what we got from
// the previous reconciliation attempt
if let Some(last_consensus_node_count) = state.last_consensus_node_count() {
if state.nodes().len() < last_consensus_node_count {
if state.nodes().len() < last_consensus_node_count
&& state.nodes().len() < consensus_count
{
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because node count is less than last consensus {} < {}", self.record_key, state.nodes().len(), last_consensus_node_count);
return true;
}
}
@ -209,11 +226,17 @@ impl OutboundWatch {
// If we have a consensus, or are not attempting consensus at this time,
// but need to reconcile because some per-node watches either expired or had their routes die, do it
if self.wants_per_node_watch_update(registry, state, cur_ts) {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because per_node_watch wants update", self.record_key);
return true;
}
// If the desired parameters have changed, then we should reconcile with them
state.params() != desired
if state.params() != desired {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because desired params have changed: {} != {}", self.record_key, state.params(), desired);
return true;
}
false
}
/// Returns true if we need to update our per-node watches due to expiration,
@ -233,6 +256,7 @@ impl OutboundWatch {
&& (state.params().expiration_ts.as_u64() == 0
|| renew_ts < state.params().expiration_ts)
{
veilid_log!(registry debug target: "dht", "OutboundWatch({}): wants_per_node_watch_update because cur_ts is in expiration renew window", self.record_key);
return true;
}
@ -244,6 +268,7 @@ impl OutboundWatch {
for vcr in state.value_changed_routes() {
if rss.get_route_id_for_key(vcr).is_none() {
// Route we would receive value changes on is dead
veilid_log!(registry debug target: "dht", "OutboundWatch({}): wants_per_node_watch_update because route is dead: {}", self.record_key, vcr);
return true;
}
}

View file

@ -4,7 +4,7 @@ const L2_CACHE_DEPTH: usize = 4; // XXX: i just picked this. we could probably d
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InspectCacheL2Value {
pub seqs: Vec<ValueSeqNum>,
pub seqs: Vec<Option<ValueSeqNum>>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
@ -67,7 +67,7 @@ impl InspectCache {
continue;
};
if idx < entry.1.seqs.len() {
entry.1.seqs[idx] = seq;
entry.1.seqs[idx] = Some(seq);
} else {
panic!(
"representational error in l2 inspect cache: {} >= {}",

View file

@ -128,11 +128,55 @@ pub struct GetResult {
#[derive(Default, Clone, Debug)]
pub struct InspectResult {
/// The actual in-schema subkey range being reported on
pub subkeys: ValueSubkeyRangeSet,
subkeys: ValueSubkeyRangeSet,
/// The sequence map
pub seqs: Vec<ValueSeqNum>,
seqs: Vec<Option<ValueSeqNum>>,
/// The descriptor if we got a fresh one or empty if no descriptor was needed
pub opt_descriptor: Option<Arc<SignedValueDescriptor>>,
opt_descriptor: Option<Arc<SignedValueDescriptor>>,
}
impl InspectResult {
pub fn new(
registry_accessor: &impl VeilidComponentRegistryAccessor,
requested_subkeys: ValueSubkeyRangeSet,
log_context: &str,
subkeys: ValueSubkeyRangeSet,
seqs: Vec<Option<ValueSeqNum>>,
opt_descriptor: Option<Arc<SignedValueDescriptor>>,
) -> VeilidAPIResult<Self> {
#[allow(clippy::unnecessary_cast)]
{
if subkeys.len() as u64 != seqs.len() as u64 {
veilid_log!(registry_accessor error "{}: mismatch between subkeys returned and sequence number list returned: {}!={}", log_context, subkeys.len(), seqs.len());
apibail_internal!("list length mismatch");
}
}
if !subkeys.is_subset(&requested_subkeys) {
veilid_log!(registry_accessor error "{}: more subkeys returned than requested: {} not a subset of {}", log_context, subkeys, requested_subkeys);
apibail_internal!("invalid subkeys returned");
}
Ok(InspectResult {
subkeys,
seqs,
opt_descriptor,
})
}
pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
&self.subkeys
}
pub fn seqs(&self) -> &[Option<ValueSeqNum>] {
&self.seqs
}
pub fn seqs_mut(&mut self) -> &mut [Option<ValueSeqNum>] {
&mut self.seqs
}
pub fn opt_descriptor(&self) -> Option<Arc<SignedValueDescriptor>> {
self.opt_descriptor.clone()
}
pub fn drop_descriptor(&mut self) {
self.opt_descriptor = None;
}
}
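The new constructor enforces two invariants that were previously implicit: one sequence number per reported subkey, and no subkeys beyond what was requested. A property-style sketch of the same checks, with BTreeSet standing in for ValueSubkeyRangeSet:

use std::collections::BTreeSet;

fn check_inspect_invariants(
    requested: &BTreeSet<u32>,
    subkeys: &BTreeSet<u32>,
    seqs: &[Option<u32>],
) -> Result<(), &'static str> {
    if subkeys.len() != seqs.len() {
        return Err("list length mismatch");
    }
    if !subkeys.is_subset(requested) {
        return Err("invalid subkeys returned");
    }
    Ok(())
}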
impl<D> RecordStore<D>
@ -822,18 +866,18 @@ where
pub async fn inspect_record(
&mut self,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
subkeys: &ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<Option<InspectResult>> {
// Get record from index
let Some((subkeys, opt_descriptor)) = self.with_record(key, |record| {
let Some((schema_subkeys, opt_descriptor)) = self.with_record(key, |record| {
// Get number of subkeys from schema and ensure we are getting the
// right number of sequence numbers between that and what we asked for
let truncated_subkeys = record
let schema_subkeys = record
.schema()
.truncate_subkeys(&subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN));
.truncate_subkeys(subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN));
(
truncated_subkeys,
schema_subkeys,
if want_descriptor {
Some(record.descriptor().clone())
} else {
@ -846,56 +890,60 @@ where
};
// Check if we can return some subkeys
if subkeys.is_empty() {
apibail_invalid_argument!("subkeys set does not overlap schema", "subkeys", subkeys);
if schema_subkeys.is_empty() {
apibail_invalid_argument!(
"subkeys set does not overlap schema",
"subkeys",
schema_subkeys
);
}
// See if we have this inspection cached
if let Some(icv) = self.inspect_cache.get(&key, &subkeys) {
return Ok(Some(InspectResult {
subkeys,
seqs: icv.seqs,
if let Some(icv) = self.inspect_cache.get(&key, &schema_subkeys) {
return Ok(Some(InspectResult::new(
self,
subkeys.clone(),
"inspect_record",
schema_subkeys.clone(),
icv.seqs,
opt_descriptor,
}));
)?));
}
// Build sequence number list to return
#[allow(clippy::unnecessary_cast)]
let mut seqs = Vec::with_capacity(subkeys.len() as usize);
for subkey in subkeys.iter() {
let mut seqs = Vec::with_capacity(schema_subkeys.len() as usize);
for subkey in schema_subkeys.iter() {
let stk = SubkeyTableKey { key, subkey };
let seq = if let Some(record_data) = self.subkey_cache.peek(&stk) {
record_data.signed_value_data().value_data().seq()
let opt_seq = if let Some(record_data) = self.subkey_cache.peek(&stk) {
Some(record_data.signed_value_data().value_data().seq())
} else {
// If not in cache, try to pull from table store if it is in our stored subkey set
// XXX: This would be better if it didn't have to pull the whole record data to get the seq.
if let Some(record_data) = self
.subkey_table
self.subkey_table
.load_json::<RecordData>(0, &stk.bytes())
.await
.map_err(VeilidAPIError::internal)?
{
record_data.signed_value_data().value_data().seq()
} else {
// Subkey not written to
ValueSubkey::MAX
}
.map(|record_data| record_data.signed_value_data().value_data().seq())
};
seqs.push(seq)
seqs.push(opt_seq)
}
// Save seqs cache
self.inspect_cache.put(
key,
subkeys.clone(),
schema_subkeys.clone(),
InspectCacheL2Value { seqs: seqs.clone() },
);
Ok(Some(InspectResult {
subkeys,
Ok(Some(InspectResult::new(
self,
subkeys.clone(),
"inspect_record",
schema_subkeys,
seqs,
opt_descriptor,
}))
)?))
}
#[instrument(level = "trace", target = "stor", skip_all, err)]
@ -1242,7 +1290,7 @@ where
changes.push(ValueChangedInfo {
target: evci.target,
key: evci.key,
record_key: evci.key,
subkeys: evci.subkeys,
count: evci.count,
watch_id: evci.watch_id,

View file

@ -0,0 +1,271 @@
use super::{inspect_value::OutboundInspectValueResult, *};
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct RehydrateReport {
/// The record key rehydrated
record_key: TypedKey,
/// The requested range of subkeys to rehydrate if necessary
subkeys: ValueSubkeyRangeSet,
/// The requested consensus count,
consensus_count: usize,
/// The range of subkeys that wanted rehydration
wanted: ValueSubkeyRangeSet,
/// The range of subkeys that actually could be rehydrated
rehydrated: ValueSubkeyRangeSet,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(super) struct RehydrationRequest {
pub subkeys: ValueSubkeyRangeSet,
pub consensus_count: usize,
}
impl StorageManager {
/// Add a background rehydration request
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn add_rehydration_request(
&self,
record_key: TypedKey,
subkeys: ValueSubkeyRangeSet,
consensus_count: usize,
) {
let req = RehydrationRequest {
subkeys,
consensus_count,
};
veilid_log!(self debug "Adding rehydration request: {} {:?}", record_key, req);
let mut inner = self.inner.lock().await;
inner
.rehydration_requests
.entry(record_key)
.and_modify(|r| {
r.subkeys = r.subkeys.union(&req.subkeys);
r.consensus_count.max_assign(req.consensus_count);
})
.or_insert(req);
}
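Repeated requests for the same record coalesce rather than queue: the subkey ranges union and the stricter (larger) consensus count wins. The merge rule in isolation, again with BTreeSet standing in for ValueSubkeyRangeSet:

use std::collections::BTreeSet;

struct Req {
    subkeys: BTreeSet<u32>,
    consensus_count: usize,
}

fn merge_requests(existing: &mut Req, new: Req) {
    existing.subkeys.extend(new.subkeys); // union of requested ranges
    existing.consensus_count = existing.consensus_count.max(new.consensus_count);
}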
/// Sends the local copies of all of a record's subkeys back to the network.
/// Triggers a subkey update if the consensus on the subkey is less than
/// the specified 'consensus_count'.
/// The subkey updates are performed in the background if rehydration was
/// determined to be necessary.
/// If a newer copy of a subkey's data is available online, the background
/// write will pick up the newest subkey data as it does the SetValue fanout
/// and will drive the newest values to consensus.
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
pub(super) async fn rehydrate_record(
&self,
record_key: TypedKey,
subkeys: ValueSubkeyRangeSet,
consensus_count: usize,
) -> VeilidAPIResult<RehydrateReport> {
veilid_log!(self debug "Checking for record rehydration: {} {} @ consensus {}", record_key, subkeys, consensus_count);
// Get subkey range for consideration
let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
} else {
subkeys
};
// Get safety selection
let mut inner = self.inner.lock().await;
let safety_selection = {
if let Some(opened_record) = inner.opened_records.get(&record_key) {
opened_record.safety_selection()
} else {
// See if it's in the local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
let Some(safety_selection) =
local_record_store.with_record(record_key, |rec| rec.detail().safety_selection)
else {
apibail_key_not_found!(record_key);
};
safety_selection
}
};
// See if the requested record is our local record store
let local_inspect_result = self
.handle_inspect_local_value_inner(&mut inner, record_key, subkeys.clone(), true)
.await?;
// Get rpc processor and drop mutex so we don't block while getting the value from the network
if !self.dht_is_online() {
apibail_try_again!("offline, try again later");
};
// Drop the lock for network access
drop(inner);
// Get the inspect record report from the network
let result = self
.outbound_inspect_value(
record_key,
subkeys.clone(),
safety_selection,
InspectResult::default(),
true,
)
.await?;
// If online result had no subkeys, then trigger writing the entire record in the background
if result.inspect_result.subkeys().is_empty()
|| result.inspect_result.opt_descriptor().is_none()
{
return self
.rehydrate_all_subkeys(
record_key,
subkeys,
consensus_count,
safety_selection,
local_inspect_result,
)
.await;
}
return self
.rehydrate_required_subkeys(
record_key,
subkeys,
consensus_count,
safety_selection,
local_inspect_result,
result,
)
.await;
}
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
pub(super) async fn rehydrate_all_subkeys(
&self,
record_key: TypedKey,
subkeys: ValueSubkeyRangeSet,
consensus_count: usize,
safety_selection: SafetySelection,
local_inspect_result: InspectResult,
) -> VeilidAPIResult<RehydrateReport> {
let mut inner = self.inner.lock().await;
veilid_log!(self debug "Rehydrating all subkeys: record={} subkeys={}", record_key, local_inspect_result.subkeys());
let mut rehydrated = ValueSubkeyRangeSet::new();
for (n, subkey) in local_inspect_result.subkeys().iter().enumerate() {
if local_inspect_result.seqs()[n].is_some() {
// Add to offline writes to flush
veilid_log!(self debug "Rehydrating: record={} subkey={}", record_key, subkey);
rehydrated.insert(subkey);
Self::add_offline_subkey_write_inner(
&mut inner,
record_key,
subkey,
safety_selection,
);
}
}
if rehydrated.is_empty() {
veilid_log!(self debug "Record wanted full rehydrating, but no subkey data available: record={} subkeys={}", record_key, subkeys);
} else {
veilid_log!(self debug "Record full rehydrating: record={} subkeys={} rehydrated={}", record_key, subkeys, rehydrated);
}
return Ok(RehydrateReport {
record_key,
subkeys,
consensus_count,
wanted: local_inspect_result.subkeys().clone(),
rehydrated,
});
}
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
pub(super) async fn rehydrate_required_subkeys(
&self,
record_key: TypedKey,
subkeys: ValueSubkeyRangeSet,
consensus_count: usize,
safety_selection: SafetySelection,
local_inspect_result: InspectResult,
outbound_inspect_result: OutboundInspectValueResult,
) -> VeilidAPIResult<RehydrateReport> {
let mut inner = self.inner.lock().await;
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind) else {
apibail_generic!("unsupported cryptosystem");
};
if local_inspect_result.subkeys().len()
!= outbound_inspect_result.subkey_fanout_results.len() as u64
{
veilid_log!(self debug "Subkey count mismatch when rehydrating required subkeys: record={} {} != {}",
record_key, local_inspect_result.subkeys().len(), outbound_inspect_result.subkey_fanout_results.len());
apibail_internal!("subkey count mismatch");
}
// For each subkey, determine if we should rehydrate it
let mut wanted = ValueSubkeyRangeSet::new();
let mut rehydrated = ValueSubkeyRangeSet::new();
for (n, subkey) in local_inspect_result.subkeys().iter().enumerate() {
let sfr = outbound_inspect_result
.subkey_fanout_results
.get(n)
.unwrap();
// Does the online subkey have enough consensus?
// If not, schedule it to be written in the background
if sfr.consensus_nodes.len() < consensus_count {
wanted.insert(subkey);
if local_inspect_result.seqs()[n].is_some() {
// Add to offline writes to flush
veilid_log!(self debug "Rehydrating: record={} subkey={}", record_key, subkey);
rehydrated.insert(subkey);
Self::add_offline_subkey_write_inner(
&mut inner,
record_key,
subkey,
safety_selection,
);
}
}
}
if wanted.is_empty() {
veilid_log!(self debug "Record did not need rehydrating: record={} local_subkeys={}", record_key, local_inspect_result.subkeys());
} else if rehydrated.is_empty() {
veilid_log!(self debug "Record wanted rehydrating, but no subkey data available: record={} local_subkeys={} wanted={}", record_key, local_inspect_result.subkeys(), wanted);
} else {
veilid_log!(self debug "Record rehydrating: record={} local_subkeys={} wanted={} rehydrated={}", record_key, local_inspect_result.subkeys(), wanted, rehydrated);
}
// Keep the list of nodes that returned a value for later reference
let results_iter = outbound_inspect_result
.inspect_result
.subkeys()
.iter()
.map(ValueSubkeyRangeSet::single)
.zip(outbound_inspect_result.subkey_fanout_results.into_iter());
Self::process_fanout_results_inner(
&mut inner,
&vcrypto,
record_key,
results_iter,
false,
self.config()
.with(|c| c.network.dht.set_value_count as usize),
);
Ok(RehydrateReport {
record_key,
subkeys,
consensus_count,
wanted,
rehydrated,
})
}
}
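From a caller's perspective the whole mechanism is fire-and-forget: queue a request and let the background task drive low-consensus subkeys back out to the network. A hedged usage sketch (the empty range meaning "whole record" follows rehydrate_record above; rehydrate_whole_record itself is a hypothetical helper, not part of the API):

async fn rehydrate_whole_record(
    storage_manager: &StorageManager,
    record_key: TypedKey,
    consensus_count: usize,
) {
    // An empty subkey range is expanded to the full range by rehydrate_record
    storage_manager
        .add_rehydration_request(record_key, ValueSubkeyRangeSet::new(), consensus_count)
        .await;
}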

View file

@ -28,7 +28,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_set_value(
&self,
key: TypedKey,
record_key: TypedKey,
subkey: ValueSubkey,
safety_selection: SafetySelection,
value: Arc<SignedValueData>,
@ -48,7 +48,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(key)
self.get_value_nodes(record_key)
.await?
.unwrap_or_default()
.into_iter()
@ -99,7 +99,7 @@ impl StorageManager {
.rpc_call_set_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
key,
record_key,
subkey,
(*value).clone(),
(*descriptor).clone(),
@ -228,7 +228,7 @@ impl StorageManager {
let routing_table = registry.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
key,
record_key,
key_count,
fanout,
consensus_count,

View file

@ -2,6 +2,7 @@ pub mod check_inbound_watches;
pub mod check_outbound_watches;
pub mod flush_record_stores;
pub mod offline_subkey_writes;
pub mod rehydrate_records;
pub mod save_metadata;
pub mod send_value_changes;
@ -55,6 +56,15 @@ impl StorageManager {
check_inbound_watches_task,
check_inbound_watches_task_routine
);
// Set rehydrate records tick task
veilid_log!(self debug "starting rehydrate records task");
impl_setup_task!(
self,
Self,
rehydrate_records_task,
rehydrate_records_task_routine
);
}
#[instrument(parent = None, level = "trace", target = "stor", name = "StorageManager::tick", skip_all, err)]
@ -78,6 +88,11 @@ impl StorageManager {
self.offline_subkey_writes_task.tick().await?;
}
// Do requested rehydrations
if self.has_rehydration_requests().await {
self.rehydrate_records_task.tick().await?;
}
// Send value changed notifications
self.send_value_changes_task.tick().await?;
}
@ -106,5 +121,9 @@ impl StorageManager {
if let Err(e) = self.offline_subkey_writes_task.stop().await {
veilid_log!(self warn "offline_subkey_writes_task not stopped: {}", e);
}
veilid_log!(self debug "stopping record rehydration task");
if let Err(e) = self.rehydrate_records_task.stop().await {
veilid_log!(self warn "rehydrate_records_task not stopped: {}", e);
}
}
}

View file

@ -0,0 +1,50 @@
use super::*;
impl_veilid_log_facility!("stor");
impl StorageManager {
/// Process background rehydration requests
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn rehydrate_records_task_routine(
&self,
stop_token: StopToken,
_last_ts: Timestamp,
_cur_ts: Timestamp,
) -> EyreResult<()> {
let reqs = {
let mut inner = self.inner.lock().await;
core::mem::take(&mut inner.rehydration_requests)
};
let mut futs = Vec::new();
for req in reqs {
futs.push(async move {
let res = self
.rehydrate_record(req.0, req.1.subkeys.clone(), req.1.consensus_count)
.await;
(req, res)
});
}
process_batched_future_queue(
futs,
REHYDRATE_BATCH_SIZE,
stop_token,
|(req, res)| async move {
let _report = match res {
Ok(v) => v,
Err(e) => {
veilid_log!(self debug "Rehydration request failed: {}", e);
// Try again later
self.add_rehydration_request(req.0, req.1.subkeys, req.1.consensus_count)
.await;
return;
}
};
},
)
.await;
Ok(())
}
}
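Failed requests are not dropped; the routine re-queues them via add_rehydration_request so they are retried on a later tick. The retry shape in isolation, a hedged sketch where the request and worker types are simplified stand-ins for the real batched-future queue:

async fn drain_with_retry(
    reqs: Vec<u64>, // stand-in for rehydration requests
    requeue: &mut Vec<u64>,
) {
    for req in reqs {
        // Hypothetical per-request worker; the real code runs these in batches
        if rehydrate_once(req).await.is_err() {
            requeue.push(req); // try again on the next tick
        }
    }
}

async fn rehydrate_once(_req: u64) -> Result<(), String> {
    Ok(()) // placeholder for the real record rehydration work
}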

View file

@ -263,7 +263,7 @@ impl StorageManager {
let disposition = if wva.answer.accepted {
if wva.answer.expiration_ts.as_u64() > 0 {
// If the expiration time is greater than zero this watch is active
veilid_log!(registry debug "WatchValue accepted for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
veilid_log!(registry debug target:"dht", "WatchValue accepted for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
// Add to accepted watches
let mut ctx = context.lock();
@ -279,7 +279,7 @@ impl StorageManager {
// If the returned expiration time is zero, this watch was cancelled
// If the expiration time is greater than zero this watch is active
veilid_log!(registry debug "WatchValue rejected for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
veilid_log!(registry debug target:"dht", "WatchValue rejected for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
// Add to rejected watches
let mut ctx = context.lock();
@ -344,10 +344,10 @@ impl StorageManager {
let fanout_result = fanout_call.run(init_fanout_queue).await.inspect_err(|e| {
// If we finished with an error, return that
veilid_log!(self debug "WatchValue fanout error: {}", e);
veilid_log!(self debug target:"dht", "WatchValue fanout error: {}", e);
})?;
veilid_log!(self debug "WatchValue Fanout: {:#}", fanout_result);
veilid_log!(self debug target:"dht", "WatchValue Fanout: {:#}", fanout_result);
// Get cryptosystem
let crypto = self.crypto();
@ -476,7 +476,7 @@ impl StorageManager {
cancelled.push(pnk);
}
Err(e) => {
veilid_log!(self debug "outbound watch cancel error: {}", e);
veilid_log!(self debug "Outbound watch cancel error: {}", e);
// xxx should do something different for network unreachable vs host unreachable
// Leave in the 'per node states' for now because we couldn't contact the node
@ -604,7 +604,7 @@ impl StorageManager {
};
}
Err(e) => {
veilid_log!(self debug "outbound watch change error: {}", e);
veilid_log!(self debug "Outbound watch change error: {}", e);
}
}
}
@ -718,7 +718,7 @@ impl StorageManager {
});
}
Err(e) => {
veilid_log!(self debug "outbound watch fanout error: {}", e);
veilid_log!(self debug "Outbound watch fanout error: {}", e);
}
}
@ -742,11 +742,11 @@ impl StorageManager {
.outbound_watches
.get_mut(&record_key)
else {
veilid_log!(self warn "outbound watch should have still been in the table");
veilid_log!(self warn "Outbound watch should have still been in the table");
return;
};
let Some(desired) = outbound_watch.desired() else {
veilid_log!(self warn "watch with result should have desired params");
veilid_log!(self warn "Watch with result should have desired params");
return;
};
@ -852,6 +852,10 @@ impl StorageManager {
.config()
.with(|c| c.network.dht.get_value_count as usize);
// Operate on this watch only if it isn't already being operated on
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
// Terminate the 'desired' params for watches
// that have no remaining count or have expired
outbound_watch.try_expire_desired_state(cur_ts);
@ -859,9 +863,6 @@ impl StorageManager {
// Check states
if outbound_watch.is_dead() {
// Outbound watch is dead
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = {
let registry = self.registry();
async move {
@ -874,9 +875,6 @@ impl StorageManager {
return Some(pin_dyn_future!(fut));
} else if outbound_watch.needs_cancel(&registry) {
// Outbound watch needs to be cancelled
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = {
let registry = self.registry();
async move {
@ -889,9 +887,6 @@ impl StorageManager {
return Some(pin_dyn_future!(fut));
} else if outbound_watch.needs_renew(&registry, consensus_count, cur_ts) {
// Outbound watch expired but can be renewed
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = {
let registry = self.registry();
async move {
@ -904,9 +899,6 @@ impl StorageManager {
return Some(pin_dyn_future!(fut));
} else if outbound_watch.needs_reconcile(&registry, consensus_count, cur_ts) {
// Outbound watch parameters have changed or it needs more nodes
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = {
let registry = self.registry();
async move {
@ -944,12 +936,12 @@ impl StorageManager {
return;
}
};
let mut changed_subkeys = report.changed_subkeys();
let mut newer_online_subkeys = report.newer_online_subkeys();
// Get the first changed subkey until we find one to report
let mut n = 0;
while !changed_subkeys.is_empty() {
let first_changed_subkey = changed_subkeys.first().unwrap();
while !newer_online_subkeys.is_empty() {
let first_changed_subkey = newer_online_subkeys.first().unwrap();
let value = match this.get_value(record_key, first_changed_subkey, true).await {
Ok(v) => v,
@ -960,7 +952,8 @@ impl StorageManager {
};
if let Some(value) = value {
if value.seq() > report.local_seqs()[n] {
let opt_local_seq = report.local_seqs()[n];
if opt_local_seq.is_none() || value.seq() > opt_local_seq.unwrap() {
// Calculate the update
let (changed_subkeys, remaining_count, value) = {
let _watch_lock =
@ -991,7 +984,7 @@ impl StorageManager {
},
);
(changed_subkeys, remaining_count, value)
(newer_online_subkeys, remaining_count, value)
};
// Send the update
@ -1008,7 +1001,7 @@ impl StorageManager {
}
// If we didn't send an update, remove the first changed subkey and try again
changed_subkeys.pop_first();
newer_online_subkeys.pop_first();
n += 1;
}
}
@ -1111,14 +1104,14 @@ impl StorageManager {
inner.outbound_watch_manager.per_node_states.get_mut(&pnk)
else {
// No per node state means no callback
veilid_log!(self warn "missing per node state in outbound watch: {:?}", pnk);
veilid_log!(self warn "Missing per node state in outbound watch: {:?}", pnk);
return Ok(NetworkResult::value(()));
};
// If watch id doesn't match it's for an older watch and should be ignored
if per_node_state.watch_id != watch_id {
// No per node state means no callback
veilid_log!(self warn "incorrect watch id for per node state in outbound watch: {:?} {} != {}", pnk, per_node_state.watch_id, watch_id);
veilid_log!(self warn "Incorrect watch id for per node state in outbound watch: {:?} {} != {}", pnk, per_node_state.watch_id, watch_id);
return Ok(NetworkResult::value(()));
}
@ -1127,7 +1120,7 @@ impl StorageManager {
// If count is greater than our requested count then this is invalid, cancel the watch
// XXX: Should this be a punishment?
veilid_log!(self debug
"watch count went backward: {} @ {} id={}: {} > {}",
"Watch count went backward: {} @ {} id={}: {} > {}",
record_key,
inbound_node_id,
watch_id,
@ -1143,7 +1136,7 @@ impl StorageManager {
// Log this because watch counts should always be decrementing on a per-node basis.
// XXX: Should this be a punishment?
veilid_log!(self debug
"watch count duplicate: {} @ {} id={}: {} == {}",
"Watch count duplicate: {} @ {} id={}: {} == {}",
record_key,
inbound_node_id,
watch_id,
@ -1153,7 +1146,7 @@ impl StorageManager {
} else {
// Reduce the per-node watch count
veilid_log!(self debug
"watch count decremented: {} @ {} id={}: {} < {}",
"Watch count decremented: {} @ {} id={}: {} < {}",
record_key,
inbound_node_id,
watch_id,
@ -1285,7 +1278,7 @@ impl StorageManager {
remaining_count,
Some(value),
);
} else if reportable_subkeys.len() > 0 {
} else if !reportable_subkeys.is_empty() {
// We have subkeys that have been reported as possibly changed
// but not a specific record reported, so we should defer reporting and
// inspect the range to see what changed

View file

@ -1902,7 +1902,7 @@ impl VeilidAPI {
let (key, rc) =
self.clone()
.get_opened_dht_record_context(&args, "debug_record_watch", "key", 1)?;
.get_opened_dht_record_context(&args, "debug_record_inspect", "key", 1)?;
let mut rest_defaults = false;
@ -1947,6 +1947,62 @@ impl VeilidAPI {
Ok(format!("Success: report={:?}", report))
}
async fn debug_record_rehydrate(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let registry = self.core_context()?.registry();
let storage_manager = registry.storage_manager();
let key = get_debug_argument_at(
&args,
1,
"debug_record_rehydrate",
"key",
get_dht_key_no_safety,
)?;
let mut rest_defaults = false;
let subkeys = if rest_defaults {
None
} else {
get_debug_argument_at(&args, 2, "debug_record_rehydrate", "subkeys", get_subkeys)
.inspect_err(|_| {
rest_defaults = true;
})
.ok()
};
let consensus_count = if rest_defaults {
None
} else {
get_debug_argument_at(
&args,
3,
"debug_record_rehydrate",
"consensus_count",
get_number,
)
.inspect_err(|_| {
rest_defaults = true;
})
.ok()
};
// Do a record rehydrate
storage_manager
.add_rehydration_request(
key,
subkeys.unwrap_or_default(),
consensus_count.unwrap_or_else(|| {
registry
.config()
.with(|c| c.network.dht.get_value_count as usize)
}),
)
.await;
Ok("Request added".to_owned())
}
async fn debug_record(&self, args: String) -> VeilidAPIResult<String> {
let args: Vec<String> =
shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?;
@ -1977,6 +2033,8 @@ impl VeilidAPI {
self.debug_record_cancel(args).await
} else if command == "inspect" {
self.debug_record_inspect(args).await
} else if command == "rehydrate" {
self.debug_record_rehydrate(args).await
} else {
Ok(">>> Unknown command\n".to_owned())
}
@ -2144,6 +2202,7 @@ DHT Operations:
watch [<key>] [<subkeys> [<expiration> [<count>]]] - watch a record for changes
cancel [<key>] [<subkeys>] - cancel a dht record watch
inspect [<key>] [<scope> [<subkeys>]] - display a dht record's subkey status
rehydrate <key> [<subkeys>] [<consensus count>] - send a dht record's expired local data back to the network
TableDB Operations:
table list - list the names of all the tables in the TableDB

View file

@ -26,7 +26,7 @@ pub struct DHTRecordDescriptor {
from_impl_to_jsvalue!(DHTRecordDescriptor);
impl DHTRecordDescriptor {
pub fn new(
pub(crate) fn new(
key: TypedKey,
owner: PublicKey,
owner_secret: Option<SecretKey>,

View file

@ -16,25 +16,56 @@ pub struct DHTRecordReport {
/// The subkeys that have been written offline that still need to be flushed
offline_subkeys: ValueSubkeyRangeSet,
/// The sequence numbers of each subkey requested from a locally stored DHT Record
local_seqs: Vec<ValueSeqNum>,
local_seqs: Vec<Option<ValueSeqNum>>,
/// The sequence numbers of each subkey requested from the DHT over the network
network_seqs: Vec<ValueSeqNum>,
network_seqs: Vec<Option<ValueSeqNum>>,
}
from_impl_to_jsvalue!(DHTRecordReport);
impl DHTRecordReport {
pub fn new(
pub(crate) fn new(
subkeys: ValueSubkeyRangeSet,
offline_subkeys: ValueSubkeyRangeSet,
local_seqs: Vec<ValueSeqNum>,
network_seqs: Vec<ValueSeqNum>,
) -> Self {
Self {
local_seqs: Vec<Option<ValueSeqNum>>,
network_seqs: Vec<Option<ValueSeqNum>>,
) -> VeilidAPIResult<Self> {
if subkeys.is_full() {
apibail_invalid_argument!("subkeys range should be exact", "subkeys", subkeys);
}
if subkeys.is_empty() {
apibail_invalid_argument!("subkeys range should not be empty", "subkeys", subkeys);
}
if subkeys.len() > MAX_INSPECT_VALUE_A_SEQS_LEN as u64 {
apibail_invalid_argument!("subkeys range is too large", "subkeys", subkeys);
}
if subkeys.len() != local_seqs.len() as u64 {
apibail_invalid_argument!(
"local seqs list does not match subkey length",
"local_seqs",
local_seqs.len()
);
}
if subkeys.len() != network_seqs.len() as u64 {
apibail_invalid_argument!(
"network seqs list does not match subkey length",
"network_seqs",
network_seqs.len()
);
}
if !offline_subkeys.is_subset(&subkeys) {
apibail_invalid_argument!(
"offline subkeys is not a subset of the whole subkey set",
"offline_subkeys",
offline_subkeys
);
}
Ok(Self {
subkeys,
offline_subkeys,
local_seqs,
network_seqs,
}
})
}
pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
@ -44,26 +75,28 @@ impl DHTRecordReport {
&self.offline_subkeys
}
#[must_use]
pub fn local_seqs(&self) -> &[ValueSeqNum] {
pub fn local_seqs(&self) -> &[Option<ValueSeqNum>] {
&self.local_seqs
}
#[must_use]
pub fn network_seqs(&self) -> &[ValueSeqNum] {
pub fn network_seqs(&self) -> &[Option<ValueSeqNum>] {
&self.network_seqs
}
pub fn changed_subkeys(&self) -> ValueSubkeyRangeSet {
let mut changed = ValueSubkeyRangeSet::new();
pub fn newer_online_subkeys(&self) -> ValueSubkeyRangeSet {
let mut newer_online = ValueSubkeyRangeSet::new();
for ((sk, lseq), nseq) in self
.subkeys
.iter()
.zip(self.local_seqs.iter())
.zip(self.network_seqs.iter())
{
if nseq > lseq {
changed.insert(sk);
if let Some(nseq) = nseq {
if lseq.is_none() || *nseq > lseq.unwrap() {
newer_online.insert(sk);
}
}
}
changed
newer_online
}
}
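A subkey is reported as newer online only when the network actually returned a value and either no local value exists or the network's sequence number is strictly higher. The predicate in isolation:

fn is_newer_online(local: Option<u32>, network: Option<u32>) -> bool {
    match (local, network) {
        (_, None) => false,          // nothing online for this subkey
        (None, Some(_)) => true,     // online value exists, none stored locally
        (Some(l), Some(n)) => n > l, // strictly newer online
    }
}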

View file

@ -19,7 +19,7 @@ pub type ValueSubkey = u32;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type ValueSeqNum = u32;
pub(crate) fn debug_seqs(seqs: &[ValueSeqNum]) -> String {
pub(crate) fn debug_seqs(seqs: &[Option<ValueSeqNum>]) -> String {
let mut col = 0;
let mut out = String::new();
let mut left = seqs.len();
@ -27,10 +27,10 @@ pub(crate) fn debug_seqs(seqs: &[ValueSeqNum]) -> String {
if col == 0 {
out += " ";
}
let sc = if *s == ValueSeqNum::MAX {
"-".to_owned()
} else {
let sc = if let Some(s) = s {
s.to_string()
} else {
"-".to_owned()
};
out += &sc;
out += ",";

View file

@ -53,6 +53,24 @@ impl ValueSubkeyRangeSet {
Self::new_with_data(&self.data | &other.data)
}
#[must_use]
#[allow(clippy::unnecessary_cast)]
pub fn len(&self) -> u64 {
self.data.len() as u64
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
#[must_use]
pub fn is_full(&self) -> bool {
self.data.ranges_len() == 1
&& self.data.first().unwrap() == u32::MIN
&& self.data.last().unwrap() == u32::MAX
}
#[must_use]
pub fn data(&self) -> &RangeSetBlaze<ValueSubkey> {
&self.data

View file

@ -2,10 +2,11 @@ include: package:lint_hard/all.yaml
analyzer:
errors:
invalid_annotation_target: ignore
one_member_abstracts: ignore
exclude:
- '**/*.g.dart'
- '**/*.freezed.dart'
linter:
rules:
avoid_positional_boolean_parameters: false
avoid_positional_boolean_parameters: false

View file

@ -241,10 +241,19 @@ Future<void> testOpenWriterDHTValue() async {
throwsA(isA<VeilidAPIException>()));
// Verify subkey 0 can be set because override with the right writer
expect(
await rc.setDHTValue(key, 0, va,
writer: KeyPair(key: owner, secret: secret)),
isNull);
// Should have prior sequence number as its returned value because it
// exists online at seq 0
vdtemp = await rc.setDHTValue(key, 0, va,
writer: KeyPair(key: owner, secret: secret));
expect(vdtemp, isNotNull);
expect(vdtemp!.data, equals(vb));
expect(vdtemp.seq, equals(0));
expect(vdtemp.writer, equals(owner));
// Should update the second time to seq 1
vdtemp = await rc.setDHTValue(key, 0, va,
writer: KeyPair(key: owner, secret: secret));
expect(vdtemp, isNull);
// Clean up
await rc.closeDHTRecord(key);
@ -452,16 +461,18 @@ Future<void> testInspectDHTRecord() async {
expect(await rc.setDHTValue(rec.key, 0, utf8.encode('BLAH BLAH BLAH')),
isNull);
await settle(rc, rec.key, 0);
final rr = await rc.inspectDHTRecord(rec.key);
expect(rr.subkeys, equals([ValueSubkeyRange.make(0, 1)]));
expect(rr.localSeqs, equals([0, 0xFFFFFFFF]));
expect(rr.networkSeqs, equals([]));
expect(rr.localSeqs, equals([0, null]));
expect(rr.networkSeqs, equals([null, null]));
final rr2 =
await rc.inspectDHTRecord(rec.key, scope: DHTReportScope.syncGet);
expect(rr2.subkeys, equals([ValueSubkeyRange.make(0, 1)]));
expect(rr2.localSeqs, equals([0, 0xFFFFFFFF]));
expect(rr2.networkSeqs, equals([0, 0xFFFFFFFF]));
expect(rr2.localSeqs, equals([0, null]));
expect(rr2.networkSeqs, equals([0, null]));
await rc.closeDHTRecord(rec.key);
await rc.deleteDHTRecord(rec.key);

View file

@ -14,6 +14,7 @@ Future<void> testRoutingContexts() async {
{
final rc = await Veilid.instance.routingContext();
final rcp = rc.withDefaultSafety();
// More debuggable this way
// ignore: cascade_invocations
rcp.close();
rc.close();
@ -22,6 +23,7 @@ Future<void> testRoutingContexts() async {
{
final rc = await Veilid.instance.routingContext();
final rcp = rc.withSequencing(Sequencing.ensureOrdered);
// More debuggable this way
// ignore: cascade_invocations
rcp.close();
rc.close();
@ -34,6 +36,7 @@ Future<void> testRoutingContexts() async {
hopCount: 2,
stability: Stability.lowLatency,
sequencing: Sequencing.noPreference)));
// More debuggable this way
// ignore: cascade_invocations
rcp.close();
rc.close();
@ -42,6 +45,7 @@ Future<void> testRoutingContexts() async {
final rc = await Veilid.instance.routingContext();
final rcp = rc.withSafety(
const SafetySelectionUnsafe(sequencing: Sequencing.preferOrdered));
// More debuggable this way
// ignore: cascade_invocations
rcp.close();
rc.close();

View file

@ -81,7 +81,7 @@ sealed class DHTSchema with _$DHTSchema {
const DHTSchema defaultDHTSchema = DHTSchema.dflt(oCnt: 1);
@freezed
class DHTSchemaMember with _$DHTSchemaMember {
sealed class DHTSchemaMember with _$DHTSchemaMember {
@Assert('mCnt > 0 && mCnt <= 65535', 'value out of range')
const factory DHTSchemaMember({
required PublicKey mKey,
@ -96,7 +96,7 @@ class DHTSchemaMember with _$DHTSchemaMember {
/// DHTRecordDescriptor
@freezed
class DHTRecordDescriptor with _$DHTRecordDescriptor {
sealed class DHTRecordDescriptor with _$DHTRecordDescriptor {
const factory DHTRecordDescriptor({
required TypedKey key,
required PublicKey owner,
@ -134,7 +134,7 @@ extension DHTRecordDescriptorExt on DHTRecordDescriptor {
/// ValueData
@freezed
class ValueData with _$ValueData {
sealed class ValueData with _$ValueData {
@Assert('seq >= 0', 'seq out of range')
const factory ValueData({
required int seq,
@ -224,7 +224,7 @@ class SafetySelectionSafe extends Equatable implements SafetySelection {
/// Options for safety routes (sender privacy)
@freezed
class SafetySpec with _$SafetySpec {
sealed class SafetySpec with _$SafetySpec {
const factory SafetySpec({
required int hopCount,
required Stability stability,
@ -239,7 +239,7 @@ class SafetySpec with _$SafetySpec {
//////////////////////////////////////
/// RouteBlob
@freezed
class RouteBlob with _$RouteBlob {
sealed class RouteBlob with _$RouteBlob {
const factory RouteBlob(
{required String routeId,
@Uint8ListJsonConverter() required Uint8List blob}) = _RouteBlob;
@ -250,12 +250,12 @@ class RouteBlob with _$RouteBlob {
//////////////////////////////////////
/// Inspect
@freezed
class DHTRecordReport with _$DHTRecordReport {
sealed class DHTRecordReport with _$DHTRecordReport {
const factory DHTRecordReport({
required List<ValueSubkeyRange> subkeys,
required List<ValueSubkeyRange> offlineSubkeys,
required List<int> localSeqs,
required List<int> networkSeqs,
required List<int?> localSeqs,
required List<int?> networkSeqs,
}) = _DHTRecordReport;
factory DHTRecordReport.fromJson(dynamic json) =>
_$DHTRecordReportFromJson(json as Map<String, dynamic>);

File diff suppressed because it is too large

View file

@ -6,20 +6,20 @@ part of 'routing_context.dart';
// JsonSerializableGenerator
// **************************************************************************
_$DHTSchemaDFLTImpl _$$DHTSchemaDFLTImplFromJson(Map<String, dynamic> json) =>
_$DHTSchemaDFLTImpl(
DHTSchemaDFLT _$DHTSchemaDFLTFromJson(Map<String, dynamic> json) =>
DHTSchemaDFLT(
oCnt: (json['o_cnt'] as num).toInt(),
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$DHTSchemaDFLTImplToJson(_$DHTSchemaDFLTImpl instance) =>
Map<String, dynamic> _$DHTSchemaDFLTToJson(DHTSchemaDFLT instance) =>
<String, dynamic>{
'o_cnt': instance.oCnt,
'kind': instance.$type,
};
_$DHTSchemaSMPLImpl _$$DHTSchemaSMPLImplFromJson(Map<String, dynamic> json) =>
_$DHTSchemaSMPLImpl(
DHTSchemaSMPL _$DHTSchemaSMPLFromJson(Map<String, dynamic> json) =>
DHTSchemaSMPL(
oCnt: (json['o_cnt'] as num).toInt(),
members: (json['members'] as List<dynamic>)
.map(DHTSchemaMember.fromJson)
@ -27,30 +27,27 @@ _$DHTSchemaSMPLImpl _$$DHTSchemaSMPLImplFromJson(Map<String, dynamic> json) =>
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$DHTSchemaSMPLImplToJson(_$DHTSchemaSMPLImpl instance) =>
Map<String, dynamic> _$DHTSchemaSMPLToJson(DHTSchemaSMPL instance) =>
<String, dynamic>{
'o_cnt': instance.oCnt,
'members': instance.members.map((e) => e.toJson()).toList(),
'kind': instance.$type,
};
_$DHTSchemaMemberImpl _$$DHTSchemaMemberImplFromJson(
Map<String, dynamic> json) =>
_$DHTSchemaMemberImpl(
_DHTSchemaMember _$DHTSchemaMemberFromJson(Map<String, dynamic> json) =>
_DHTSchemaMember(
mKey: FixedEncodedString43.fromJson(json['m_key']),
mCnt: (json['m_cnt'] as num).toInt(),
);
Map<String, dynamic> _$$DHTSchemaMemberImplToJson(
_$DHTSchemaMemberImpl instance) =>
Map<String, dynamic> _$DHTSchemaMemberToJson(_DHTSchemaMember instance) =>
<String, dynamic>{
'm_key': instance.mKey.toJson(),
'm_cnt': instance.mCnt,
};
_$DHTRecordDescriptorImpl _$$DHTRecordDescriptorImplFromJson(
Map<String, dynamic> json) =>
_$DHTRecordDescriptorImpl(
_DHTRecordDescriptor _$DHTRecordDescriptorFromJson(Map<String, dynamic> json) =>
_DHTRecordDescriptor(
key: Typed<FixedEncodedString43>.fromJson(json['key']),
owner: FixedEncodedString43.fromJson(json['owner']),
schema: DHTSchema.fromJson(json['schema']),
@ -59,8 +56,8 @@ _$DHTRecordDescriptorImpl _$$DHTRecordDescriptorImplFromJson(
: FixedEncodedString43.fromJson(json['owner_secret']),
);
Map<String, dynamic> _$$DHTRecordDescriptorImplToJson(
_$DHTRecordDescriptorImpl instance) =>
Map<String, dynamic> _$DHTRecordDescriptorToJson(
_DHTRecordDescriptor instance) =>
<String, dynamic>{
'key': instance.key.toJson(),
'owner': instance.owner.toJson(),
@ -68,29 +65,27 @@ Map<String, dynamic> _$$DHTRecordDescriptorImplToJson(
'owner_secret': instance.ownerSecret?.toJson(),
};
_$ValueDataImpl _$$ValueDataImplFromJson(Map<String, dynamic> json) =>
_$ValueDataImpl(
_ValueData _$ValueDataFromJson(Map<String, dynamic> json) => _ValueData(
seq: (json['seq'] as num).toInt(),
data: const Uint8ListJsonConverter.jsIsArray().fromJson(json['data']),
writer: FixedEncodedString43.fromJson(json['writer']),
);
Map<String, dynamic> _$$ValueDataImplToJson(_$ValueDataImpl instance) =>
Map<String, dynamic> _$ValueDataToJson(_ValueData instance) =>
<String, dynamic>{
'seq': instance.seq,
'data': const Uint8ListJsonConverter.jsIsArray().toJson(instance.data),
'writer': instance.writer.toJson(),
};
_$SafetySpecImpl _$$SafetySpecImplFromJson(Map<String, dynamic> json) =>
_$SafetySpecImpl(
_SafetySpec _$SafetySpecFromJson(Map<String, dynamic> json) => _SafetySpec(
hopCount: (json['hop_count'] as num).toInt(),
stability: Stability.fromJson(json['stability']),
sequencing: Sequencing.fromJson(json['sequencing']),
preferredRoute: json['preferred_route'] as String?,
);
Map<String, dynamic> _$$SafetySpecImplToJson(_$SafetySpecImpl instance) =>
Map<String, dynamic> _$SafetySpecToJson(_SafetySpec instance) =>
<String, dynamic>{
'hop_count': instance.hopCount,
'stability': instance.stability.toJson(),
@ -98,21 +93,19 @@ Map<String, dynamic> _$$SafetySpecImplToJson(_$SafetySpecImpl instance) =>
'preferred_route': instance.preferredRoute,
};
_$RouteBlobImpl _$$RouteBlobImplFromJson(Map<String, dynamic> json) =>
_$RouteBlobImpl(
_RouteBlob _$RouteBlobFromJson(Map<String, dynamic> json) => _RouteBlob(
routeId: json['route_id'] as String,
blob: const Uint8ListJsonConverter().fromJson(json['blob']),
);
Map<String, dynamic> _$$RouteBlobImplToJson(_$RouteBlobImpl instance) =>
Map<String, dynamic> _$RouteBlobToJson(_RouteBlob instance) =>
<String, dynamic>{
'route_id': instance.routeId,
'blob': const Uint8ListJsonConverter().toJson(instance.blob),
};
_$DHTRecordReportImpl _$$DHTRecordReportImplFromJson(
Map<String, dynamic> json) =>
_$DHTRecordReportImpl(
_DHTRecordReport _$DHTRecordReportFromJson(Map<String, dynamic> json) =>
_DHTRecordReport(
subkeys: (json['subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson)
.toList(),
@ -120,15 +113,14 @@ _$DHTRecordReportImpl _$$DHTRecordReportImplFromJson(
.map(ValueSubkeyRange.fromJson)
.toList(),
localSeqs: (json['local_seqs'] as List<dynamic>)
.map((e) => (e as num).toInt())
.map((e) => (e as num?)?.toInt())
.toList(),
networkSeqs: (json['network_seqs'] as List<dynamic>)
.map((e) => (e as num).toInt())
.map((e) => (e as num?)?.toInt())
.toList(),
);
Map<String, dynamic> _$$DHTRecordReportImplToJson(
_$DHTRecordReportImpl instance) =>
Map<String, dynamic> _$DHTRecordReportToJson(_DHTRecordReport instance) =>
<String, dynamic>{
'subkeys': instance.subkeys.map((e) => e.toJson()).toList(),
'offline_subkeys':

View file

@ -10,7 +10,8 @@ part 'veilid_config.g.dart';
//////////////////////////////////////////////////////////
// FFI Platform-specific config
@freezed
class VeilidFFIConfigLoggingTerminal with _$VeilidFFIConfigLoggingTerminal {
sealed class VeilidFFIConfigLoggingTerminal
with _$VeilidFFIConfigLoggingTerminal {
const factory VeilidFFIConfigLoggingTerminal({
required bool enabled,
required VeilidConfigLogLevel level,
@ -22,7 +23,7 @@ class VeilidFFIConfigLoggingTerminal with _$VeilidFFIConfigLoggingTerminal {
}
@freezed
class VeilidFFIConfigLoggingOtlp with _$VeilidFFIConfigLoggingOtlp {
sealed class VeilidFFIConfigLoggingOtlp with _$VeilidFFIConfigLoggingOtlp {
const factory VeilidFFIConfigLoggingOtlp({
required bool enabled,
required VeilidConfigLogLevel level,
@ -36,7 +37,7 @@ class VeilidFFIConfigLoggingOtlp with _$VeilidFFIConfigLoggingOtlp {
}
@freezed
class VeilidFFIConfigLoggingApi with _$VeilidFFIConfigLoggingApi {
sealed class VeilidFFIConfigLoggingApi with _$VeilidFFIConfigLoggingApi {
const factory VeilidFFIConfigLoggingApi({
required bool enabled,
required VeilidConfigLogLevel level,
@ -48,7 +49,7 @@ class VeilidFFIConfigLoggingApi with _$VeilidFFIConfigLoggingApi {
}
@freezed
class VeilidFFIConfigLoggingFlame with _$VeilidFFIConfigLoggingFlame {
sealed class VeilidFFIConfigLoggingFlame with _$VeilidFFIConfigLoggingFlame {
const factory VeilidFFIConfigLoggingFlame({
required bool enabled,
required String path,
@ -59,7 +60,7 @@ class VeilidFFIConfigLoggingFlame with _$VeilidFFIConfigLoggingFlame {
}
@freezed
class VeilidFFIConfigLogging with _$VeilidFFIConfigLogging {
sealed class VeilidFFIConfigLogging with _$VeilidFFIConfigLogging {
const factory VeilidFFIConfigLogging(
{required VeilidFFIConfigLoggingTerminal terminal,
required VeilidFFIConfigLoggingOtlp otlp,
@ -71,7 +72,7 @@ class VeilidFFIConfigLogging with _$VeilidFFIConfigLogging {
}
@freezed
class VeilidFFIConfig with _$VeilidFFIConfig {
sealed class VeilidFFIConfig with _$VeilidFFIConfig {
const factory VeilidFFIConfig({
required VeilidFFIConfigLogging logging,
}) = _VeilidFFIConfig;
@ -84,7 +85,7 @@ class VeilidFFIConfig with _$VeilidFFIConfig {
// WASM Platform-specific config
@freezed
class VeilidWASMConfigLoggingPerformance
sealed class VeilidWASMConfigLoggingPerformance
with _$VeilidWASMConfigLoggingPerformance {
const factory VeilidWASMConfigLoggingPerformance({
required bool enabled,
@ -100,7 +101,7 @@ class VeilidWASMConfigLoggingPerformance
}
@freezed
class VeilidWASMConfigLoggingApi with _$VeilidWASMConfigLoggingApi {
sealed class VeilidWASMConfigLoggingApi with _$VeilidWASMConfigLoggingApi {
const factory VeilidWASMConfigLoggingApi({
required bool enabled,
required VeilidConfigLogLevel level,
@ -112,7 +113,7 @@ class VeilidWASMConfigLoggingApi with _$VeilidWASMConfigLoggingApi {
}
@freezed
class VeilidWASMConfigLogging with _$VeilidWASMConfigLogging {
sealed class VeilidWASMConfigLogging with _$VeilidWASMConfigLogging {
const factory VeilidWASMConfigLogging(
{required VeilidWASMConfigLoggingPerformance performance,
required VeilidWASMConfigLoggingApi api}) = _VeilidWASMConfigLogging;
@ -122,7 +123,7 @@ class VeilidWASMConfigLogging with _$VeilidWASMConfigLogging {
}
@freezed
class VeilidWASMConfig with _$VeilidWASMConfig {
sealed class VeilidWASMConfig with _$VeilidWASMConfig {
const factory VeilidWASMConfig({
required VeilidWASMConfigLogging logging,
}) = _VeilidWASMConfig;
@ -151,7 +152,7 @@ enum VeilidConfigLogLevel {
/// VeilidConfig
@freezed
class VeilidConfigHTTPS with _$VeilidConfigHTTPS {
sealed class VeilidConfigHTTPS with _$VeilidConfigHTTPS {
const factory VeilidConfigHTTPS({
required bool enabled,
required String listenAddress,
@ -166,7 +167,7 @@ class VeilidConfigHTTPS with _$VeilidConfigHTTPS {
////////////
@freezed
class VeilidConfigHTTP with _$VeilidConfigHTTP {
sealed class VeilidConfigHTTP with _$VeilidConfigHTTP {
const factory VeilidConfigHTTP({
required bool enabled,
required String listenAddress,
@ -181,7 +182,7 @@ class VeilidConfigHTTP with _$VeilidConfigHTTP {
////////////
@freezed
class VeilidConfigApplication with _$VeilidConfigApplication {
sealed class VeilidConfigApplication with _$VeilidConfigApplication {
const factory VeilidConfigApplication({
required VeilidConfigHTTPS https,
required VeilidConfigHTTP http,
@ -193,7 +194,7 @@ class VeilidConfigApplication with _$VeilidConfigApplication {
////////////
@freezed
class VeilidConfigUDP with _$VeilidConfigUDP {
sealed class VeilidConfigUDP with _$VeilidConfigUDP {
const factory VeilidConfigUDP(
{required bool enabled,
required int socketPoolSize,
@ -206,7 +207,7 @@ class VeilidConfigUDP with _$VeilidConfigUDP {
////////////
@freezed
class VeilidConfigTCP with _$VeilidConfigTCP {
sealed class VeilidConfigTCP with _$VeilidConfigTCP {
const factory VeilidConfigTCP(
{required bool connect,
required bool listen,
@ -220,7 +221,7 @@ class VeilidConfigTCP with _$VeilidConfigTCP {
////////////
@freezed
class VeilidConfigWS with _$VeilidConfigWS {
sealed class VeilidConfigWS with _$VeilidConfigWS {
const factory VeilidConfigWS(
{required bool connect,
required bool listen,
@ -235,7 +236,7 @@ class VeilidConfigWS with _$VeilidConfigWS {
////////////
@freezed
class VeilidConfigWSS with _$VeilidConfigWSS {
sealed class VeilidConfigWSS with _$VeilidConfigWSS {
const factory VeilidConfigWSS(
{required bool connect,
required bool listen,
@ -251,7 +252,7 @@ class VeilidConfigWSS with _$VeilidConfigWSS {
////////////
@freezed
class VeilidConfigProtocol with _$VeilidConfigProtocol {
sealed class VeilidConfigProtocol with _$VeilidConfigProtocol {
const factory VeilidConfigProtocol({
required VeilidConfigUDP udp,
required VeilidConfigTCP tcp,
@ -266,7 +267,7 @@ class VeilidConfigProtocol with _$VeilidConfigProtocol {
////////////
@freezed
class VeilidConfigTLS with _$VeilidConfigTLS {
sealed class VeilidConfigTLS with _$VeilidConfigTLS {
const factory VeilidConfigTLS({
required String certificatePath,
required String privateKeyPath,
@ -279,7 +280,7 @@ class VeilidConfigTLS with _$VeilidConfigTLS {
////////////
@freezed
class VeilidConfigDHT with _$VeilidConfigDHT {
sealed class VeilidConfigDHT with _$VeilidConfigDHT {
const factory VeilidConfigDHT({
required int resolveNodeTimeoutMs,
required int resolveNodeCount,
@ -312,7 +313,7 @@ class VeilidConfigDHT with _$VeilidConfigDHT {
////////////
@freezed
class VeilidConfigRPC with _$VeilidConfigRPC {
sealed class VeilidConfigRPC with _$VeilidConfigRPC {
const factory VeilidConfigRPC(
{required int concurrency,
required int queueSize,
@ -329,7 +330,7 @@ class VeilidConfigRPC with _$VeilidConfigRPC {
////////////
@freezed
class VeilidConfigRoutingTable with _$VeilidConfigRoutingTable {
sealed class VeilidConfigRoutingTable with _$VeilidConfigRoutingTable {
const factory VeilidConfigRoutingTable({
required List<TypedKey> nodeId,
required List<TypedSecret> nodeIdSecret,
@ -348,7 +349,7 @@ class VeilidConfigRoutingTable with _$VeilidConfigRoutingTable {
////////////
@freezed
class VeilidConfigNetwork with _$VeilidConfigNetwork {
sealed class VeilidConfigNetwork with _$VeilidConfigNetwork {
const factory VeilidConfigNetwork({
required int connectionInitialTimeoutMs,
required int connectionInactivityTimeoutMs,
@ -378,7 +379,7 @@ class VeilidConfigNetwork with _$VeilidConfigNetwork {
////////////
@freezed
class VeilidConfigTableStore with _$VeilidConfigTableStore {
sealed class VeilidConfigTableStore with _$VeilidConfigTableStore {
const factory VeilidConfigTableStore({
required String directory,
required bool delete,
@ -391,7 +392,7 @@ class VeilidConfigTableStore with _$VeilidConfigTableStore {
////////////
@freezed
class VeilidConfigBlockStore with _$VeilidConfigBlockStore {
sealed class VeilidConfigBlockStore with _$VeilidConfigBlockStore {
const factory VeilidConfigBlockStore({
required String directory,
required bool delete,
@ -404,7 +405,7 @@ class VeilidConfigBlockStore with _$VeilidConfigBlockStore {
////////////
@freezed
class VeilidConfigProtectedStore with _$VeilidConfigProtectedStore {
sealed class VeilidConfigProtectedStore with _$VeilidConfigProtectedStore {
const factory VeilidConfigProtectedStore(
{required bool allowInsecureFallback,
required bool alwaysUseInsecureStorage,
@ -420,7 +421,7 @@ class VeilidConfigProtectedStore with _$VeilidConfigProtectedStore {
////////////
@freezed
class VeilidConfigCapabilities with _$VeilidConfigCapabilities {
sealed class VeilidConfigCapabilities with _$VeilidConfigCapabilities {
const factory VeilidConfigCapabilities({
required List<String> disable,
}) = _VeilidConfigCapabilities;
@ -432,7 +433,7 @@ class VeilidConfigCapabilities with _$VeilidConfigCapabilities {
////////////
@freezed
class VeilidConfig with _$VeilidConfig {
sealed class VeilidConfig with _$VeilidConfig {
const factory VeilidConfig({
required String programName,
required String namespace,

File diff suppressed because it is too large
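
// The suppressed .freezed.dart diffs follow the same migration visible in
// the hand-written classes above and the regenerated serializers below:
// @freezed classes are declared `sealed`, and the generated types drop the
// _$...Impl naming in favor of _ClassName. A minimal sketch of the pattern,
// assuming freezed 3.x with json_serializable (ExampleConfig is illustrative
// only; assumes `part 'example.freezed.dart';` and `part 'example.g.dart';`):
//
//   @freezed
//   sealed class ExampleConfig with _$ExampleConfig {
//     const factory ExampleConfig({required bool enabled}) = _ExampleConfig;
//
//     factory ExampleConfig.fromJson(Map<String, dynamic> json) =>
//         _$ExampleConfigFromJson(json);
//   }
//
// The generated example.g.dart then emits _ExampleConfig _$ExampleConfigFromJson
// and _$ExampleConfigToJson(_ExampleConfig instance), matching the renames below.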

View file

@ -6,28 +6,28 @@ part of 'veilid_config.dart';
// JsonSerializableGenerator
// **************************************************************************
_$VeilidFFIConfigLoggingTerminalImpl
_$$VeilidFFIConfigLoggingTerminalImplFromJson(Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingTerminalImpl(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
?.map((e) => e as String)
.toList() ??
const [],
);
_VeilidFFIConfigLoggingTerminal _$VeilidFFIConfigLoggingTerminalFromJson(
Map<String, dynamic> json) =>
_VeilidFFIConfigLoggingTerminal(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
?.map((e) => e as String)
.toList() ??
const [],
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingTerminalImplToJson(
_$VeilidFFIConfigLoggingTerminalImpl instance) =>
Map<String, dynamic> _$VeilidFFIConfigLoggingTerminalToJson(
_VeilidFFIConfigLoggingTerminal instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
'ignore_log_targets': instance.ignoreLogTargets,
};
_$VeilidFFIConfigLoggingOtlpImpl _$$VeilidFFIConfigLoggingOtlpImplFromJson(
_VeilidFFIConfigLoggingOtlp _$VeilidFFIConfigLoggingOtlpFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingOtlpImpl(
_VeilidFFIConfigLoggingOtlp(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
grpcEndpoint: json['grpc_endpoint'] as String,
@ -38,8 +38,8 @@ _$VeilidFFIConfigLoggingOtlpImpl _$$VeilidFFIConfigLoggingOtlpImplFromJson(
const [],
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingOtlpImplToJson(
_$VeilidFFIConfigLoggingOtlpImpl instance) =>
Map<String, dynamic> _$VeilidFFIConfigLoggingOtlpToJson(
_VeilidFFIConfigLoggingOtlp instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
@ -48,9 +48,9 @@ Map<String, dynamic> _$$VeilidFFIConfigLoggingOtlpImplToJson(
'ignore_log_targets': instance.ignoreLogTargets,
};
_$VeilidFFIConfigLoggingApiImpl _$$VeilidFFIConfigLoggingApiImplFromJson(
_VeilidFFIConfigLoggingApi _$VeilidFFIConfigLoggingApiFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingApiImpl(
_VeilidFFIConfigLoggingApi(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
@ -59,39 +59,39 @@ _$VeilidFFIConfigLoggingApiImpl _$$VeilidFFIConfigLoggingApiImplFromJson(
const [],
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingApiImplToJson(
_$VeilidFFIConfigLoggingApiImpl instance) =>
Map<String, dynamic> _$VeilidFFIConfigLoggingApiToJson(
_VeilidFFIConfigLoggingApi instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
'ignore_log_targets': instance.ignoreLogTargets,
};
_$VeilidFFIConfigLoggingFlameImpl _$$VeilidFFIConfigLoggingFlameImplFromJson(
_VeilidFFIConfigLoggingFlame _$VeilidFFIConfigLoggingFlameFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingFlameImpl(
_VeilidFFIConfigLoggingFlame(
enabled: json['enabled'] as bool,
path: json['path'] as String,
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingFlameImplToJson(
_$VeilidFFIConfigLoggingFlameImpl instance) =>
Map<String, dynamic> _$VeilidFFIConfigLoggingFlameToJson(
_VeilidFFIConfigLoggingFlame instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'path': instance.path,
};
_$VeilidFFIConfigLoggingImpl _$$VeilidFFIConfigLoggingImplFromJson(
_VeilidFFIConfigLogging _$VeilidFFIConfigLoggingFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingImpl(
_VeilidFFIConfigLogging(
terminal: VeilidFFIConfigLoggingTerminal.fromJson(json['terminal']),
otlp: VeilidFFIConfigLoggingOtlp.fromJson(json['otlp']),
api: VeilidFFIConfigLoggingApi.fromJson(json['api']),
flame: VeilidFFIConfigLoggingFlame.fromJson(json['flame']),
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingImplToJson(
_$VeilidFFIConfigLoggingImpl instance) =>
Map<String, dynamic> _$VeilidFFIConfigLoggingToJson(
_VeilidFFIConfigLogging instance) =>
<String, dynamic>{
'terminal': instance.terminal.toJson(),
'otlp': instance.otlp.toJson(),
@ -99,22 +99,19 @@ Map<String, dynamic> _$$VeilidFFIConfigLoggingImplToJson(
'flame': instance.flame.toJson(),
};
_$VeilidFFIConfigImpl _$$VeilidFFIConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigImpl(
_VeilidFFIConfig _$VeilidFFIConfigFromJson(Map<String, dynamic> json) =>
_VeilidFFIConfig(
logging: VeilidFFIConfigLogging.fromJson(json['logging']),
);
Map<String, dynamic> _$$VeilidFFIConfigImplToJson(
_$VeilidFFIConfigImpl instance) =>
Map<String, dynamic> _$VeilidFFIConfigToJson(_VeilidFFIConfig instance) =>
<String, dynamic>{
'logging': instance.logging.toJson(),
};
_$VeilidWASMConfigLoggingPerformanceImpl
_$$VeilidWASMConfigLoggingPerformanceImplFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingPerformanceImpl(
_VeilidWASMConfigLoggingPerformance
_$VeilidWASMConfigLoggingPerformanceFromJson(Map<String, dynamic> json) =>
_VeilidWASMConfigLoggingPerformance(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
logsInTimings: json['logs_in_timings'] as bool,
@ -125,8 +122,8 @@ _$VeilidWASMConfigLoggingPerformanceImpl
const [],
);
Map<String, dynamic> _$$VeilidWASMConfigLoggingPerformanceImplToJson(
_$VeilidWASMConfigLoggingPerformanceImpl instance) =>
Map<String, dynamic> _$VeilidWASMConfigLoggingPerformanceToJson(
_VeilidWASMConfigLoggingPerformance instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
@ -135,9 +132,9 @@ Map<String, dynamic> _$$VeilidWASMConfigLoggingPerformanceImplToJson(
'ignore_log_targets': instance.ignoreLogTargets,
};
_$VeilidWASMConfigLoggingApiImpl _$$VeilidWASMConfigLoggingApiImplFromJson(
_VeilidWASMConfigLoggingApi _$VeilidWASMConfigLoggingApiFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingApiImpl(
_VeilidWASMConfigLoggingApi(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
@ -146,52 +143,48 @@ _$VeilidWASMConfigLoggingApiImpl _$$VeilidWASMConfigLoggingApiImplFromJson(
const [],
);
Map<String, dynamic> _$$VeilidWASMConfigLoggingApiImplToJson(
_$VeilidWASMConfigLoggingApiImpl instance) =>
Map<String, dynamic> _$VeilidWASMConfigLoggingApiToJson(
_VeilidWASMConfigLoggingApi instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
'ignore_log_targets': instance.ignoreLogTargets,
};
_$VeilidWASMConfigLoggingImpl _$$VeilidWASMConfigLoggingImplFromJson(
_VeilidWASMConfigLogging _$VeilidWASMConfigLoggingFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingImpl(
_VeilidWASMConfigLogging(
performance:
VeilidWASMConfigLoggingPerformance.fromJson(json['performance']),
api: VeilidWASMConfigLoggingApi.fromJson(json['api']),
);
Map<String, dynamic> _$$VeilidWASMConfigLoggingImplToJson(
_$VeilidWASMConfigLoggingImpl instance) =>
Map<String, dynamic> _$VeilidWASMConfigLoggingToJson(
_VeilidWASMConfigLogging instance) =>
<String, dynamic>{
'performance': instance.performance.toJson(),
'api': instance.api.toJson(),
};
_$VeilidWASMConfigImpl _$$VeilidWASMConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigImpl(
_VeilidWASMConfig _$VeilidWASMConfigFromJson(Map<String, dynamic> json) =>
_VeilidWASMConfig(
logging: VeilidWASMConfigLogging.fromJson(json['logging']),
);
Map<String, dynamic> _$$VeilidWASMConfigImplToJson(
_$VeilidWASMConfigImpl instance) =>
Map<String, dynamic> _$VeilidWASMConfigToJson(_VeilidWASMConfig instance) =>
<String, dynamic>{
'logging': instance.logging.toJson(),
};
_$VeilidConfigHTTPSImpl _$$VeilidConfigHTTPSImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigHTTPSImpl(
_VeilidConfigHTTPS _$VeilidConfigHTTPSFromJson(Map<String, dynamic> json) =>
_VeilidConfigHTTPS(
enabled: json['enabled'] as bool,
listenAddress: json['listen_address'] as String,
path: json['path'] as String,
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigHTTPSImplToJson(
_$VeilidConfigHTTPSImpl instance) =>
Map<String, dynamic> _$VeilidConfigHTTPSToJson(_VeilidConfigHTTPS instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'listen_address': instance.listenAddress,
@ -199,17 +192,15 @@ Map<String, dynamic> _$$VeilidConfigHTTPSImplToJson(
'url': instance.url,
};
_$VeilidConfigHTTPImpl _$$VeilidConfigHTTPImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigHTTPImpl(
_VeilidConfigHTTP _$VeilidConfigHTTPFromJson(Map<String, dynamic> json) =>
_VeilidConfigHTTP(
enabled: json['enabled'] as bool,
listenAddress: json['listen_address'] as String,
path: json['path'] as String,
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigHTTPImplToJson(
_$VeilidConfigHTTPImpl instance) =>
Map<String, dynamic> _$VeilidConfigHTTPToJson(_VeilidConfigHTTP instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'listen_address': instance.listenAddress,
@ -217,31 +208,29 @@ Map<String, dynamic> _$$VeilidConfigHTTPImplToJson(
'url': instance.url,
};
_$VeilidConfigApplicationImpl _$$VeilidConfigApplicationImplFromJson(
_VeilidConfigApplication _$VeilidConfigApplicationFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigApplicationImpl(
_VeilidConfigApplication(
https: VeilidConfigHTTPS.fromJson(json['https']),
http: VeilidConfigHTTP.fromJson(json['http']),
);
Map<String, dynamic> _$$VeilidConfigApplicationImplToJson(
_$VeilidConfigApplicationImpl instance) =>
Map<String, dynamic> _$VeilidConfigApplicationToJson(
_VeilidConfigApplication instance) =>
<String, dynamic>{
'https': instance.https.toJson(),
'http': instance.http.toJson(),
};
_$VeilidConfigUDPImpl _$$VeilidConfigUDPImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigUDPImpl(
_VeilidConfigUDP _$VeilidConfigUDPFromJson(Map<String, dynamic> json) =>
_VeilidConfigUDP(
enabled: json['enabled'] as bool,
socketPoolSize: (json['socket_pool_size'] as num).toInt(),
listenAddress: json['listen_address'] as String,
publicAddress: json['public_address'] as String?,
);
Map<String, dynamic> _$$VeilidConfigUDPImplToJson(
_$VeilidConfigUDPImpl instance) =>
Map<String, dynamic> _$VeilidConfigUDPToJson(_VeilidConfigUDP instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'socket_pool_size': instance.socketPoolSize,
@ -249,9 +238,8 @@ Map<String, dynamic> _$$VeilidConfigUDPImplToJson(
'public_address': instance.publicAddress,
};
_$VeilidConfigTCPImpl _$$VeilidConfigTCPImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigTCPImpl(
_VeilidConfigTCP _$VeilidConfigTCPFromJson(Map<String, dynamic> json) =>
_VeilidConfigTCP(
connect: json['connect'] as bool,
listen: json['listen'] as bool,
maxConnections: (json['max_connections'] as num).toInt(),
@ -259,8 +247,7 @@ _$VeilidConfigTCPImpl _$$VeilidConfigTCPImplFromJson(
publicAddress: json['public_address'] as String?,
);
Map<String, dynamic> _$$VeilidConfigTCPImplToJson(
_$VeilidConfigTCPImpl instance) =>
Map<String, dynamic> _$VeilidConfigTCPToJson(_VeilidConfigTCP instance) =>
<String, dynamic>{
'connect': instance.connect,
'listen': instance.listen,
@ -269,8 +256,8 @@ Map<String, dynamic> _$$VeilidConfigTCPImplToJson(
'public_address': instance.publicAddress,
};
_$VeilidConfigWSImpl _$$VeilidConfigWSImplFromJson(Map<String, dynamic> json) =>
_$VeilidConfigWSImpl(
_VeilidConfigWS _$VeilidConfigWSFromJson(Map<String, dynamic> json) =>
_VeilidConfigWS(
connect: json['connect'] as bool,
listen: json['listen'] as bool,
maxConnections: (json['max_connections'] as num).toInt(),
@ -279,8 +266,7 @@ _$VeilidConfigWSImpl _$$VeilidConfigWSImplFromJson(Map<String, dynamic> json) =>
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigWSImplToJson(
_$VeilidConfigWSImpl instance) =>
Map<String, dynamic> _$VeilidConfigWSToJson(_VeilidConfigWS instance) =>
<String, dynamic>{
'connect': instance.connect,
'listen': instance.listen,
@ -290,9 +276,8 @@ Map<String, dynamic> _$$VeilidConfigWSImplToJson(
'url': instance.url,
};
_$VeilidConfigWSSImpl _$$VeilidConfigWSSImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigWSSImpl(
_VeilidConfigWSS _$VeilidConfigWSSFromJson(Map<String, dynamic> json) =>
_VeilidConfigWSS(
connect: json['connect'] as bool,
listen: json['listen'] as bool,
maxConnections: (json['max_connections'] as num).toInt(),
@ -301,8 +286,7 @@ _$VeilidConfigWSSImpl _$$VeilidConfigWSSImplFromJson(
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigWSSImplToJson(
_$VeilidConfigWSSImpl instance) =>
Map<String, dynamic> _$VeilidConfigWSSToJson(_VeilidConfigWSS instance) =>
<String, dynamic>{
'connect': instance.connect,
'listen': instance.listen,
@ -312,17 +296,17 @@ Map<String, dynamic> _$$VeilidConfigWSSImplToJson(
'url': instance.url,
};
_$VeilidConfigProtocolImpl _$$VeilidConfigProtocolImplFromJson(
_VeilidConfigProtocol _$VeilidConfigProtocolFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigProtocolImpl(
_VeilidConfigProtocol(
udp: VeilidConfigUDP.fromJson(json['udp']),
tcp: VeilidConfigTCP.fromJson(json['tcp']),
ws: VeilidConfigWS.fromJson(json['ws']),
wss: VeilidConfigWSS.fromJson(json['wss']),
);
Map<String, dynamic> _$$VeilidConfigProtocolImplToJson(
_$VeilidConfigProtocolImpl instance) =>
Map<String, dynamic> _$VeilidConfigProtocolToJson(
_VeilidConfigProtocol instance) =>
<String, dynamic>{
'udp': instance.udp.toJson(),
'tcp': instance.tcp.toJson(),
@ -330,26 +314,23 @@ Map<String, dynamic> _$$VeilidConfigProtocolImplToJson(
'wss': instance.wss.toJson(),
};
_$VeilidConfigTLSImpl _$$VeilidConfigTLSImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigTLSImpl(
_VeilidConfigTLS _$VeilidConfigTLSFromJson(Map<String, dynamic> json) =>
_VeilidConfigTLS(
certificatePath: json['certificate_path'] as String,
privateKeyPath: json['private_key_path'] as String,
connectionInitialTimeoutMs:
(json['connection_initial_timeout_ms'] as num).toInt(),
);
Map<String, dynamic> _$$VeilidConfigTLSImplToJson(
_$VeilidConfigTLSImpl instance) =>
Map<String, dynamic> _$VeilidConfigTLSToJson(_VeilidConfigTLS instance) =>
<String, dynamic>{
'certificate_path': instance.certificatePath,
'private_key_path': instance.privateKeyPath,
'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs,
};
_$VeilidConfigDHTImpl _$$VeilidConfigDHTImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigDHTImpl(
_VeilidConfigDHT _$VeilidConfigDHTFromJson(Map<String, dynamic> json) =>
_VeilidConfigDHT(
resolveNodeTimeoutMs: (json['resolve_node_timeout_ms'] as num).toInt(),
resolveNodeCount: (json['resolve_node_count'] as num).toInt(),
resolveNodeFanout: (json['resolve_node_fanout'] as num).toInt(),
@ -378,8 +359,7 @@ _$VeilidConfigDHTImpl _$$VeilidConfigDHTImplFromJson(
maxWatchExpirationMs: (json['max_watch_expiration_ms'] as num).toInt(),
);
Map<String, dynamic> _$$VeilidConfigDHTImplToJson(
_$VeilidConfigDHTImpl instance) =>
Map<String, dynamic> _$VeilidConfigDHTToJson(_VeilidConfigDHT instance) =>
<String, dynamic>{
'resolve_node_timeout_ms': instance.resolveNodeTimeoutMs,
'resolve_node_count': instance.resolveNodeCount,
@ -407,9 +387,8 @@ Map<String, dynamic> _$$VeilidConfigDHTImplToJson(
'max_watch_expiration_ms': instance.maxWatchExpirationMs,
};
_$VeilidConfigRPCImpl _$$VeilidConfigRPCImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigRPCImpl(
_VeilidConfigRPC _$VeilidConfigRPCFromJson(Map<String, dynamic> json) =>
_VeilidConfigRPC(
concurrency: (json['concurrency'] as num).toInt(),
queueSize: (json['queue_size'] as num).toInt(),
timeoutMs: (json['timeout_ms'] as num).toInt(),
@ -419,8 +398,7 @@ _$VeilidConfigRPCImpl _$$VeilidConfigRPCImplFromJson(
maxTimestampAheadMs: (json['max_timestamp_ahead_ms'] as num?)?.toInt(),
);
Map<String, dynamic> _$$VeilidConfigRPCImplToJson(
_$VeilidConfigRPCImpl instance) =>
Map<String, dynamic> _$VeilidConfigRPCToJson(_VeilidConfigRPC instance) =>
<String, dynamic>{
'concurrency': instance.concurrency,
'queue_size': instance.queueSize,
@ -431,9 +409,9 @@ Map<String, dynamic> _$$VeilidConfigRPCImplToJson(
'max_timestamp_ahead_ms': instance.maxTimestampAheadMs,
};
_$VeilidConfigRoutingTableImpl _$$VeilidConfigRoutingTableImplFromJson(
_VeilidConfigRoutingTable _$VeilidConfigRoutingTableFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigRoutingTableImpl(
_VeilidConfigRoutingTable(
nodeId: (json['node_id'] as List<dynamic>)
.map(Typed<FixedEncodedString43>.fromJson)
.toList(),
@ -449,8 +427,8 @@ _$VeilidConfigRoutingTableImpl _$$VeilidConfigRoutingTableImplFromJson(
limitAttachedWeak: (json['limit_attached_weak'] as num).toInt(),
);
Map<String, dynamic> _$$VeilidConfigRoutingTableImplToJson(
_$VeilidConfigRoutingTableImpl instance) =>
Map<String, dynamic> _$VeilidConfigRoutingTableToJson(
_VeilidConfigRoutingTable instance) =>
<String, dynamic>{
'node_id': instance.nodeId.map((e) => e.toJson()).toList(),
'node_id_secret': instance.nodeIdSecret.map((e) => e.toJson()).toList(),
@ -462,9 +440,8 @@ Map<String, dynamic> _$$VeilidConfigRoutingTableImplToJson(
'limit_attached_weak': instance.limitAttachedWeak,
};
_$VeilidConfigNetworkImpl _$$VeilidConfigNetworkImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigNetworkImpl(
_VeilidConfigNetwork _$VeilidConfigNetworkFromJson(Map<String, dynamic> json) =>
_VeilidConfigNetwork(
connectionInitialTimeoutMs:
(json['connection_initial_timeout_ms'] as num).toInt(),
connectionInactivityTimeoutMs:
@ -494,8 +471,8 @@ _$VeilidConfigNetworkImpl _$$VeilidConfigNetworkImplFromJson(
networkKeyPassword: json['network_key_password'] as String?,
);
Map<String, dynamic> _$$VeilidConfigNetworkImplToJson(
_$VeilidConfigNetworkImpl instance) =>
Map<String, dynamic> _$VeilidConfigNetworkToJson(
_VeilidConfigNetwork instance) =>
<String, dynamic>{
'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs,
'connection_inactivity_timeout_ms':
@ -521,37 +498,37 @@ Map<String, dynamic> _$$VeilidConfigNetworkImplToJson(
'network_key_password': instance.networkKeyPassword,
};
_$VeilidConfigTableStoreImpl _$$VeilidConfigTableStoreImplFromJson(
_VeilidConfigTableStore _$VeilidConfigTableStoreFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigTableStoreImpl(
_VeilidConfigTableStore(
directory: json['directory'] as String,
delete: json['delete'] as bool,
);
Map<String, dynamic> _$$VeilidConfigTableStoreImplToJson(
_$VeilidConfigTableStoreImpl instance) =>
Map<String, dynamic> _$VeilidConfigTableStoreToJson(
_VeilidConfigTableStore instance) =>
<String, dynamic>{
'directory': instance.directory,
'delete': instance.delete,
};
_$VeilidConfigBlockStoreImpl _$$VeilidConfigBlockStoreImplFromJson(
_VeilidConfigBlockStore _$VeilidConfigBlockStoreFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigBlockStoreImpl(
_VeilidConfigBlockStore(
directory: json['directory'] as String,
delete: json['delete'] as bool,
);
Map<String, dynamic> _$$VeilidConfigBlockStoreImplToJson(
_$VeilidConfigBlockStoreImpl instance) =>
Map<String, dynamic> _$VeilidConfigBlockStoreToJson(
_VeilidConfigBlockStore instance) =>
<String, dynamic>{
'directory': instance.directory,
'delete': instance.delete,
};
_$VeilidConfigProtectedStoreImpl _$$VeilidConfigProtectedStoreImplFromJson(
_VeilidConfigProtectedStore _$VeilidConfigProtectedStoreFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigProtectedStoreImpl(
_VeilidConfigProtectedStore(
allowInsecureFallback: json['allow_insecure_fallback'] as bool,
alwaysUseInsecureStorage: json['always_use_insecure_storage'] as bool,
directory: json['directory'] as String,
@ -562,8 +539,8 @@ _$VeilidConfigProtectedStoreImpl _$$VeilidConfigProtectedStoreImplFromJson(
json['new_device_encryption_key_password'] as String?,
);
Map<String, dynamic> _$$VeilidConfigProtectedStoreImplToJson(
_$VeilidConfigProtectedStoreImpl instance) =>
Map<String, dynamic> _$VeilidConfigProtectedStoreToJson(
_VeilidConfigProtectedStore instance) =>
<String, dynamic>{
'allow_insecure_fallback': instance.allowInsecureFallback,
'always_use_insecure_storage': instance.alwaysUseInsecureStorage,
@ -574,21 +551,21 @@ Map<String, dynamic> _$$VeilidConfigProtectedStoreImplToJson(
instance.newDeviceEncryptionKeyPassword,
};
_$VeilidConfigCapabilitiesImpl _$$VeilidConfigCapabilitiesImplFromJson(
_VeilidConfigCapabilities _$VeilidConfigCapabilitiesFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigCapabilitiesImpl(
_VeilidConfigCapabilities(
disable:
(json['disable'] as List<dynamic>).map((e) => e as String).toList(),
);
Map<String, dynamic> _$$VeilidConfigCapabilitiesImplToJson(
_$VeilidConfigCapabilitiesImpl instance) =>
Map<String, dynamic> _$VeilidConfigCapabilitiesToJson(
_VeilidConfigCapabilities instance) =>
<String, dynamic>{
'disable': instance.disable,
};
_$VeilidConfigImpl _$$VeilidConfigImplFromJson(Map<String, dynamic> json) =>
_$VeilidConfigImpl(
_VeilidConfig _$VeilidConfigFromJson(Map<String, dynamic> json) =>
_VeilidConfig(
programName: json['program_name'] as String,
namespace: json['namespace'] as String,
capabilities: VeilidConfigCapabilities.fromJson(json['capabilities']),
@ -599,7 +576,7 @@ _$VeilidConfigImpl _$$VeilidConfigImplFromJson(Map<String, dynamic> json) =>
network: VeilidConfigNetwork.fromJson(json['network']),
);
Map<String, dynamic> _$$VeilidConfigImplToJson(_$VeilidConfigImpl instance) =>
Map<String, dynamic> _$VeilidConfigToJson(_VeilidConfig instance) =>
<String, dynamic>{
'program_name': instance.programName,
'namespace': instance.namespace,

View file

@ -46,7 +46,7 @@ enum VeilidLogLevel {
////////////
@freezed
class LatencyStats with _$LatencyStats {
sealed class LatencyStats with _$LatencyStats {
const factory LatencyStats({
required TimestampDuration fastest,
required TimestampDuration average,
@ -64,7 +64,7 @@ class LatencyStats with _$LatencyStats {
////////////
@freezed
class TransferStats with _$TransferStats {
sealed class TransferStats with _$TransferStats {
const factory TransferStats({
required BigInt total,
required BigInt maximum,
@ -79,7 +79,7 @@ class TransferStats with _$TransferStats {
////////////
@freezed
class TransferStatsDownUp with _$TransferStatsDownUp {
sealed class TransferStatsDownUp with _$TransferStatsDownUp {
const factory TransferStatsDownUp({
required TransferStats down,
required TransferStats up,
@ -92,7 +92,7 @@ class TransferStatsDownUp with _$TransferStatsDownUp {
////////////
@freezed
class StateStats with _$StateStats {
sealed class StateStats with _$StateStats {
const factory StateStats({
required TimestampDuration span,
required TimestampDuration reliable,
@ -109,7 +109,7 @@ class StateStats with _$StateStats {
////////////
@freezed
class StateReasonStats with _$StateReasonStats {
sealed class StateReasonStats with _$StateReasonStats {
const factory StateReasonStats({
required TimestampDuration canNotSend,
required TimestampDuration tooManyLostAnswers,
@ -127,7 +127,7 @@ class StateReasonStats with _$StateReasonStats {
////////////
@freezed
class AnswerStats with _$AnswerStats {
sealed class AnswerStats with _$AnswerStats {
const factory AnswerStats({
required TimestampDuration span,
required int questions,
@ -148,7 +148,7 @@ class AnswerStats with _$AnswerStats {
////////////
@freezed
class RPCStats with _$RPCStats {
sealed class RPCStats with _$RPCStats {
const factory RPCStats({
required int messagesSent,
required int messagesRcvd,
@ -170,7 +170,7 @@ class RPCStats with _$RPCStats {
////////////
@freezed
class PeerStats with _$PeerStats {
sealed class PeerStats with _$PeerStats {
const factory PeerStats({
required Timestamp timeAdded,
required RPCStats rpcStats,
@ -186,7 +186,7 @@ class PeerStats with _$PeerStats {
////////////
@freezed
class PeerTableData with _$PeerTableData {
sealed class PeerTableData with _$PeerTableData {
const factory PeerTableData({
required List<TypedKey> nodeIds,
required String peerAddress,
@ -251,7 +251,7 @@ sealed class VeilidUpdate with _$VeilidUpdate {
/// VeilidStateAttachment
@freezed
class VeilidStateAttachment with _$VeilidStateAttachment {
sealed class VeilidStateAttachment with _$VeilidStateAttachment {
const factory VeilidStateAttachment(
{required AttachmentState state,
required bool publicInternetReady,
@ -267,7 +267,7 @@ class VeilidStateAttachment with _$VeilidStateAttachment {
/// VeilidStateNetwork
@freezed
class VeilidStateNetwork with _$VeilidStateNetwork {
sealed class VeilidStateNetwork with _$VeilidStateNetwork {
const factory VeilidStateNetwork(
{required bool started,
required BigInt bpsDown,
@ -282,7 +282,7 @@ class VeilidStateNetwork with _$VeilidStateNetwork {
/// VeilidStateConfig
@freezed
class VeilidStateConfig with _$VeilidStateConfig {
sealed class VeilidStateConfig with _$VeilidStateConfig {
const factory VeilidStateConfig({
required VeilidConfig config,
}) = _VeilidStateConfig;
@ -295,7 +295,7 @@ class VeilidStateConfig with _$VeilidStateConfig {
/// VeilidState
@freezed
class VeilidState with _$VeilidState {
sealed class VeilidState with _$VeilidState {
const factory VeilidState({
required VeilidStateAttachment attachment,
required VeilidStateNetwork network,

File diff suppressed because it is too large

View file

@ -6,8 +6,8 @@ part of 'veilid_state.dart';
// JsonSerializableGenerator
// **************************************************************************
_$LatencyStatsImpl _$$LatencyStatsImplFromJson(Map<String, dynamic> json) =>
_$LatencyStatsImpl(
_LatencyStats _$LatencyStatsFromJson(Map<String, dynamic> json) =>
_LatencyStats(
fastest: TimestampDuration.fromJson(json['fastest']),
average: TimestampDuration.fromJson(json['average']),
slowest: TimestampDuration.fromJson(json['slowest']),
@ -17,7 +17,7 @@ _$LatencyStatsImpl _$$LatencyStatsImplFromJson(Map<String, dynamic> json) =>
p75: TimestampDuration.fromJson(json['p75']),
);
Map<String, dynamic> _$$LatencyStatsImplToJson(_$LatencyStatsImpl instance) =>
Map<String, dynamic> _$LatencyStatsToJson(_LatencyStats instance) =>
<String, dynamic>{
'fastest': instance.fastest.toJson(),
'average': instance.average.toJson(),
@ -28,15 +28,15 @@ Map<String, dynamic> _$$LatencyStatsImplToJson(_$LatencyStatsImpl instance) =>
'p75': instance.p75.toJson(),
};
_$TransferStatsImpl _$$TransferStatsImplFromJson(Map<String, dynamic> json) =>
_$TransferStatsImpl(
_TransferStats _$TransferStatsFromJson(Map<String, dynamic> json) =>
_TransferStats(
total: BigInt.parse(json['total'] as String),
maximum: BigInt.parse(json['maximum'] as String),
average: BigInt.parse(json['average'] as String),
minimum: BigInt.parse(json['minimum'] as String),
);
Map<String, dynamic> _$$TransferStatsImplToJson(_$TransferStatsImpl instance) =>
Map<String, dynamic> _$TransferStatsToJson(_TransferStats instance) =>
<String, dynamic>{
'total': instance.total.toString(),
'maximum': instance.maximum.toString(),
@ -44,22 +44,20 @@ Map<String, dynamic> _$$TransferStatsImplToJson(_$TransferStatsImpl instance) =>
'minimum': instance.minimum.toString(),
};
_$TransferStatsDownUpImpl _$$TransferStatsDownUpImplFromJson(
Map<String, dynamic> json) =>
_$TransferStatsDownUpImpl(
_TransferStatsDownUp _$TransferStatsDownUpFromJson(Map<String, dynamic> json) =>
_TransferStatsDownUp(
down: TransferStats.fromJson(json['down']),
up: TransferStats.fromJson(json['up']),
);
Map<String, dynamic> _$$TransferStatsDownUpImplToJson(
_$TransferStatsDownUpImpl instance) =>
Map<String, dynamic> _$TransferStatsDownUpToJson(
_TransferStatsDownUp instance) =>
<String, dynamic>{
'down': instance.down.toJson(),
'up': instance.up.toJson(),
};
_$StateStatsImpl _$$StateStatsImplFromJson(Map<String, dynamic> json) =>
_$StateStatsImpl(
_StateStats _$StateStatsFromJson(Map<String, dynamic> json) => _StateStats(
span: TimestampDuration.fromJson(json['span']),
reliable: TimestampDuration.fromJson(json['reliable']),
unreliable: TimestampDuration.fromJson(json['unreliable']),
@ -68,7 +66,7 @@ _$StateStatsImpl _$$StateStatsImplFromJson(Map<String, dynamic> json) =>
reason: StateReasonStats.fromJson(json['reason']),
);
Map<String, dynamic> _$$StateStatsImplToJson(_$StateStatsImpl instance) =>
Map<String, dynamic> _$StateStatsToJson(_StateStats instance) =>
<String, dynamic>{
'span': instance.span.toJson(),
'reliable': instance.reliable.toJson(),
@ -78,9 +76,8 @@ Map<String, dynamic> _$$StateStatsImplToJson(_$StateStatsImpl instance) =>
'reason': instance.reason.toJson(),
};
_$StateReasonStatsImpl _$$StateReasonStatsImplFromJson(
Map<String, dynamic> json) =>
_$StateReasonStatsImpl(
_StateReasonStats _$StateReasonStatsFromJson(Map<String, dynamic> json) =>
_StateReasonStats(
canNotSend: TimestampDuration.fromJson(json['can_not_send']),
tooManyLostAnswers:
TimestampDuration.fromJson(json['too_many_lost_answers']),
@ -93,8 +90,7 @@ _$StateReasonStatsImpl _$$StateReasonStatsImplFromJson(
TimestampDuration.fromJson(json['in_unreliable_ping_span']),
);
Map<String, dynamic> _$$StateReasonStatsImplToJson(
_$StateReasonStatsImpl instance) =>
Map<String, dynamic> _$StateReasonStatsToJson(_StateReasonStats instance) =>
<String, dynamic>{
'can_not_send': instance.canNotSend.toJson(),
'too_many_lost_answers': instance.tooManyLostAnswers.toJson(),
@ -105,8 +101,7 @@ Map<String, dynamic> _$$StateReasonStatsImplToJson(
'in_unreliable_ping_span': instance.inUnreliablePingSpan.toJson(),
};
_$AnswerStatsImpl _$$AnswerStatsImplFromJson(Map<String, dynamic> json) =>
_$AnswerStatsImpl(
_AnswerStats _$AnswerStatsFromJson(Map<String, dynamic> json) => _AnswerStats(
span: TimestampDuration.fromJson(json['span']),
questions: (json['questions'] as num).toInt(),
answers: (json['answers'] as num).toInt(),
@ -125,7 +120,7 @@ _$AnswerStatsImpl _$$AnswerStatsImplFromJson(Map<String, dynamic> json) =>
(json['consecutive_lost_answers_minimum'] as num).toInt(),
);
Map<String, dynamic> _$$AnswerStatsImplToJson(_$AnswerStatsImpl instance) =>
Map<String, dynamic> _$AnswerStatsToJson(_AnswerStats instance) =>
<String, dynamic>{
'span': instance.span.toJson(),
'questions': instance.questions,
@ -142,8 +137,7 @@ Map<String, dynamic> _$$AnswerStatsImplToJson(_$AnswerStatsImpl instance) =>
instance.consecutiveLostAnswersMinimum,
};
_$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
_$RPCStatsImpl(
_RPCStats _$RPCStatsFromJson(Map<String, dynamic> json) => _RPCStats(
messagesSent: (json['messages_sent'] as num).toInt(),
messagesRcvd: (json['messages_rcvd'] as num).toInt(),
questionsInFlight: (json['questions_in_flight'] as num).toInt(),
@ -165,8 +159,7 @@ _$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
answerOrdered: AnswerStats.fromJson(json['answer_ordered']),
);
Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
<String, dynamic>{
Map<String, dynamic> _$RPCStatsToJson(_RPCStats instance) => <String, dynamic>{
'messages_sent': instance.messagesSent,
'messages_rcvd': instance.messagesRcvd,
'questions_in_flight': instance.questionsInFlight,
@ -180,8 +173,7 @@ Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
'answer_ordered': instance.answerOrdered.toJson(),
};
_$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
_$PeerStatsImpl(
_PeerStats _$PeerStatsFromJson(Map<String, dynamic> json) => _PeerStats(
timeAdded: Timestamp.fromJson(json['time_added']),
rpcStats: RPCStats.fromJson(json['rpc_stats']),
transfer: TransferStatsDownUp.fromJson(json['transfer']),
@ -191,7 +183,7 @@ _$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
: LatencyStats.fromJson(json['latency']),
);
Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) =>
Map<String, dynamic> _$PeerStatsToJson(_PeerStats instance) =>
<String, dynamic>{
'time_added': instance.timeAdded.toJson(),
'rpc_stats': instance.rpcStats.toJson(),
@ -200,8 +192,8 @@ Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) =>
'latency': instance.latency?.toJson(),
};
_$PeerTableDataImpl _$$PeerTableDataImplFromJson(Map<String, dynamic> json) =>
_$PeerTableDataImpl(
_PeerTableData _$PeerTableDataFromJson(Map<String, dynamic> json) =>
_PeerTableData(
nodeIds: (json['node_ids'] as List<dynamic>)
.map(Typed<FixedEncodedString43>.fromJson)
.toList(),
@ -209,32 +201,29 @@ _$PeerTableDataImpl _$$PeerTableDataImplFromJson(Map<String, dynamic> json) =>
peerStats: PeerStats.fromJson(json['peer_stats']),
);
Map<String, dynamic> _$$PeerTableDataImplToJson(_$PeerTableDataImpl instance) =>
Map<String, dynamic> _$PeerTableDataToJson(_PeerTableData instance) =>
<String, dynamic>{
'node_ids': instance.nodeIds.map((e) => e.toJson()).toList(),
'peer_address': instance.peerAddress,
'peer_stats': instance.peerStats.toJson(),
};
_$VeilidLogImpl _$$VeilidLogImplFromJson(Map<String, dynamic> json) =>
_$VeilidLogImpl(
VeilidLog _$VeilidLogFromJson(Map<String, dynamic> json) => VeilidLog(
logLevel: VeilidLogLevel.fromJson(json['log_level']),
message: json['message'] as String,
backtrace: json['backtrace'] as String?,
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidLogImplToJson(_$VeilidLogImpl instance) =>
<String, dynamic>{
Map<String, dynamic> _$VeilidLogToJson(VeilidLog instance) => <String, dynamic>{
'log_level': instance.logLevel.toJson(),
'message': instance.message,
'backtrace': instance.backtrace,
'kind': instance.$type,
};
_$VeilidAppMessageImpl _$$VeilidAppMessageImplFromJson(
Map<String, dynamic> json) =>
_$VeilidAppMessageImpl(
VeilidAppMessage _$VeilidAppMessageFromJson(Map<String, dynamic> json) =>
VeilidAppMessage(
message:
const Uint8ListJsonConverter.jsIsArray().fromJson(json['message']),
sender: json['sender'] == null
@ -244,8 +233,7 @@ _$VeilidAppMessageImpl _$$VeilidAppMessageImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidAppMessageImplToJson(
_$VeilidAppMessageImpl instance) =>
Map<String, dynamic> _$VeilidAppMessageToJson(VeilidAppMessage instance) =>
<String, dynamic>{
'message':
const Uint8ListJsonConverter.jsIsArray().toJson(instance.message),
@ -254,8 +242,8 @@ Map<String, dynamic> _$$VeilidAppMessageImplToJson(
'kind': instance.$type,
};
_$VeilidAppCallImpl _$$VeilidAppCallImplFromJson(Map<String, dynamic> json) =>
_$VeilidAppCallImpl(
VeilidAppCall _$VeilidAppCallFromJson(Map<String, dynamic> json) =>
VeilidAppCall(
message:
const Uint8ListJsonConverter.jsIsArray().fromJson(json['message']),
callId: json['call_id'] as String,
@ -266,7 +254,7 @@ _$VeilidAppCallImpl _$$VeilidAppCallImplFromJson(Map<String, dynamic> json) =>
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidAppCallImplToJson(_$VeilidAppCallImpl instance) =>
Map<String, dynamic> _$VeilidAppCallToJson(VeilidAppCall instance) =>
<String, dynamic>{
'message':
const Uint8ListJsonConverter.jsIsArray().toJson(instance.message),
@ -276,9 +264,9 @@ Map<String, dynamic> _$$VeilidAppCallImplToJson(_$VeilidAppCallImpl instance) =>
'kind': instance.$type,
};
_$VeilidUpdateAttachmentImpl _$$VeilidUpdateAttachmentImplFromJson(
VeilidUpdateAttachment _$VeilidUpdateAttachmentFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateAttachmentImpl(
VeilidUpdateAttachment(
state: AttachmentState.fromJson(json['state']),
publicInternetReady: json['public_internet_ready'] as bool,
localNetworkReady: json['local_network_ready'] as bool,
@ -289,8 +277,8 @@ _$VeilidUpdateAttachmentImpl _$$VeilidUpdateAttachmentImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateAttachmentImplToJson(
_$VeilidUpdateAttachmentImpl instance) =>
Map<String, dynamic> _$VeilidUpdateAttachmentToJson(
VeilidUpdateAttachment instance) =>
<String, dynamic>{
'state': instance.state.toJson(),
'public_internet_ready': instance.publicInternetReady,
@ -300,9 +288,8 @@ Map<String, dynamic> _$$VeilidUpdateAttachmentImplToJson(
'kind': instance.$type,
};
_$VeilidUpdateNetworkImpl _$$VeilidUpdateNetworkImplFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateNetworkImpl(
VeilidUpdateNetwork _$VeilidUpdateNetworkFromJson(Map<String, dynamic> json) =>
VeilidUpdateNetwork(
started: json['started'] as bool,
bpsDown: BigInt.parse(json['bps_down'] as String),
bpsUp: BigInt.parse(json['bps_up'] as String),
@ -311,8 +298,8 @@ _$VeilidUpdateNetworkImpl _$$VeilidUpdateNetworkImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateNetworkImplToJson(
_$VeilidUpdateNetworkImpl instance) =>
Map<String, dynamic> _$VeilidUpdateNetworkToJson(
VeilidUpdateNetwork instance) =>
<String, dynamic>{
'started': instance.started,
'bps_down': instance.bpsDown.toString(),
@ -321,23 +308,21 @@ Map<String, dynamic> _$$VeilidUpdateNetworkImplToJson(
'kind': instance.$type,
};
_$VeilidUpdateConfigImpl _$$VeilidUpdateConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateConfigImpl(
VeilidUpdateConfig _$VeilidUpdateConfigFromJson(Map<String, dynamic> json) =>
VeilidUpdateConfig(
config: VeilidConfig.fromJson(json['config']),
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateConfigImplToJson(
_$VeilidUpdateConfigImpl instance) =>
Map<String, dynamic> _$VeilidUpdateConfigToJson(VeilidUpdateConfig instance) =>
<String, dynamic>{
'config': instance.config.toJson(),
'kind': instance.$type,
};
_$VeilidUpdateRouteChangeImpl _$$VeilidUpdateRouteChangeImplFromJson(
VeilidUpdateRouteChange _$VeilidUpdateRouteChangeFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateRouteChangeImpl(
VeilidUpdateRouteChange(
deadRoutes: (json['dead_routes'] as List<dynamic>)
.map((e) => e as String)
.toList(),
@ -347,17 +332,17 @@ _$VeilidUpdateRouteChangeImpl _$$VeilidUpdateRouteChangeImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateRouteChangeImplToJson(
_$VeilidUpdateRouteChangeImpl instance) =>
Map<String, dynamic> _$VeilidUpdateRouteChangeToJson(
VeilidUpdateRouteChange instance) =>
<String, dynamic>{
'dead_routes': instance.deadRoutes,
'dead_remote_routes': instance.deadRemoteRoutes,
'kind': instance.$type,
};
_$VeilidUpdateValueChangeImpl _$$VeilidUpdateValueChangeImplFromJson(
VeilidUpdateValueChange _$VeilidUpdateValueChangeFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateValueChangeImpl(
VeilidUpdateValueChange(
key: Typed<FixedEncodedString43>.fromJson(json['key']),
subkeys: (json['subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson)
@ -367,8 +352,8 @@ _$VeilidUpdateValueChangeImpl _$$VeilidUpdateValueChangeImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateValueChangeImplToJson(
_$VeilidUpdateValueChangeImpl instance) =>
Map<String, dynamic> _$VeilidUpdateValueChangeToJson(
VeilidUpdateValueChange instance) =>
<String, dynamic>{
'key': instance.key.toJson(),
'subkeys': instance.subkeys.map((e) => e.toJson()).toList(),
@ -377,9 +362,9 @@ Map<String, dynamic> _$$VeilidUpdateValueChangeImplToJson(
'kind': instance.$type,
};
_$VeilidStateAttachmentImpl _$$VeilidStateAttachmentImplFromJson(
_VeilidStateAttachment _$VeilidStateAttachmentFromJson(
Map<String, dynamic> json) =>
_$VeilidStateAttachmentImpl(
_VeilidStateAttachment(
state: AttachmentState.fromJson(json['state']),
publicInternetReady: json['public_internet_ready'] as bool,
localNetworkReady: json['local_network_ready'] as bool,
@ -389,8 +374,8 @@ _$VeilidStateAttachmentImpl _$$VeilidStateAttachmentImplFromJson(
: TimestampDuration.fromJson(json['attached_uptime']),
);
Map<String, dynamic> _$$VeilidStateAttachmentImplToJson(
_$VeilidStateAttachmentImpl instance) =>
Map<String, dynamic> _$VeilidStateAttachmentToJson(
_VeilidStateAttachment instance) =>
<String, dynamic>{
'state': instance.state.toJson(),
'public_internet_ready': instance.publicInternetReady,
@ -399,9 +384,8 @@ Map<String, dynamic> _$$VeilidStateAttachmentImplToJson(
'attached_uptime': instance.attachedUptime?.toJson(),
};
_$VeilidStateNetworkImpl _$$VeilidStateNetworkImplFromJson(
Map<String, dynamic> json) =>
_$VeilidStateNetworkImpl(
_VeilidStateNetwork _$VeilidStateNetworkFromJson(Map<String, dynamic> json) =>
_VeilidStateNetwork(
started: json['started'] as bool,
bpsDown: BigInt.parse(json['bps_down'] as String),
bpsUp: BigInt.parse(json['bps_up'] as String),
@ -409,8 +393,7 @@ _$VeilidStateNetworkImpl _$$VeilidStateNetworkImplFromJson(
(json['peers'] as List<dynamic>).map(PeerTableData.fromJson).toList(),
);
Map<String, dynamic> _$$VeilidStateNetworkImplToJson(
_$VeilidStateNetworkImpl instance) =>
Map<String, dynamic> _$VeilidStateNetworkToJson(_VeilidStateNetwork instance) =>
<String, dynamic>{
'started': instance.started,
'bps_down': instance.bpsDown.toString(),
@ -418,26 +401,23 @@ Map<String, dynamic> _$$VeilidStateNetworkImplToJson(
'peers': instance.peers.map((e) => e.toJson()).toList(),
};
_$VeilidStateConfigImpl _$$VeilidStateConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidStateConfigImpl(
_VeilidStateConfig _$VeilidStateConfigFromJson(Map<String, dynamic> json) =>
_VeilidStateConfig(
config: VeilidConfig.fromJson(json['config']),
);
Map<String, dynamic> _$$VeilidStateConfigImplToJson(
_$VeilidStateConfigImpl instance) =>
Map<String, dynamic> _$VeilidStateConfigToJson(_VeilidStateConfig instance) =>
<String, dynamic>{
'config': instance.config.toJson(),
};
_$VeilidStateImpl _$$VeilidStateImplFromJson(Map<String, dynamic> json) =>
_$VeilidStateImpl(
_VeilidState _$VeilidStateFromJson(Map<String, dynamic> json) => _VeilidState(
attachment: VeilidStateAttachment.fromJson(json['attachment']),
network: VeilidStateNetwork.fromJson(json['network']),
config: VeilidStateConfig.fromJson(json['config']),
);
Map<String, dynamic> _$$VeilidStateImplToJson(_$VeilidStateImpl instance) =>
Map<String, dynamic> _$VeilidStateToJson(_VeilidState instance) =>
<String, dynamic>{
'attachment': instance.attachment.toJson(),
'network': instance.network.toJson(),

View file

@ -9,7 +9,7 @@ include(FetchContent)
FetchContent_Declare(
Corrosion
GIT_REPOSITORY https://github.com/AndrewGaspar/corrosion.git
GIT_TAG v0.5.0 # Optionally specify a version tag or branch here
GIT_TAG v0.5.1 # Optionally specify a version tag or branch here
)
FetchContent_MakeAvailable(Corrosion)

View file

@ -9,7 +9,7 @@ include(FetchContent)
FetchContent_Declare(
Corrosion
GIT_REPOSITORY https://github.com/AndrewGaspar/corrosion.git
GIT_TAG v0.4.10 # Optionally specify a version tag or branch here
GIT_TAG v0.5.1 # Optionally specify a version tag or branch here
)
FetchContent_MakeAvailable(Corrosion)
@ -29,4 +29,3 @@ corrosion_import_crate(MANIFEST_PATH ${repository_root}/../veilid/Cargo.toml CRA
set(CRATE_NAME "veilid_flutter")
target_link_libraries(${PLUGIN_NAME} PUBLIC ${CRATE_NAME})
# list(APPEND PLUGIN_BUNDLED_LIBRARIES $<TARGET_FILE:${CRATE_NAME}-shared>)

View file

@ -7,7 +7,7 @@ import os
import veilid
from veilid import ValueSubkey, Timestamp, SafetySelection
from veilid.types import VeilidJSONEncoder
from veilid.types import ValueSeqNum, VeilidJSONEncoder
##################################################################
BOGUS_KEY = veilid.TypedKey.from_value(
@ -118,8 +118,8 @@ async def test_set_get_dht_value_with_owner(api_connection: veilid.VeilidAPI):
vd4 = await rc.get_dht_value(rec.key, ValueSubkey(1), False)
assert vd4 is None
print("vd2: {}", vd2.__dict__)
print("vd3: {}", vd3.__dict__)
#print("vd2: {}", vd2.__dict__)
#print("vd3: {}", vd3.__dict__)
assert vd2 == vd3
@ -135,7 +135,7 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI):
key = rec.key
owner = rec.owner
secret = rec.owner_secret
print(f"key:{key}")
#print(f"key:{key}")
cs = await api_connection.get_crypto_system(rec.key.kind())
async with cs:
@ -237,6 +237,14 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI):
await rc.set_dht_value(key, ValueSubkey(0), va)
# Verify subkey 0 can be set again because we override with the right writer
# Should return the prior sequence number because the value already exists online at seq 0
vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, veilid.KeyPair.from_parts(owner, secret))
assert vdtemp is not None
assert vdtemp.data == vb
assert vdtemp.seq == 0
assert vdtemp.writer == owner
# Should update the second time to seq 1
vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, veilid.KeyPair.from_parts(owner, secret))
assert vdtemp is None
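The assertions added above pin down the writer-override semantics: the first set with the owner keypair returns the value already online (at seq 0), and the repeat set returns None once the local value agrees with the network. A condensed sketch, assuming the open routing context rc and the key/owner/secret from this test:

writer = veilid.KeyPair.from_parts(owner, secret)

vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, writer)
if vdtemp is not None:
    # A different value existed online; vdtemp carries its data, seq, and writer.
    print(f"superseded value at seq {vdtemp.seq} from writer {vdtemp.writer}")

vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, writer)
assert vdtemp is None  # nothing newer online, so nothing is returned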
@ -297,7 +305,7 @@ async def test_watch_dht_values():
await sync(rc0, [rec0])
# Server 0: Make a watch on all the subkeys
active = await rc0.watch_dht_values(rec0.key, [], Timestamp(0), 0xFFFFFFFF)
active = await rc0.watch_dht_values(rec0.key)
assert active
# Server 1: Open the subkey
@ -462,7 +470,7 @@ async def test_watch_many_dht_values():
assert vd is None
# Server 0: Make a watch on all the subkeys
active = await rc0.watch_dht_values(records[n].key, [], Timestamp(0), 0xFFFFFFFF)
active = await rc0.watch_dht_values(records[n].key)
assert active
# Open and set all records
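Both watch call sites above drop the explicit subkey list, expiration timestamp, and count, since watch_dht_values now defaults to watching every subkey. A sketch of the two forms; the availability of the narrowed arguments as optional parameters is an assumption:

# Defaulted form used by the updated tests: watch all subkeys.
active = await rc0.watch_dht_values(records[n].key)
assert active

# Hypothetical narrowed form, if the optional parameters are kept:
# active = await rc0.watch_dht_values(records[n].key, [(ValueSubkey(0), ValueSubkey(3))])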
@ -516,16 +524,18 @@ async def test_inspect_dht_record(api_connection: veilid.VeilidAPI):
assert vd is None
rr = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.LOCAL)
print("rr: {}", rr.__dict__)
assert rr.subkeys == [(0,1)]
assert rr.local_seqs == [0, 0xFFFFFFFF]
assert rr.network_seqs == []
#print("rr: {}", rr.__dict__)
assert rr.subkeys == [(0, 1)]
assert rr.local_seqs == [0, None]
assert rr.network_seqs == [None, None]
await sync(rc, [rec])
rr2 = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.SYNC_GET)
print("rr2: {}", rr2.__dict__)
assert rr2.subkeys == [(0,1)]
assert rr2.local_seqs == [0, 0xFFFFFFFF]
assert rr2.network_seqs == [0, 0xFFFFFFFF]
#print("rr2: {}", rr2.__dict__)
assert rr2.subkeys == [(0, 1)]
assert rr2.local_seqs == [0, None]
assert rr2.network_seqs == [0, None]
await rc.close_dht_record(rec.key)
await rc.delete_dht_record(rec.key)
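With ValueSeqNum.NONE retired, a subkey that has never been seen now reports None in local_seqs and network_seqs, as the reworked assertions show. A hypothetical helper for turning a report into the list of subkeys still missing locally:

from typing import Optional

def missing_subkeys(local_seqs: list[Optional[int]]) -> list[int]:
    # Subkey indexes with no locally stored sequence number.
    return [i for i, seq in enumerate(local_seqs) if seq is None]

assert missing_subkeys([0, None]) == [1]  # matches the LOCAL report above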
@ -932,7 +942,7 @@ async def sync_win(
if key is not None:
futurerecords.remove(key)
if len(rr.subkeys) == 1 and rr.subkeys[0] == (0, subkey_count-1) and veilid.ValueSeqNum.NONE not in rr.local_seqs and len(rr.offline_subkeys) == 0:
if len(rr.subkeys) == 1 and rr.subkeys[0] == (0, subkey_count-1) and None not in rr.local_seqs and len(rr.offline_subkeys) == 0:
if key in recordreports:
del recordreports[key]
donerecords.add(key)
@ -959,7 +969,7 @@ async def sync_win(
win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(1))
for (a,b) in rr.subkeys:
for m in range(a, b+1):
if rr.local_seqs[m] != veilid.ValueSeqNum.NONE:
if rr.local_seqs[m] is not None:
win.addstr(n+2, m+1, " ", curses.color_pair(2))
for (a,b) in rr.offline_subkeys:
win.addstr(n+2, a+1, " " * (b-a+1), curses.color_pair(3))

View file

@ -2947,7 +2947,10 @@
"description": "The sequence numbers of each subkey requested from a locally stored DHT Record",
"type": "array",
"items": {
"type": "integer",
"type": [
"integer",
"null"
],
"format": "uint32",
"minimum": 0.0
}
@ -2956,7 +2959,10 @@
"description": "The sequence numbers of each subkey requested from the DHT over the network",
"type": "array",
"items": {
"type": "integer",
"type": [
"integer",
"null"
],
"format": "uint32",
"minimum": 0.0
}

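The schema change above widens both seqs arrays from uint32 to nullable uint32. A quick check with the third-party jsonschema package (an assumption that it is available), mirroring the new items clause:

import jsonschema

items = {"type": ["integer", "null"], "format": "uint32", "minimum": 0.0}
jsonschema.validate([0, None, 42], {"type": "array", "items": items})  # passes after this change
# The old items clause {"type": "integer"} would reject the None entries.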
View file

@ -237,8 +237,7 @@ class ValueSubkey(int):
class ValueSeqNum(int):
NONE = 4294967295
pass
####################################################################
@ -405,15 +404,15 @@ class DHTRecordDescriptor:
class DHTRecordReport:
subkeys: list[tuple[ValueSubkey, ValueSubkey]]
offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]]
local_seqs: list[ValueSeqNum]
network_seqs: list[ValueSeqNum]
local_seqs: list[Optional[ValueSeqNum]]
network_seqs: list[Optional[ValueSeqNum]]
def __init__(
self,
subkeys: list[tuple[ValueSubkey, ValueSubkey]],
offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]],
local_seqs: list[ValueSeqNum],
network_seqs: list[ValueSeqNum],
local_seqs: list[Optional[ValueSeqNum]],
network_seqs: list[Optional[ValueSeqNum]],
):
self.subkeys = subkeys
self.offline_subkeys = offline_subkeys
@ -428,8 +427,8 @@ class DHTRecordReport:
return cls(
[(p[0], p[1]) for p in j["subkeys"]],
[(p[0], p[1]) for p in j["offline_subkeys"]],
[ValueSeqNum(s) for s in j["local_seqs"]],
[ValueSeqNum(s) for s in j["network_seqs"]],
[(ValueSeqNum(s) if s is not None else None) for s in j["local_seqs"]],
[(ValueSeqNum(s) if s is not None else None) for s in j["network_seqs"]],
)
def to_json(self) -> dict:

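A usage sketch for the Optional-aware from_json above, with an input dict shaped like the schema change earlier in this commit:

j = {
    "subkeys": [[0, 1]],
    "offline_subkeys": [],
    "local_seqs": [0, None],
    "network_seqs": [None, None],
}
rr = DHTRecordReport.from_json(j)
assert rr.local_seqs == [ValueSeqNum(0), None]
assert rr.network_seqs == [None, None]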
View file

@ -212,13 +212,13 @@ fn main() -> EyreResult<()> {
settingsrw.logging.terminal.enabled = true;
settingsrw.logging.terminal.level = LogLevel::Debug;
settingsrw.logging.api.enabled = true;
settingsrw.logging.api.level = LogLevel::Debug;
settingsrw.logging.api.level = LogLevel::Info;
}
if args.logging.trace {
settingsrw.logging.terminal.enabled = true;
settingsrw.logging.terminal.level = LogLevel::Trace;
settingsrw.logging.api.enabled = true;
settingsrw.logging.api.level = LogLevel::Trace;
settingsrw.logging.api.level = LogLevel::Info;
}
if let Some(subnode_index) = args.subnode_index {

View file

@ -13,6 +13,8 @@ export type KeyPair = `${PublicKey}:${SecretKey}`;
export type FourCC = "NONE" | "VLD0" | string;
export type CryptoTyped<TCryptoKey extends string> = `${FourCC}:${TCryptoKey}`;
export type CryptoTypedGroup<TCryptoKey extends string> = Array<CryptoTyped<TCryptoKey>>;
export
"#;
#[wasm_bindgen]

View file

@ -17,7 +17,7 @@
},
"scripts": {
"test": "wdio run ./wdio.conf.ts",
"test:headless": "WDIO_HEADLESS=true npm run test",
"test:headless": "WDIO_HEADLESS=true npm run test --",
"start": "tsc && npm run test:headless"
}
}

View file

@ -149,6 +149,9 @@ describe('VeilidRoutingContext', () => {
);
expect(setValueRes).toBeUndefined();
// Wait for synchronization
await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key);
const getValueRes = await routingContext.getDhtValue(
dhtRecord.key,
0,
@ -282,9 +285,9 @@ describe('VeilidRoutingContext', () => {
"Local",
);
expect(inspectRes).toBeDefined();
expect(inspectRes.subkeys.concat(inspectRes.offline_subkeys)).toEqual([[0, 0]]);
expect(inspectRes.subkeys).toEqual([[0, 0]]);
expect(inspectRes.local_seqs).toEqual([0]);
expect(inspectRes.network_seqs).toEqual([]);
expect(inspectRes.network_seqs).toEqual([undefined]);
// Wait for synchronization
await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key);
@ -310,14 +313,17 @@ describe('VeilidRoutingContext', () => {
);
expect(setValueRes).toBeUndefined();
// Wait for synchronization
await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key);
// Inspect locally
const inspectRes = await routingContext.inspectDhtRecord(
dhtRecord.key,
);
expect(inspectRes).toBeDefined();
expect(inspectRes.subkeys.concat(inspectRes.offline_subkeys)).toEqual([[0, 0]]);
expect(inspectRes.offline_subkeys).toEqual([]);
expect(inspectRes.local_seqs).toEqual([0]);
expect(inspectRes.network_seqs).toEqual([]);
expect(inspectRes.network_seqs).toEqual([undefined]);
});
});
});

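The tests above now block on waitForOfflineSubkeyWrite before reading back, because a set can complete locally while the subkey is still queued for the network (visible as a non-empty offline_subkeys). A hypothetical Python equivalent that polls inspect_dht_record until the queue drains:

import asyncio
import veilid

async def wait_for_offline_subkey_write(rc, key, interval: float = 0.5):
    # Hypothetical helper: spin until nothing remains queued for the network.
    while True:
        rr = await rc.inspect_dht_record(key, [], veilid.DHTReportScope.LOCAL)
        if len(rr.offline_subkeys) == 0:
            return
        await asyncio.sleep(interval)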
View file

@ -21,5 +21,9 @@ export const veilidCoreInitConfig: VeilidWASMConfig = {
export var veilidCoreStartupConfig = (() => {
var defaultConfig = JSON.parse(veilidClient.defaultConfig());
defaultConfig.program_name = 'veilid-wasm-test';
// Ensure we are starting from scratch
defaultConfig.table_store.delete = true;
defaultConfig.protected_store.delete = true;
defaultConfig.block_store.delete = true;
return defaultConfig;
})();

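The three delete flags added above wipe any persisted stores at startup so each run begins from a clean slate. The same tweak on a Python-side config dict (the dict shape mirrors the TypeScript defaultConfig and is otherwise an assumption):

config = {"table_store": {}, "protected_store": {}, "block_store": {}}
for store in ("table_store", "protected_store", "block_store"):
    config[store]["delete"] = True  # ensure we are starting from scratch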
View file

@ -5,8 +5,8 @@ import {
veilidCoreStartupConfig,
} from './utils/veilid-config';
import { VeilidState, veilidClient } from 'veilid-wasm';
import { asyncCallWithTimeout, waitForPublicAttachment } from './utils/wait-utils';
import { VeilidState, veilidClient } from '../../pkg/veilid_wasm';
import { asyncCallWithTimeout, waitForDetached, waitForPublicAttachment } from './utils/wait-utils';
describe('veilidClient', function () {
before('veilid startup', async function () {
@ -45,16 +45,17 @@ describe('veilidClient', function () {
await veilidClient.attach();
await asyncCallWithTimeout(waitForPublicAttachment(), 10000);
await veilidClient.detach();
await asyncCallWithTimeout(waitForDetached(), 10000);
});
describe('kitchen sink', function () {
before('attach', async function () {
await veilidClient.attach();
await waitForPublicAttachment();
await asyncCallWithTimeout(waitForPublicAttachment(), 10000);
});
after('detach', async function () {
await veilidClient.detach();
await asyncCallWithTimeout(waitForDetached(), 10000);
});
let state: VeilidState;

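The lifecycle above now bounds every attach/detach wait with a 10 s timeout, including the new waitForDetached. asyncio expresses the same pattern; the polling predicate here is hypothetical:

import asyncio

async def wait_until(predicate, interval: float = 0.5):
    # Poll until the (hypothetical) predicate coroutine reports True.
    while not await predicate():
        await asyncio.sleep(interval)

# Equivalent of asyncCallWithTimeout: raises asyncio.TimeoutError after 10 s.
# await asyncio.wait_for(wait_until(is_detached), timeout=10)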
View file

@ -18,7 +18,7 @@ npm install
original_tmpdir=$TMPDIR
mkdir -p ~/tmp
export TMPDIR=~/tmp
npm run test:headless
npm run test:headless -- "$@"
export TMPDIR=$original_tmpdir
popd &> /dev/null