Local Rehydration

This commit is contained in:
Christien Rioux 2025-04-25 17:18:39 -04:00
parent b964d0db40
commit c194f61644
48 changed files with 10889 additions and 11940 deletions

View file

@ -1,5 +1,7 @@
use super::*; use super::*;
impl_veilid_log_facility!("net");
impl NetworkManager { impl NetworkManager {
// Direct bootstrap request handler (separate fallback mechanism from cheaper TXT bootstrap mechanism) // Direct bootstrap request handler (separate fallback mechanism from cheaper TXT bootstrap mechanism)
#[instrument(level = "trace", target = "net", skip(self), ret, err)] #[instrument(level = "trace", target = "net", skip(self), ret, err)]
@ -16,6 +18,8 @@ impl NetworkManager {
.collect(); .collect();
let json_bytes = serialize_json(bootstrap_peerinfo).as_bytes().to_vec(); let json_bytes = serialize_json(bootstrap_peerinfo).as_bytes().to_vec();
veilid_log!(self trace "BOOT reponse: {}", String::from_utf8_lossy(&json_bytes));
// Reply with a chunk of signed routing table // Reply with a chunk of signed routing table
let net = self.net(); let net = self.net();
match pin_future_closure!(net.send_data_to_existing_flow(flow, json_bytes)).await? { match pin_future_closure!(net.send_data_to_existing_flow(flow, json_bytes)).await? {

View file

@ -899,14 +899,24 @@ impl RoutingTable {
return false; return false;
} }
// does it have some dial info we need? // Only nodes with direct publicinternet node info
let filter = |n: &NodeInfo| { let Some(signed_node_info) = e.signed_node_info(RoutingDomain::PublicInternet)
let mut keep = false; else {
return false;
};
let SignedNodeInfo::Direct(signed_direct_node_info) = signed_node_info else {
return false;
};
let node_info = signed_direct_node_info.node_info();
// Bootstraps must have -only- inbound capable network class // Bootstraps must have -only- inbound capable network class
if !matches!(n.network_class(), NetworkClass::InboundCapable) { if !matches!(node_info.network_class(), NetworkClass::InboundCapable) {
return false; return false;
} }
for did in n.dial_info_detail_list() {
// Check for direct dialinfo and a good mix of protocol and address types
let mut keep = false;
for did in node_info.dial_info_detail_list() {
// Bootstraps must have -only- direct dial info // Bootstraps must have -only- direct dial info
if !matches!(did.class, DialInfoClass::Direct) { if !matches!(did.class, DialInfoClass::Direct) {
return false; return false;
@ -932,11 +942,6 @@ impl RoutingTable {
} }
} }
keep keep
};
e.node_info(RoutingDomain::PublicInternet)
.map(filter)
.unwrap_or(false)
}) })
}, },
) as RoutingTableEntryFilter; ) as RoutingTableEntryFilter;

View file

@ -5,22 +5,13 @@ const MAX_INSPECT_VALUE_Q_SUBKEY_RANGES_LEN: usize = 512;
pub const MAX_INSPECT_VALUE_A_SEQS_LEN: usize = 512; pub const MAX_INSPECT_VALUE_A_SEQS_LEN: usize = 512;
const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20; const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Clone)] #[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct ValidateInspectValueContext { pub(in crate::rpc_processor) struct ValidateInspectValueContext {
pub last_descriptor: Option<SignedValueDescriptor>, pub last_descriptor: Option<SignedValueDescriptor>,
pub subkeys: ValueSubkeyRangeSet, pub subkeys: ValueSubkeyRangeSet,
pub crypto_kind: CryptoKind, pub crypto_kind: CryptoKind,
} }
impl fmt::Debug for ValidateInspectValueContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ValidateInspectValueContext")
.field("last_descriptor", &self.last_descriptor)
.field("crypto_kind", &self.crypto_kind)
.finish()
}
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationInspectValueQ { pub(in crate::rpc_processor) struct RPCOperationInspectValueQ {
key: TypedKey, key: TypedKey,
@ -161,12 +152,20 @@ impl RPCOperationInspectValueA {
}; };
// Ensure seqs returned does not exceed subkeys requested // Ensure seqs returned does not exceed subkeys requested
#[allow(clippy::unnecessary_cast)] let subkey_count = if inspect_value_context.subkeys.is_empty()
if self.seqs.len() as u64 > inspect_value_context.subkeys.len() as u64 { || inspect_value_context.subkeys.is_full()
return Err(RPCError::protocol(format!( || inspect_value_context.subkeys.len() > MAX_INSPECT_VALUE_A_SEQS_LEN as u64
"InspectValue seqs length is greater than subkeys requested: {} > {}", {
self.seqs.len(), MAX_INSPECT_VALUE_A_SEQS_LEN as u64
} else {
inspect_value_context.subkeys.len() inspect_value_context.subkeys.len()
};
if self.seqs.len() as u64 > subkey_count {
return Err(RPCError::protocol(format!(
"InspectValue seqs length is greater than subkeys requested: {} > {}: {:#?}",
self.seqs.len(),
subkey_count,
inspect_value_context
))); )));
} }

View file

@ -5,7 +5,7 @@ impl_veilid_log_facility!("rpc");
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct InspectValueAnswer { pub struct InspectValueAnswer {
pub seqs: Vec<ValueSeqNum>, pub seqs: Vec<Option<ValueSeqNum>>,
pub peers: Vec<Arc<PeerInfo>>, pub peers: Vec<Arc<PeerInfo>>,
pub descriptor: Option<SignedValueDescriptor>, pub descriptor: Option<SignedValueDescriptor>,
} }
@ -110,6 +110,11 @@ impl RPCProcessor {
}; };
let (seqs, peers, descriptor) = inspect_value_a.destructure(); let (seqs, peers, descriptor) = inspect_value_a.destructure();
let seqs = seqs
.into_iter()
.map(|x| if x == ValueSeqNum::MAX { None } else { Some(x) })
.collect::<Vec<_>>();
if debug_target_enabled!("dht") { if debug_target_enabled!("dht") {
let debug_string_answer = format!( let debug_string_answer = format!(
"OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}", "OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}",
@ -232,8 +237,15 @@ impl RPCProcessor {
.inbound_inspect_value(key, subkeys, want_descriptor) .inbound_inspect_value(key, subkeys, want_descriptor)
.await .await
.map_err(RPCError::internal)?); .map_err(RPCError::internal)?);
(inspect_result.seqs, inspect_result.opt_descriptor) (
inspect_result.seqs().to_vec(),
inspect_result.opt_descriptor(),
)
}; };
let inspect_result_seqs = inspect_result_seqs
.into_iter()
.map(|x| if let Some(s) = x { s } else { ValueSubkey::MAX })
.collect::<Vec<_>>();
if debug_target_enabled!("dht") { if debug_target_enabled!("dht") {
let debug_string_answer = format!( let debug_string_answer = format!(

View file

@ -79,7 +79,7 @@ impl StorageManager {
pub async fn debug_local_record_subkey_info( pub async fn debug_local_record_subkey_info(
&self, &self,
key: TypedKey, record_key: TypedKey,
subkey: ValueSubkey, subkey: ValueSubkey,
) -> String { ) -> String {
let inner = self.inner.lock().await; let inner = self.inner.lock().await;
@ -87,12 +87,12 @@ impl StorageManager {
return "not initialized".to_owned(); return "not initialized".to_owned();
}; };
local_record_store local_record_store
.debug_record_subkey_info(key, subkey) .debug_record_subkey_info(record_key, subkey)
.await .await
} }
pub async fn debug_remote_record_subkey_info( pub async fn debug_remote_record_subkey_info(
&self, &self,
key: TypedKey, record_key: TypedKey,
subkey: ValueSubkey, subkey: ValueSubkey,
) -> String { ) -> String {
let inner = self.inner.lock().await; let inner = self.inner.lock().await;
@ -100,17 +100,17 @@ impl StorageManager {
return "not initialized".to_owned(); return "not initialized".to_owned();
}; };
remote_record_store remote_record_store
.debug_record_subkey_info(key, subkey) .debug_record_subkey_info(record_key, subkey)
.await .await
} }
pub async fn debug_local_record_info(&self, key: TypedKey) -> String { pub async fn debug_local_record_info(&self, record_key: TypedKey) -> String {
let inner = self.inner.lock().await; let inner = self.inner.lock().await;
let Some(local_record_store) = &inner.local_record_store else { let Some(local_record_store) = &inner.local_record_store else {
return "not initialized".to_owned(); return "not initialized".to_owned();
}; };
let local_debug = local_record_store.debug_record_info(key); let local_debug = local_record_store.debug_record_info(record_key);
let opened_debug = if let Some(o) = inner.opened_records.get(&key) { let opened_debug = if let Some(o) = inner.opened_records.get(&record_key) {
format!("Opened Record: {:#?}\n", o) format!("Opened Record: {:#?}\n", o)
} else { } else {
"".to_owned() "".to_owned()
@ -119,11 +119,11 @@ impl StorageManager {
format!("{}\n{}", local_debug, opened_debug) format!("{}\n{}", local_debug, opened_debug)
} }
pub async fn debug_remote_record_info(&self, key: TypedKey) -> String { pub async fn debug_remote_record_info(&self, record_key: TypedKey) -> String {
let inner = self.inner.lock().await; let inner = self.inner.lock().await;
let Some(remote_record_store) = &inner.remote_record_store else { let Some(remote_record_store) = &inner.remote_record_store else {
return "not initialized".to_owned(); return "not initialized".to_owned();
}; };
remote_record_store.debug_record_info(key) remote_record_store.debug_record_info(record_key)
} }
} }

View file

@ -28,7 +28,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)] #[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_get_value( pub(super) async fn outbound_get_value(
&self, &self,
key: TypedKey, record_key: TypedKey,
subkey: ValueSubkey, subkey: ValueSubkey,
safety_selection: SafetySelection, safety_selection: SafetySelection,
last_get_result: GetResult, last_get_result: GetResult,
@ -47,7 +47,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout // Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = { let init_fanout_queue = {
self.get_value_nodes(key) self.get_value_nodes(record_key)
.await? .await?
.unwrap_or_default() .unwrap_or_default()
.into_iter() .into_iter()
@ -93,7 +93,7 @@ impl StorageManager {
.rpc_call_get_value( .rpc_call_get_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)) Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection), .with_safety(safety_selection),
key, record_key,
subkey, subkey,
last_descriptor.map(|x| (*x).clone()), last_descriptor.map(|x| (*x).clone()),
) )
@ -255,7 +255,7 @@ impl StorageManager {
let routing_table = registry.routing_table(); let routing_table = registry.routing_table();
let fanout_call = FanoutCall::new( let fanout_call = FanoutCall::new(
&routing_table, &routing_table,
key, record_key,
key_count, key_count,
fanout, fanout,
consensus_count, consensus_count,

View file

@ -28,7 +28,7 @@ impl DescriptorInfo {
/// Info tracked per subkey /// Info tracked per subkey
struct SubkeySeqCount { struct SubkeySeqCount {
/// The newest sequence number found for a subkey /// The newest sequence number found for a subkey
pub seq: ValueSeqNum, pub seq: Option<ValueSeqNum>,
/// The set of nodes that had the most recent value for this subkey /// The set of nodes that had the most recent value for this subkey
pub consensus_nodes: Vec<NodeRef>, pub consensus_nodes: Vec<NodeRef>,
/// The set of nodes that had any value for this subkey /// The set of nodes that had any value for this subkey
@ -44,6 +44,7 @@ struct OutboundInspectValueContext {
} }
/// The result of the outbound_get_value operation /// The result of the outbound_get_value operation
#[derive(Debug, Clone)]
pub(super) struct OutboundInspectValueResult { pub(super) struct OutboundInspectValueResult {
/// Fanout results for each subkey /// Fanout results for each subkey
pub subkey_fanout_results: Vec<FanoutResult>, pub subkey_fanout_results: Vec<FanoutResult>,
@ -56,13 +57,14 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)] #[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_inspect_value( pub(super) async fn outbound_inspect_value(
&self, &self,
key: TypedKey, record_key: TypedKey,
subkeys: ValueSubkeyRangeSet, subkeys: ValueSubkeyRangeSet,
safety_selection: SafetySelection, safety_selection: SafetySelection,
local_inspect_result: InspectResult, local_inspect_result: InspectResult,
use_set_scope: bool, use_set_scope: bool,
) -> VeilidAPIResult<OutboundInspectValueResult> { ) -> VeilidAPIResult<OutboundInspectValueResult> {
let routing_domain = RoutingDomain::PublicInternet; let routing_domain = RoutingDomain::PublicInternet;
let requested_subkeys = subkeys.clone();
// Get the DHT parameters for 'InspectValue' // Get the DHT parameters for 'InspectValue'
// Can use either 'get scope' or 'set scope' depending on the purpose of the inspection // Can use either 'get scope' or 'set scope' depending on the purpose of the inspection
@ -86,7 +88,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout // Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = { let init_fanout_queue = {
self.get_value_nodes(key) self.get_value_nodes(record_key)
.await? .await?
.unwrap_or_default() .unwrap_or_default()
.into_iter() .into_iter()
@ -99,16 +101,16 @@ impl StorageManager {
}; };
// Make do-inspect-value answer context // Make do-inspect-value answer context
let opt_descriptor_info = if let Some(descriptor) = &local_inspect_result.opt_descriptor { let opt_descriptor_info = if let Some(descriptor) = local_inspect_result.opt_descriptor() {
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network. // Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
Some(DescriptorInfo::new(descriptor.clone(), &subkeys)?) Some(DescriptorInfo::new(descriptor, &subkeys)?)
} else { } else {
None None
}; };
let context = Arc::new(Mutex::new(OutboundInspectValueContext { let context = Arc::new(Mutex::new(OutboundInspectValueContext {
seqcounts: local_inspect_result seqcounts: local_inspect_result
.seqs .seqs()
.iter() .iter()
.map(|s| SubkeySeqCount { .map(|s| SubkeySeqCount {
seq: *s, seq: *s,
@ -127,7 +129,7 @@ impl StorageManager {
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> { move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {
let context = context.clone(); let context = context.clone();
let registry = registry.clone(); let registry = registry.clone();
let opt_descriptor = local_inspect_result.opt_descriptor.clone(); let opt_descriptor = local_inspect_result.opt_descriptor();
let subkeys = subkeys.clone(); let subkeys = subkeys.clone();
Box::pin(async move { Box::pin(async move {
let rpc_processor = registry.rpc_processor(); let rpc_processor = registry.rpc_processor();
@ -136,7 +138,7 @@ impl StorageManager {
rpc_processor rpc_processor
.rpc_call_inspect_value( .rpc_call_inspect_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection), Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
key, record_key,
subkeys.clone(), subkeys.clone(),
opt_descriptor.map(|x| (*x).clone()), opt_descriptor.map(|x| (*x).clone()),
) )
@ -237,13 +239,13 @@ impl StorageManager {
// Then take that sequence number and note that we have gotten newer sequence numbers so we keep // Then take that sequence number and note that we have gotten newer sequence numbers so we keep
// looking for consensus // looking for consensus
// If the sequence number matches the old sequence number, then we keep the value node for reference later // If the sequence number matches the old sequence number, then we keep the value node for reference later
if answer_seq != ValueSeqNum::MAX { if let Some(answer_seq) = answer_seq {
if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq if ctx_seqcnt.seq.is_none() || answer_seq > ctx_seqcnt.seq.unwrap()
{ {
// One node has shown us the latest sequence numbers so far // One node has shown us the latest sequence numbers so far
ctx_seqcnt.seq = answer_seq; ctx_seqcnt.seq = Some(answer_seq);
ctx_seqcnt.consensus_nodes = vec![next_node.clone()]; ctx_seqcnt.consensus_nodes = vec![next_node.clone()];
} else if answer_seq == ctx_seqcnt.seq { } else if answer_seq == ctx_seqcnt.seq.unwrap() {
// Keep the nodes that showed us the latest values // Keep the nodes that showed us the latest values
ctx_seqcnt.consensus_nodes.push(next_node.clone()); ctx_seqcnt.consensus_nodes.push(next_node.clone());
} }
@ -288,7 +290,7 @@ impl StorageManager {
let routing_table = self.routing_table(); let routing_table = self.routing_table();
let fanout_call = FanoutCall::new( let fanout_call = FanoutCall::new(
&routing_table, &routing_table,
key, record_key,
key_count, key_count,
fanout, fanout,
consensus_count, consensus_count,
@ -322,28 +324,41 @@ impl StorageManager {
veilid_log!(self debug "InspectValue Fanout: {:#}:\n{}", fanout_result, debug_fanout_results(&subkey_fanout_results)); veilid_log!(self debug "InspectValue Fanout: {:#}:\n{}", fanout_result, debug_fanout_results(&subkey_fanout_results));
} }
Ok(OutboundInspectValueResult { let result = OutboundInspectValueResult {
subkey_fanout_results, subkey_fanout_results,
inspect_result: InspectResult { inspect_result: InspectResult::new(
subkeys: ctx self,
.opt_descriptor_info requested_subkeys,
"outbound_inspect_value",
ctx.opt_descriptor_info
.as_ref() .as_ref()
.map(|d| d.subkeys.clone()) .map(|d| d.subkeys.clone())
.unwrap_or_default(), .unwrap_or_default(),
seqs: ctx.seqcounts.iter().map(|cs| cs.seq).collect(), ctx.seqcounts.iter().map(|cs| cs.seq).collect(),
opt_descriptor: ctx ctx.opt_descriptor_info
.opt_descriptor_info
.as_ref() .as_ref()
.map(|d| d.descriptor.clone()), .map(|d| d.descriptor.clone()),
}, )?,
}) };
#[allow(clippy::unnecessary_cast)]
{
if result.inspect_result.subkeys().len() as u64
!= result.subkey_fanout_results.len() as u64
{
veilid_log!(self error "mismatch between subkeys returned and fanout results returned: {}!={}", result.inspect_result.subkeys().len(), result.subkey_fanout_results.len());
apibail_internal!("subkey and fanout list length mismatched");
}
}
Ok(result)
} }
/// Handle a received 'Inspect Value' query /// Handle a received 'Inspect Value' query
#[instrument(level = "trace", target = "dht", skip_all)] #[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_inspect_value( pub async fn inbound_inspect_value(
&self, &self,
key: TypedKey, record_key: TypedKey,
subkeys: ValueSubkeyRangeSet, subkeys: ValueSubkeyRangeSet,
want_descriptor: bool, want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<InspectResult>> { ) -> VeilidAPIResult<NetworkResult<InspectResult>> {
@ -352,20 +367,21 @@ impl StorageManager {
// See if this is a remote or local value // See if this is a remote or local value
let (_is_local, inspect_result) = { let (_is_local, inspect_result) = {
// See if the subkey we are getting has a last known local value // See if the subkey we are getting has a last known local value
let mut local_inspect_result = let mut local_inspect_result = self
Self::handle_inspect_local_value_inner(&mut inner, key, subkeys.clone(), true) .handle_inspect_local_value_inner(&mut inner, record_key, subkeys.clone(), true)
.await?; .await?;
// If this is local, it must have a descriptor already // If this is local, it must have a descriptor already
if local_inspect_result.opt_descriptor.is_some() { if local_inspect_result.opt_descriptor().is_some() {
if !want_descriptor { if !want_descriptor {
local_inspect_result.opt_descriptor = None; local_inspect_result.drop_descriptor();
} }
(true, local_inspect_result) (true, local_inspect_result)
} else { } else {
// See if the subkey we are getting has a last known remote value // See if the subkey we are getting has a last known remote value
let remote_inspect_result = Self::handle_inspect_remote_value_inner( let remote_inspect_result = self
.handle_inspect_remote_value_inner(
&mut inner, &mut inner,
key, record_key,
subkeys, subkeys,
want_descriptor, want_descriptor,
) )

File diff suppressed because it is too large Load diff

View file

@ -133,7 +133,7 @@ impl OutboundWatchManager {
// Watch does not exist, add one if that's what is desired // Watch does not exist, add one if that's what is desired
if let Some(desired) = desired_watch { if let Some(desired) = desired_watch {
self.outbound_watches self.outbound_watches
.insert(record_key, OutboundWatch::new(desired)); .insert(record_key, OutboundWatch::new(record_key, desired));
} }
} }
} }

View file

@ -4,6 +4,9 @@ impl_veilid_log_facility!("stor");
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub(in crate::storage_manager) struct OutboundWatch { pub(in crate::storage_manager) struct OutboundWatch {
/// Record key being watched
record_key: TypedKey,
/// Current state /// Current state
/// None means inactive/cancelled /// None means inactive/cancelled
state: Option<OutboundWatchState>, state: Option<OutboundWatchState>,
@ -34,12 +37,14 @@ impl fmt::Display for OutboundWatch {
impl OutboundWatch { impl OutboundWatch {
/// Create new outbound watch with desired parameters /// Create new outbound watch with desired parameters
pub fn new(desired: OutboundWatchParameters) -> Self { pub fn new(record_key: TypedKey, desired: OutboundWatchParameters) -> Self {
Self { Self {
record_key,
state: None, state: None,
desired: Some(desired), desired: Some(desired),
} }
} }
/// Get current watch state if it exists /// Get current watch state if it exists
pub fn state(&self) -> Option<&OutboundWatchState> { pub fn state(&self) -> Option<&OutboundWatchState> {
self.state.as_ref() self.state.as_ref()
@ -107,7 +112,7 @@ impl OutboundWatch {
/// Returns true if this outbound watch needs to be cancelled /// Returns true if this outbound watch needs to be cancelled
pub fn needs_cancel(&self, registry: &VeilidComponentRegistry) -> bool { pub fn needs_cancel(&self, registry: &VeilidComponentRegistry) -> bool {
if self.is_dead() { if self.is_dead() {
veilid_log!(registry warn "should have checked for is_dead first"); veilid_log!(registry warn "Should have checked for is_dead first");
return false; return false;
} }
@ -118,6 +123,7 @@ impl OutboundWatch {
// If the desired parameters is None then cancel // If the desired parameters is None then cancel
let Some(_desired) = self.desired.as_ref() else { let Some(_desired) = self.desired.as_ref() else {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_cancel because desired is None", self.record_key);
return true; return true;
}; };
@ -132,7 +138,7 @@ impl OutboundWatch {
cur_ts: Timestamp, cur_ts: Timestamp,
) -> bool { ) -> bool {
if self.is_dead() || self.needs_cancel(registry) { if self.is_dead() || self.needs_cancel(registry) {
veilid_log!(registry warn "should have checked for is_dead and needs_cancel first"); veilid_log!(registry warn "Should have checked for is_dead and needs_cancel first");
return false; return false;
} }
@ -156,11 +162,17 @@ impl OutboundWatch {
// If we have a consensus but need to renew because some per-node watches // If we have a consensus but need to renew because some per-node watches
// either expired or had their routes die, do it // either expired or had their routes die, do it
if self.wants_per_node_watch_update(registry, state, cur_ts) { if self.wants_per_node_watch_update(registry, state, cur_ts) {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_renew because per_node_watch wants update", self.record_key);
return true; return true;
} }
// If the desired parameters have changed, then we should renew with them // If the desired parameters have changed, then we should renew with them
state.params() != desired if state.params() != desired {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_renew because desired params have changed: {} != {}", self.record_key, state.params(), desired);
return true;
}
false
} }
/// Returns true if there is work to be done on getting the outbound /// Returns true if there is work to be done on getting the outbound
@ -175,7 +187,7 @@ impl OutboundWatch {
|| self.needs_cancel(registry) || self.needs_cancel(registry)
|| self.needs_renew(registry, consensus_count, cur_ts) || self.needs_renew(registry, consensus_count, cur_ts)
{ {
veilid_log!(registry warn "should have checked for is_dead, needs_cancel, needs_renew first"); veilid_log!(registry warn "Should have checked for is_dead, needs_cancel, needs_renew first");
return false; return false;
} }
@ -187,6 +199,7 @@ impl OutboundWatch {
// If there is a desired watch but no current state, then reconcile // If there is a desired watch but no current state, then reconcile
let Some(state) = self.state() else { let Some(state) = self.state() else {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because state is empty", self.record_key);
return true; return true;
}; };
@ -195,13 +208,17 @@ impl OutboundWatch {
if state.nodes().len() < consensus_count if state.nodes().len() < consensus_count
&& cur_ts >= state.next_reconcile_ts().unwrap_or_default() && cur_ts >= state.next_reconcile_ts().unwrap_or_default()
{ {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because consensus count is too low {} < {}", self.record_key, state.nodes().len(), consensus_count);
return true; return true;
} }
// Try to reconcile if our number of nodes currently is less than what we got from // Try to reconcile if our number of nodes currently is less than what we got from
// the previous reconciliation attempt // the previous reconciliation attempt
if let Some(last_consensus_node_count) = state.last_consensus_node_count() { if let Some(last_consensus_node_count) = state.last_consensus_node_count() {
if state.nodes().len() < last_consensus_node_count { if state.nodes().len() < last_consensus_node_count
&& state.nodes().len() < consensus_count
{
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because node count is less than last consensus {} < {}", self.record_key, state.nodes().len(), last_consensus_node_count);
return true; return true;
} }
} }
@ -209,11 +226,17 @@ impl OutboundWatch {
// If we have a consensus, or are not attempting consensus at this time, // If we have a consensus, or are not attempting consensus at this time,
// but need to reconcile because some per-node watches either expired or had their routes die, do it // but need to reconcile because some per-node watches either expired or had their routes die, do it
if self.wants_per_node_watch_update(registry, state, cur_ts) { if self.wants_per_node_watch_update(registry, state, cur_ts) {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because per_node_watch wants update", self.record_key);
return true; return true;
} }
// If the desired parameters have changed, then we should reconcile with them // If the desired parameters have changed, then we should reconcile with them
state.params() != desired if state.params() != desired {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): needs_reconcile because desired params have changed: {} != {}", self.record_key, state.params(), desired);
return true;
}
false
} }
/// Returns true if we need to update our per-node watches due to expiration, /// Returns true if we need to update our per-node watches due to expiration,
@ -233,6 +256,7 @@ impl OutboundWatch {
&& (state.params().expiration_ts.as_u64() == 0 && (state.params().expiration_ts.as_u64() == 0
|| renew_ts < state.params().expiration_ts) || renew_ts < state.params().expiration_ts)
{ {
veilid_log!(registry debug target: "dht", "OutboundWatch({}): wants_per_node_watch_update because cur_ts is in expiration renew window", self.record_key);
return true; return true;
} }
@ -244,6 +268,7 @@ impl OutboundWatch {
for vcr in state.value_changed_routes() { for vcr in state.value_changed_routes() {
if rss.get_route_id_for_key(vcr).is_none() { if rss.get_route_id_for_key(vcr).is_none() {
// Route we would receive value changes on is dead // Route we would receive value changes on is dead
veilid_log!(registry debug target: "dht", "OutboundWatch({}): wants_per_node_watch_update because route is dead: {}", self.record_key, vcr);
return true; return true;
} }
} }

View file

@ -4,7 +4,7 @@ const L2_CACHE_DEPTH: usize = 4; // XXX: i just picked this. we could probably d
#[derive(Debug, Clone, Eq, PartialEq)] #[derive(Debug, Clone, Eq, PartialEq)]
pub struct InspectCacheL2Value { pub struct InspectCacheL2Value {
pub seqs: Vec<ValueSeqNum>, pub seqs: Vec<Option<ValueSeqNum>>,
} }
#[derive(Debug, Clone, Eq, PartialEq)] #[derive(Debug, Clone, Eq, PartialEq)]
@ -67,7 +67,7 @@ impl InspectCache {
continue; continue;
}; };
if idx < entry.1.seqs.len() { if idx < entry.1.seqs.len() {
entry.1.seqs[idx] = seq; entry.1.seqs[idx] = Some(seq);
} else { } else {
panic!( panic!(
"representational error in l2 inspect cache: {} >= {}", "representational error in l2 inspect cache: {} >= {}",

View file

@ -128,11 +128,55 @@ pub struct GetResult {
#[derive(Default, Clone, Debug)] #[derive(Default, Clone, Debug)]
pub struct InspectResult { pub struct InspectResult {
/// The actual in-schema subkey range being reported on /// The actual in-schema subkey range being reported on
pub subkeys: ValueSubkeyRangeSet, subkeys: ValueSubkeyRangeSet,
/// The sequence map /// The sequence map
pub seqs: Vec<ValueSeqNum>, seqs: Vec<Option<ValueSeqNum>>,
/// The descriptor if we got a fresh one or empty if no descriptor was needed /// The descriptor if we got a fresh one or empty if no descriptor was needed
pub opt_descriptor: Option<Arc<SignedValueDescriptor>>, opt_descriptor: Option<Arc<SignedValueDescriptor>>,
}
impl InspectResult {
pub fn new(
registry_accessor: &impl VeilidComponentRegistryAccessor,
requested_subkeys: ValueSubkeyRangeSet,
log_context: &str,
subkeys: ValueSubkeyRangeSet,
seqs: Vec<Option<ValueSeqNum>>,
opt_descriptor: Option<Arc<SignedValueDescriptor>>,
) -> VeilidAPIResult<Self> {
#[allow(clippy::unnecessary_cast)]
{
if subkeys.len() as u64 != seqs.len() as u64 {
veilid_log!(registry_accessor error "{}: mismatch between subkeys returned and sequence number list returned: {}!={}", log_context, subkeys.len(), seqs.len());
apibail_internal!("list length mismatch");
}
}
if !subkeys.is_subset(&requested_subkeys) {
veilid_log!(registry_accessor error "{}: more subkeys returned than requested: {} not a subset of {}", log_context, subkeys, requested_subkeys);
apibail_internal!("invalid subkeys returned");
}
Ok(InspectResult {
subkeys,
seqs,
opt_descriptor,
})
}
pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
&self.subkeys
}
pub fn seqs(&self) -> &[Option<ValueSeqNum>] {
&self.seqs
}
pub fn seqs_mut(&mut self) -> &mut [Option<ValueSeqNum>] {
&mut self.seqs
}
pub fn opt_descriptor(&self) -> Option<Arc<SignedValueDescriptor>> {
self.opt_descriptor.clone()
}
pub fn drop_descriptor(&mut self) {
self.opt_descriptor = None;
}
} }
impl<D> RecordStore<D> impl<D> RecordStore<D>
@ -822,18 +866,18 @@ where
pub async fn inspect_record( pub async fn inspect_record(
&mut self, &mut self,
key: TypedKey, key: TypedKey,
subkeys: ValueSubkeyRangeSet, subkeys: &ValueSubkeyRangeSet,
want_descriptor: bool, want_descriptor: bool,
) -> VeilidAPIResult<Option<InspectResult>> { ) -> VeilidAPIResult<Option<InspectResult>> {
// Get record from index // Get record from index
let Some((subkeys, opt_descriptor)) = self.with_record(key, |record| { let Some((schema_subkeys, opt_descriptor)) = self.with_record(key, |record| {
// Get number of subkeys from schema and ensure we are getting the // Get number of subkeys from schema and ensure we are getting the
// right number of sequence numbers between that and what we asked for // right number of sequence numbers between that and what we asked for
let truncated_subkeys = record let schema_subkeys = record
.schema() .schema()
.truncate_subkeys(&subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN)); .truncate_subkeys(subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN));
( (
truncated_subkeys, schema_subkeys,
if want_descriptor { if want_descriptor {
Some(record.descriptor().clone()) Some(record.descriptor().clone())
} else { } else {
@ -846,56 +890,60 @@ where
}; };
// Check if we can return some subkeys // Check if we can return some subkeys
if subkeys.is_empty() { if schema_subkeys.is_empty() {
apibail_invalid_argument!("subkeys set does not overlap schema", "subkeys", subkeys); apibail_invalid_argument!(
"subkeys set does not overlap schema",
"subkeys",
schema_subkeys
);
} }
// See if we have this inspection cached // See if we have this inspection cached
if let Some(icv) = self.inspect_cache.get(&key, &subkeys) { if let Some(icv) = self.inspect_cache.get(&key, &schema_subkeys) {
return Ok(Some(InspectResult { return Ok(Some(InspectResult::new(
subkeys, self,
seqs: icv.seqs, subkeys.clone(),
"inspect_record",
schema_subkeys.clone(),
icv.seqs,
opt_descriptor, opt_descriptor,
})); )?));
} }
// Build sequence number list to return // Build sequence number list to return
#[allow(clippy::unnecessary_cast)] #[allow(clippy::unnecessary_cast)]
let mut seqs = Vec::with_capacity(subkeys.len() as usize); let mut seqs = Vec::with_capacity(schema_subkeys.len() as usize);
for subkey in subkeys.iter() { for subkey in schema_subkeys.iter() {
let stk = SubkeyTableKey { key, subkey }; let stk = SubkeyTableKey { key, subkey };
let seq = if let Some(record_data) = self.subkey_cache.peek(&stk) { let opt_seq = if let Some(record_data) = self.subkey_cache.peek(&stk) {
record_data.signed_value_data().value_data().seq() Some(record_data.signed_value_data().value_data().seq())
} else { } else {
// If not in cache, try to pull from table store if it is in our stored subkey set // If not in cache, try to pull from table store if it is in our stored subkey set
// XXX: This would be better if it didn't have to pull the whole record data to get the seq. // XXX: This would be better if it didn't have to pull the whole record data to get the seq.
if let Some(record_data) = self self.subkey_table
.subkey_table
.load_json::<RecordData>(0, &stk.bytes()) .load_json::<RecordData>(0, &stk.bytes())
.await .await
.map_err(VeilidAPIError::internal)? .map_err(VeilidAPIError::internal)?
{ .map(|record_data| record_data.signed_value_data().value_data().seq())
record_data.signed_value_data().value_data().seq()
} else {
// Subkey not written to
ValueSubkey::MAX
}
}; };
seqs.push(seq) seqs.push(opt_seq)
} }
// Save seqs cache // Save seqs cache
self.inspect_cache.put( self.inspect_cache.put(
key, key,
subkeys.clone(), schema_subkeys.clone(),
InspectCacheL2Value { seqs: seqs.clone() }, InspectCacheL2Value { seqs: seqs.clone() },
); );
Ok(Some(InspectResult { Ok(Some(InspectResult::new(
subkeys, self,
subkeys.clone(),
"inspect_record",
schema_subkeys,
seqs, seqs,
opt_descriptor, opt_descriptor,
})) )?))
} }
#[instrument(level = "trace", target = "stor", skip_all, err)] #[instrument(level = "trace", target = "stor", skip_all, err)]
@ -1242,7 +1290,7 @@ where
changes.push(ValueChangedInfo { changes.push(ValueChangedInfo {
target: evci.target, target: evci.target,
key: evci.key, record_key: evci.key,
subkeys: evci.subkeys, subkeys: evci.subkeys,
count: evci.count, count: evci.count,
watch_id: evci.watch_id, watch_id: evci.watch_id,

View file

@ -0,0 +1,271 @@
use super::{inspect_value::OutboundInspectValueResult, *};
/// Outcome of a record rehydration attempt, returned by `rehydrate_record`.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct RehydrateReport {
    /// The record key rehydrated
    record_key: TypedKey,
    /// The requested range of subkeys to rehydrate if necessary
    subkeys: ValueSubkeyRangeSet,
    /// The requested consensus count
    consensus_count: usize,
    /// The range of subkeys that wanted rehydration
    wanted: ValueSubkeyRangeSet,
    /// The range of subkeys that actually could be rehydrated
    rehydrated: ValueSubkeyRangeSet,
}
/// A pending background rehydration request for a single record, queued by
/// `add_rehydration_request` and serviced by the rehydration task routine.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(super) struct RehydrationRequest {
    /// Subkey range to consider for rehydration
    pub subkeys: ValueSubkeyRangeSet,
    /// Subkeys whose online consensus node count is below this threshold
    /// are scheduled for a background write
    pub consensus_count: usize,
}
impl StorageManager {
/// Queue a background rehydration request for a record.
/// If a request for the same record is already pending, the subkey ranges
/// are merged and the larger consensus count is kept.
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn add_rehydration_request(
    &self,
    record_key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    consensus_count: usize,
) {
    let request = RehydrationRequest {
        subkeys,
        consensus_count,
    };

    veilid_log!(self debug "Adding rehydration request: {} {:?}", record_key, request);

    let mut inner = self.inner.lock().await;
    if let Some(existing) = inner.rehydration_requests.get_mut(&record_key) {
        // Merge into the outstanding request for this record
        existing.subkeys = existing.subkeys.union(&request.subkeys);
        existing.consensus_count.max_assign(request.consensus_count);
    } else {
        inner.rehydration_requests.insert(record_key, request);
    }
}
/// Sends the local copies of all of a record's subkeys back to the network
/// Triggers a subkey update if the consensus on the subkey is less than
/// the specified 'consensus_count'.
/// The subkey updates are performed in the background if rehydration was
/// determined to be necessary.
/// If a newer copy of a subkey's data is available online, the background
/// write will pick up the newest subkey data as it does the SetValue fanout
/// and will drive the newest values to consensus.
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
pub(super) async fn rehydrate_record(
    &self,
    record_key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    consensus_count: usize,
) -> VeilidAPIResult<RehydrateReport> {
    veilid_log!(self debug "Checking for record rehydration: {} {} @ consensus {}", record_key, subkeys, consensus_count);

    // Get subkey range for consideration (an empty set means 'all subkeys')
    let subkeys = if subkeys.is_empty() {
        ValueSubkeyRangeSet::full()
    } else {
        subkeys
    };

    // Get safety selection: prefer the opened record's selection, otherwise
    // use the one persisted with the record in the local record store
    let mut inner = self.inner.lock().await;
    let safety_selection = {
        if let Some(opened_record) = inner.opened_records.get(&record_key) {
            opened_record.safety_selection()
        } else {
            // See if it's in the local record store
            let Some(local_record_store) = inner.local_record_store.as_mut() else {
                apibail_not_initialized!();
            };
            let Some(safety_selection) =
                local_record_store.with_record(record_key, |rec| rec.detail().safety_selection)
            else {
                apibail_key_not_found!(record_key);
            };
            safety_selection
        }
    };

    // See if the requested record is in our local record store
    let local_inspect_result = self
        .handle_inspect_local_value_inner(&mut inner, record_key, subkeys.clone(), true)
        .await?;

    // Get rpc processor and drop mutex so we don't block while getting the value from the network
    if !self.dht_is_online() {
        apibail_try_again!("offline, try again later");
    };

    // Drop the lock for network access
    drop(inner);

    // Get the inspect record report from the network
    let result = self
        .outbound_inspect_value(
            record_key,
            subkeys.clone(),
            safety_selection,
            InspectResult::default(),
            true,
        )
        .await?;

    // If online result had no subkeys, then trigger writing the entire record in the background
    if result.inspect_result.subkeys().is_empty()
        || result.inspect_result.opt_descriptor().is_none()
    {
        return self
            .rehydrate_all_subkeys(
                record_key,
                subkeys,
                consensus_count,
                safety_selection,
                local_inspect_result,
            )
            .await;
    }

    // Otherwise, only rehydrate the subkeys that lack sufficient online consensus
    return self
        .rehydrate_required_subkeys(
            record_key,
            subkeys,
            consensus_count,
            safety_selection,
            local_inspect_result,
            result,
        )
        .await;
}
/// Schedule a background offline write for every subkey that has data in
/// the local record store. Used by `rehydrate_record` when the network
/// inspection returned no subkeys or no descriptor for the record.
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
pub(super) async fn rehydrate_all_subkeys(
    &self,
    record_key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    consensus_count: usize,
    safety_selection: SafetySelection,
    local_inspect_result: InspectResult,
) -> VeilidAPIResult<RehydrateReport> {
    let mut inner = self.inner.lock().await;

    veilid_log!(self debug "Rehydrating all subkeys: record={} subkeys={}", record_key, local_inspect_result.subkeys());

    // Queue a write for each locally-held subkey; a 'None' sequence number
    // means the subkey was never written locally and cannot be rehydrated
    let mut rehydrated = ValueSubkeyRangeSet::new();
    for (idx, subkey) in local_inspect_result.subkeys().iter().enumerate() {
        if local_inspect_result.seqs()[idx].is_none() {
            continue;
        }
        veilid_log!(self debug "Rehydrating: record={} subkey={}", record_key, subkey);
        rehydrated.insert(subkey);
        Self::add_offline_subkey_write_inner(&mut inner, record_key, subkey, safety_selection);
    }

    if rehydrated.is_empty() {
        veilid_log!(self debug "Record wanted full rehydrating, but no subkey data available: record={} subkeys={}", record_key, subkeys);
    } else {
        veilid_log!(self debug "Record full rehydrating: record={} subkeys={} rehydrated={}", record_key, subkeys, rehydrated);
    }

    Ok(RehydrateReport {
        record_key,
        subkeys,
        consensus_count,
        wanted: local_inspect_result.subkeys().clone(),
        rehydrated,
    })
}
/// Rehydrate only the subkeys whose online consensus is below `consensus_count`.
/// The local and outbound inspect results must cover the same subkey set,
/// one entry per subkey; each under-consensus subkey that has local data is
/// scheduled for a background offline write.
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
pub(super) async fn rehydrate_required_subkeys(
    &self,
    record_key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    consensus_count: usize,
    safety_selection: SafetySelection,
    local_inspect_result: InspectResult,
    outbound_inspect_result: OutboundInspectValueResult,
) -> VeilidAPIResult<RehydrateReport> {
    let mut inner = self.inner.lock().await;

    // Get cryptosystem
    let crypto = self.crypto();
    let Some(vcrypto) = crypto.get(record_key.kind) else {
        apibail_generic!("unsupported cryptosystem");
    };

    // Local seqs and network fanout results must line up one-to-one per subkey
    if local_inspect_result.subkeys().len()
        != outbound_inspect_result.subkey_fanout_results.len() as u64
    {
        veilid_log!(self debug "Subkey count mismatch when rehydrating required subkeys: record={} {} != {}",
            record_key, local_inspect_result.subkeys().len(), outbound_inspect_result.subkey_fanout_results.len());
        apibail_internal!("subkey count mismatch");
    }

    // For each subkey, determine if we should rehydrate it
    let mut wanted = ValueSubkeyRangeSet::new();
    let mut rehydrated = ValueSubkeyRangeSet::new();
    for (n, subkey) in local_inspect_result.subkeys().iter().enumerate() {
        let sfr = outbound_inspect_result
            .subkey_fanout_results
            .get(n)
            .unwrap();
        // Does the online subkey have enough consensus?
        // If not, schedule it to be written in the background
        if sfr.consensus_nodes.len() < consensus_count {
            wanted.insert(subkey);
            // Can only rehydrate subkeys we actually hold data for locally
            if local_inspect_result.seqs()[n].is_some() {
                // Add to offline writes to flush
                veilid_log!(self debug "Rehydrating: record={} subkey={}", record_key, subkey);
                rehydrated.insert(subkey);
                Self::add_offline_subkey_write_inner(
                    &mut inner,
                    record_key,
                    subkey,
                    safety_selection,
                );
            }
        }
    }

    if wanted.is_empty() {
        veilid_log!(self debug "Record did not need rehydrating: record={} local_subkeys={}", record_key, local_inspect_result.subkeys());
    } else if rehydrated.is_empty() {
        veilid_log!(self debug "Record wanted rehydrating, but no subkey data available: record={} local_subkeys={} wanted={}", record_key, local_inspect_result.subkeys(), wanted);
    } else {
        veilid_log!(self debug "Record rehydrating: record={} local_subkeys={} wanted={} rehydrated={}", record_key, local_inspect_result.subkeys(), wanted, rehydrated);
    }

    // Keep the list of nodes that returned a value for later reference
    let results_iter = outbound_inspect_result
        .inspect_result
        .subkeys()
        .iter()
        .map(ValueSubkeyRangeSet::single)
        .zip(outbound_inspect_result.subkey_fanout_results.into_iter());

    Self::process_fanout_results_inner(
        &mut inner,
        &vcrypto,
        record_key,
        results_iter,
        false,
        self.config()
            .with(|c| c.network.dht.set_value_count as usize),
    );

    Ok(RehydrateReport {
        record_key,
        subkeys,
        consensus_count,
        wanted,
        rehydrated,
    })
}
}

View file

@ -28,7 +28,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)] #[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_set_value( pub(super) async fn outbound_set_value(
&self, &self,
key: TypedKey, record_key: TypedKey,
subkey: ValueSubkey, subkey: ValueSubkey,
safety_selection: SafetySelection, safety_selection: SafetySelection,
value: Arc<SignedValueData>, value: Arc<SignedValueData>,
@ -48,7 +48,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout // Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = { let init_fanout_queue = {
self.get_value_nodes(key) self.get_value_nodes(record_key)
.await? .await?
.unwrap_or_default() .unwrap_or_default()
.into_iter() .into_iter()
@ -99,7 +99,7 @@ impl StorageManager {
.rpc_call_set_value( .rpc_call_set_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)) Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection), .with_safety(safety_selection),
key, record_key,
subkey, subkey,
(*value).clone(), (*value).clone(),
(*descriptor).clone(), (*descriptor).clone(),
@ -228,7 +228,7 @@ impl StorageManager {
let routing_table = registry.routing_table(); let routing_table = registry.routing_table();
let fanout_call = FanoutCall::new( let fanout_call = FanoutCall::new(
&routing_table, &routing_table,
key, record_key,
key_count, key_count,
fanout, fanout,
consensus_count, consensus_count,

View file

@ -2,6 +2,7 @@ pub mod check_inbound_watches;
pub mod check_outbound_watches; pub mod check_outbound_watches;
pub mod flush_record_stores; pub mod flush_record_stores;
pub mod offline_subkey_writes; pub mod offline_subkey_writes;
pub mod rehydrate_records;
pub mod save_metadata; pub mod save_metadata;
pub mod send_value_changes; pub mod send_value_changes;
@ -55,6 +56,15 @@ impl StorageManager {
check_inbound_watches_task, check_inbound_watches_task,
check_inbound_watches_task_routine check_inbound_watches_task_routine
); );
// Set rehydrate records tick task
veilid_log!(self debug "starting rehydrate records task");
impl_setup_task!(
self,
Self,
rehydrate_records_task,
rehydrate_records_task_routine
);
} }
#[instrument(parent = None, level = "trace", target = "stor", name = "StorageManager::tick", skip_all, err)] #[instrument(parent = None, level = "trace", target = "stor", name = "StorageManager::tick", skip_all, err)]
@ -78,6 +88,11 @@ impl StorageManager {
self.offline_subkey_writes_task.tick().await?; self.offline_subkey_writes_task.tick().await?;
} }
// Do requested rehydrations
if self.has_rehydration_requests().await {
self.rehydrate_records_task.tick().await?;
}
// Send value changed notifications // Send value changed notifications
self.send_value_changes_task.tick().await?; self.send_value_changes_task.tick().await?;
} }
@ -106,5 +121,9 @@ impl StorageManager {
if let Err(e) = self.offline_subkey_writes_task.stop().await { if let Err(e) = self.offline_subkey_writes_task.stop().await {
veilid_log!(self warn "offline_subkey_writes_task not stopped: {}", e); veilid_log!(self warn "offline_subkey_writes_task not stopped: {}", e);
} }
veilid_log!(self debug "stopping record rehydration task");
if let Err(e) = self.rehydrate_records_task.stop().await {
veilid_log!(self warn "rehydrate_records_task not stopped: {}", e);
}
} }
} }

View file

@ -0,0 +1,50 @@
use super::*;
impl_veilid_log_facility!("stor");
impl StorageManager {
    /// Process background rehydration requests
    #[instrument(level = "trace", target = "stor", skip_all, err)]
    pub(super) async fn rehydrate_records_task_routine(
        &self,
        stop_token: StopToken,
        _last_ts: Timestamp,
        _cur_ts: Timestamp,
    ) -> EyreResult<()> {
        // Take the whole pending request set, leaving an empty one behind
        let reqs = {
            let mut inner = self.inner.lock().await;
            core::mem::take(&mut inner.rehydration_requests)
        };

        // Build one future per request so they can run in batches
        let mut futs = Vec::with_capacity(reqs.len());
        for (record_key, request) in reqs {
            futs.push(async move {
                let res = self
                    .rehydrate_record(record_key, request.subkeys.clone(), request.consensus_count)
                    .await;
                ((record_key, request), res)
            });
        }

        process_batched_future_queue(
            futs,
            REHYDRATE_BATCH_SIZE,
            stop_token,
            |((record_key, request), res)| async move {
                // On failure, requeue the request so it gets retried
                if let Err(e) = res {
                    veilid_log!(self debug "Rehydration request failed: {}", e);
                    // Try again later
                    self.add_rehydration_request(record_key, request.subkeys, request.consensus_count)
                        .await;
                }
            },
        )
        .await;

        Ok(())
    }
}

View file

@ -263,7 +263,7 @@ impl StorageManager {
let disposition = if wva.answer.accepted { let disposition = if wva.answer.accepted {
if wva.answer.expiration_ts.as_u64() > 0 { if wva.answer.expiration_ts.as_u64() > 0 {
// If the expiration time is greater than zero this watch is active // If the expiration time is greater than zero this watch is active
veilid_log!(registry debug "WatchValue accepted for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node); veilid_log!(registry debug target:"dht", "WatchValue accepted for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
// Add to accepted watches // Add to accepted watches
let mut ctx = context.lock(); let mut ctx = context.lock();
@ -279,7 +279,7 @@ impl StorageManager {
// If the returned expiration time is zero, this watch was cancelled // If the returned expiration time is zero, this watch was cancelled
// If the expiration time is greater than zero this watch is active // If the expiration time is greater than zero this watch is active
veilid_log!(registry debug "WatchValue rejected for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node); veilid_log!(registry debug target:"dht", "WatchValue rejected for {}: id={} expiration_ts={} ({})", record_key, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
// Add to rejected watches // Add to rejected watches
let mut ctx = context.lock(); let mut ctx = context.lock();
@ -344,10 +344,10 @@ impl StorageManager {
let fanout_result = fanout_call.run(init_fanout_queue).await.inspect_err(|e| { let fanout_result = fanout_call.run(init_fanout_queue).await.inspect_err(|e| {
// If we finished with an error, return that // If we finished with an error, return that
veilid_log!(self debug "WatchValue fanout error: {}", e); veilid_log!(self debug target:"dht", "WatchValue fanout error: {}", e);
})?; })?;
veilid_log!(self debug "WatchValue Fanout: {:#}", fanout_result); veilid_log!(self debug target:"dht", "WatchValue Fanout: {:#}", fanout_result);
// Get cryptosystem // Get cryptosystem
let crypto = self.crypto(); let crypto = self.crypto();
@ -476,7 +476,7 @@ impl StorageManager {
cancelled.push(pnk); cancelled.push(pnk);
} }
Err(e) => { Err(e) => {
veilid_log!(self debug "outbound watch cancel error: {}", e); veilid_log!(self debug "Outbound watch cancel error: {}", e);
// xxx should do something different for network unreachable vs host unreachable // xxx should do something different for network unreachable vs host unreachable
// Leave in the 'per node states' for now because we couldn't contact the node // Leave in the 'per node states' for now because we couldn't contact the node
@ -604,7 +604,7 @@ impl StorageManager {
}; };
} }
Err(e) => { Err(e) => {
veilid_log!(self debug "outbound watch change error: {}", e); veilid_log!(self debug "Outbound watch change error: {}", e);
} }
} }
} }
@ -718,7 +718,7 @@ impl StorageManager {
}); });
} }
Err(e) => { Err(e) => {
veilid_log!(self debug "outbound watch fanout error: {}", e); veilid_log!(self debug "Outbound watch fanout error: {}", e);
} }
} }
@ -742,11 +742,11 @@ impl StorageManager {
.outbound_watches .outbound_watches
.get_mut(&record_key) .get_mut(&record_key)
else { else {
veilid_log!(self warn "outbound watch should have still been in the table"); veilid_log!(self warn "Outbound watch should have still been in the table");
return; return;
}; };
let Some(desired) = outbound_watch.desired() else { let Some(desired) = outbound_watch.desired() else {
veilid_log!(self warn "watch with result should have desired params"); veilid_log!(self warn "Watch with result should have desired params");
return; return;
}; };
@ -852,6 +852,10 @@ impl StorageManager {
.config() .config()
.with(|c| c.network.dht.get_value_count as usize); .with(|c| c.network.dht.get_value_count as usize);
// Operate on this watch only if it isn't already being operated on
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
// Terminate the 'desired' params for watches // Terminate the 'desired' params for watches
// that have no remaining count or have expired // that have no remaining count or have expired
outbound_watch.try_expire_desired_state(cur_ts); outbound_watch.try_expire_desired_state(cur_ts);
@ -859,9 +863,6 @@ impl StorageManager {
// Check states // Check states
if outbound_watch.is_dead() { if outbound_watch.is_dead() {
// Outbound watch is dead // Outbound watch is dead
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = { let fut = {
let registry = self.registry(); let registry = self.registry();
async move { async move {
@ -874,9 +875,6 @@ impl StorageManager {
return Some(pin_dyn_future!(fut)); return Some(pin_dyn_future!(fut));
} else if outbound_watch.needs_cancel(&registry) { } else if outbound_watch.needs_cancel(&registry) {
// Outbound watch needs to be cancelled // Outbound watch needs to be cancelled
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = { let fut = {
let registry = self.registry(); let registry = self.registry();
async move { async move {
@ -889,9 +887,6 @@ impl StorageManager {
return Some(pin_dyn_future!(fut)); return Some(pin_dyn_future!(fut));
} else if outbound_watch.needs_renew(&registry, consensus_count, cur_ts) { } else if outbound_watch.needs_renew(&registry, consensus_count, cur_ts) {
// Outbound watch expired but can be renewed // Outbound watch expired but can be renewed
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = { let fut = {
let registry = self.registry(); let registry = self.registry();
async move { async move {
@ -904,9 +899,6 @@ impl StorageManager {
return Some(pin_dyn_future!(fut)); return Some(pin_dyn_future!(fut));
} else if outbound_watch.needs_reconcile(&registry, consensus_count, cur_ts) { } else if outbound_watch.needs_reconcile(&registry, consensus_count, cur_ts) {
// Outbound watch parameters have changed or it needs more nodes // Outbound watch parameters have changed or it needs more nodes
let watch_lock =
opt_watch_lock.or_else(|| self.outbound_watch_lock_table.try_lock_tag(key))?;
let fut = { let fut = {
let registry = self.registry(); let registry = self.registry();
async move { async move {
@ -944,12 +936,12 @@ impl StorageManager {
return; return;
} }
}; };
let mut changed_subkeys = report.changed_subkeys(); let mut newer_online_subkeys = report.newer_online_subkeys();
// Get changed first changed subkey until we find one to report // Get changed first changed subkey until we find one to report
let mut n = 0; let mut n = 0;
while !changed_subkeys.is_empty() { while !newer_online_subkeys.is_empty() {
let first_changed_subkey = changed_subkeys.first().unwrap(); let first_changed_subkey = newer_online_subkeys.first().unwrap();
let value = match this.get_value(record_key, first_changed_subkey, true).await { let value = match this.get_value(record_key, first_changed_subkey, true).await {
Ok(v) => v, Ok(v) => v,
@ -960,7 +952,8 @@ impl StorageManager {
}; };
if let Some(value) = value { if let Some(value) = value {
if value.seq() > report.local_seqs()[n] { let opt_local_seq = report.local_seqs()[n];
if opt_local_seq.is_none() || value.seq() > opt_local_seq.unwrap() {
// Calculate the update // Calculate the update
let (changed_subkeys, remaining_count, value) = { let (changed_subkeys, remaining_count, value) = {
let _watch_lock = let _watch_lock =
@ -991,7 +984,7 @@ impl StorageManager {
}, },
); );
(changed_subkeys, remaining_count, value) (newer_online_subkeys, remaining_count, value)
}; };
// Send the update // Send the update
@ -1008,7 +1001,7 @@ impl StorageManager {
} }
// If we didn't send an update, remove the first changed subkey and try again // If we didn't send an update, remove the first changed subkey and try again
changed_subkeys.pop_first(); newer_online_subkeys.pop_first();
n += 1; n += 1;
} }
} }
@ -1111,14 +1104,14 @@ impl StorageManager {
inner.outbound_watch_manager.per_node_states.get_mut(&pnk) inner.outbound_watch_manager.per_node_states.get_mut(&pnk)
else { else {
// No per node state means no callback // No per node state means no callback
veilid_log!(self warn "missing per node state in outbound watch: {:?}", pnk); veilid_log!(self warn "Missing per node state in outbound watch: {:?}", pnk);
return Ok(NetworkResult::value(())); return Ok(NetworkResult::value(()));
}; };
// If watch id doesn't match it's for an older watch and should be ignored // If watch id doesn't match it's for an older watch and should be ignored
if per_node_state.watch_id != watch_id { if per_node_state.watch_id != watch_id {
// No per node state means no callback // No per node state means no callback
veilid_log!(self warn "incorrect watch id for per node state in outbound watch: {:?} {} != {}", pnk, per_node_state.watch_id, watch_id); veilid_log!(self warn "Incorrect watch id for per node state in outbound watch: {:?} {} != {}", pnk, per_node_state.watch_id, watch_id);
return Ok(NetworkResult::value(())); return Ok(NetworkResult::value(()));
} }
@ -1127,7 +1120,7 @@ impl StorageManager {
// If count is greater than our requested count then this is invalid, cancel the watch // If count is greater than our requested count then this is invalid, cancel the watch
// XXX: Should this be a punishment? // XXX: Should this be a punishment?
veilid_log!(self debug veilid_log!(self debug
"watch count went backward: {} @ {} id={}: {} > {}", "Watch count went backward: {} @ {} id={}: {} > {}",
record_key, record_key,
inbound_node_id, inbound_node_id,
watch_id, watch_id,
@ -1143,7 +1136,7 @@ impl StorageManager {
// Log this because watch counts should always be decrementing non a per-node basis. // Log this because watch counts should always be decrementing non a per-node basis.
// XXX: Should this be a punishment? // XXX: Should this be a punishment?
veilid_log!(self debug veilid_log!(self debug
"watch count duplicate: {} @ {} id={}: {} == {}", "Watch count duplicate: {} @ {} id={}: {} == {}",
record_key, record_key,
inbound_node_id, inbound_node_id,
watch_id, watch_id,
@ -1153,7 +1146,7 @@ impl StorageManager {
} else { } else {
// Reduce the per-node watch count // Reduce the per-node watch count
veilid_log!(self debug veilid_log!(self debug
"watch count decremented: {} @ {} id={}: {} < {}", "Watch count decremented: {} @ {} id={}: {} < {}",
record_key, record_key,
inbound_node_id, inbound_node_id,
watch_id, watch_id,
@ -1285,7 +1278,7 @@ impl StorageManager {
remaining_count, remaining_count,
Some(value), Some(value),
); );
} else if reportable_subkeys.len() > 0 { } else if !reportable_subkeys.is_empty() {
// We have subkeys that have be reported as possibly changed // We have subkeys that have be reported as possibly changed
// but not a specific record reported, so we should defer reporting and // but not a specific record reported, so we should defer reporting and
// inspect the range to see what changed // inspect the range to see what changed

View file

@ -1902,7 +1902,7 @@ impl VeilidAPI {
let (key, rc) = let (key, rc) =
self.clone() self.clone()
.get_opened_dht_record_context(&args, "debug_record_watch", "key", 1)?; .get_opened_dht_record_context(&args, "debug_record_inspect", "key", 1)?;
let mut rest_defaults = false; let mut rest_defaults = false;
@ -1947,6 +1947,62 @@ impl VeilidAPI {
Ok(format!("Success: report={:?}", report)) Ok(format!("Success: report={:?}", report))
} }
/// Debug command: queue a rehydration request for a record.
/// Usage: rehydrate <key> [<subkeys>] [<consensus count>]
async fn debug_record_rehydrate(&self, args: Vec<String>) -> VeilidAPIResult<String> {
    let registry = self.core_context()?.registry();
    let storage_manager = registry.storage_manager();

    // Required record key argument
    let key = get_debug_argument_at(
        &args,
        1,
        "debug_record_rehydrate",
        "key",
        get_dht_key_no_safety,
    )?;

    // Optional arguments: once one fails to parse, the rest use defaults
    let mut rest_defaults = false;

    let subkeys =
        get_debug_argument_at(&args, 2, "debug_record_rehydrate", "subkeys", get_subkeys)
            .inspect_err(|_| {
                rest_defaults = true;
            })
            .ok();

    let consensus_count = if rest_defaults {
        None
    } else {
        get_debug_argument_at(
            &args,
            3,
            "debug_record_rehydrate",
            "consensus_count",
            get_number,
        )
        .ok()
    };

    // Queue the rehydration request; consensus count defaults to the
    // configured dht.get_value_count
    storage_manager
        .add_rehydration_request(
            key,
            subkeys.unwrap_or_default(),
            consensus_count.unwrap_or_else(|| {
                registry
                    .config()
                    .with(|c| c.network.dht.get_value_count as usize)
            }),
        )
        .await;

    Ok("Request added".to_owned())
}
async fn debug_record(&self, args: String) -> VeilidAPIResult<String> { async fn debug_record(&self, args: String) -> VeilidAPIResult<String> {
let args: Vec<String> = let args: Vec<String> =
shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?; shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?;
@ -1977,6 +2033,8 @@ impl VeilidAPI {
self.debug_record_cancel(args).await self.debug_record_cancel(args).await
} else if command == "inspect" { } else if command == "inspect" {
self.debug_record_inspect(args).await self.debug_record_inspect(args).await
} else if command == "rehydrate" {
self.debug_record_rehydrate(args).await
} else { } else {
Ok(">>> Unknown command\n".to_owned()) Ok(">>> Unknown command\n".to_owned())
} }
@ -2144,6 +2202,7 @@ DHT Operations:
watch [<key>] [<subkeys> [<expiration> [<count>]]] - watch a record for changes watch [<key>] [<subkeys> [<expiration> [<count>]]] - watch a record for changes
cancel [<key>] [<subkeys>] - cancel a dht record watch cancel [<key>] [<subkeys>] - cancel a dht record watch
inspect [<key>] [<scope> [<subkeys>]] - display a dht record's subkey status inspect [<key>] [<scope> [<subkeys>]] - display a dht record's subkey status
rehydrate <key> [<subkeys>] [<consensus count>] - send a dht record's expired local data back to the network
TableDB Operations: TableDB Operations:
table list - list the names of all the tables in the TableDB table list - list the names of all the tables in the TableDB

View file

@ -26,7 +26,7 @@ pub struct DHTRecordDescriptor {
from_impl_to_jsvalue!(DHTRecordDescriptor); from_impl_to_jsvalue!(DHTRecordDescriptor);
impl DHTRecordDescriptor { impl DHTRecordDescriptor {
pub fn new( pub(crate) fn new(
key: TypedKey, key: TypedKey,
owner: PublicKey, owner: PublicKey,
owner_secret: Option<SecretKey>, owner_secret: Option<SecretKey>,

View file

@ -16,25 +16,56 @@ pub struct DHTRecordReport {
/// The subkeys that have been writen offline that still need to be flushed /// The subkeys that have been writen offline that still need to be flushed
offline_subkeys: ValueSubkeyRangeSet, offline_subkeys: ValueSubkeyRangeSet,
/// The sequence numbers of each subkey requested from a locally stored DHT Record /// The sequence numbers of each subkey requested from a locally stored DHT Record
local_seqs: Vec<ValueSeqNum>, local_seqs: Vec<Option<ValueSeqNum>>,
/// The sequence numbers of each subkey requested from the DHT over the network /// The sequence numbers of each subkey requested from the DHT over the network
network_seqs: Vec<ValueSeqNum>, network_seqs: Vec<Option<ValueSeqNum>>,
} }
from_impl_to_jsvalue!(DHTRecordReport); from_impl_to_jsvalue!(DHTRecordReport);
impl DHTRecordReport { impl DHTRecordReport {
pub fn new( pub(crate) fn new(
subkeys: ValueSubkeyRangeSet, subkeys: ValueSubkeyRangeSet,
offline_subkeys: ValueSubkeyRangeSet, offline_subkeys: ValueSubkeyRangeSet,
local_seqs: Vec<ValueSeqNum>, local_seqs: Vec<Option<ValueSeqNum>>,
network_seqs: Vec<ValueSeqNum>, network_seqs: Vec<Option<ValueSeqNum>>,
) -> Self { ) -> VeilidAPIResult<Self> {
Self { if subkeys.is_full() {
apibail_invalid_argument!("subkeys range should be exact", "subkeys", subkeys);
}
if subkeys.is_empty() {
apibail_invalid_argument!("subkeys range should not be empty", "subkeys", subkeys);
}
if subkeys.len() > MAX_INSPECT_VALUE_A_SEQS_LEN as u64 {
apibail_invalid_argument!("subkeys range is too large", "subkeys", subkeys);
}
if subkeys.len() != local_seqs.len() as u64 {
apibail_invalid_argument!(
"local seqs list does not match subkey length",
"local_seqs",
local_seqs.len()
);
}
if subkeys.len() != network_seqs.len() as u64 {
apibail_invalid_argument!(
"network seqs list does not match subkey length",
"network_seqs",
network_seqs.len()
);
}
if !offline_subkeys.is_subset(&subkeys) {
apibail_invalid_argument!(
"offline subkeys is not a subset of the whole subkey set",
"offline_subkeys",
offline_subkeys
);
}
Ok(Self {
subkeys, subkeys,
offline_subkeys, offline_subkeys,
local_seqs, local_seqs,
network_seqs, network_seqs,
} })
} }
pub fn subkeys(&self) -> &ValueSubkeyRangeSet { pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
@ -44,26 +75,28 @@ impl DHTRecordReport {
&self.offline_subkeys &self.offline_subkeys
} }
#[must_use] #[must_use]
pub fn local_seqs(&self) -> &[ValueSeqNum] { pub fn local_seqs(&self) -> &[Option<ValueSeqNum>] {
&self.local_seqs &self.local_seqs
} }
#[must_use] #[must_use]
pub fn network_seqs(&self) -> &[ValueSeqNum] { pub fn network_seqs(&self) -> &[Option<ValueSeqNum>] {
&self.network_seqs &self.network_seqs
} }
pub fn changed_subkeys(&self) -> ValueSubkeyRangeSet { pub fn newer_online_subkeys(&self) -> ValueSubkeyRangeSet {
let mut changed = ValueSubkeyRangeSet::new(); let mut newer_online = ValueSubkeyRangeSet::new();
for ((sk, lseq), nseq) in self for ((sk, lseq), nseq) in self
.subkeys .subkeys
.iter() .iter()
.zip(self.local_seqs.iter()) .zip(self.local_seqs.iter())
.zip(self.network_seqs.iter()) .zip(self.network_seqs.iter())
{ {
if nseq > lseq { if let Some(nseq) = nseq {
changed.insert(sk); if lseq.is_none() || *nseq > lseq.unwrap() {
newer_online.insert(sk);
} }
} }
changed }
newer_online
} }
} }

View file

@ -19,7 +19,7 @@ pub type ValueSubkey = u32;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type ValueSeqNum = u32; pub type ValueSeqNum = u32;
pub(crate) fn debug_seqs(seqs: &[ValueSeqNum]) -> String { pub(crate) fn debug_seqs(seqs: &[Option<ValueSeqNum>]) -> String {
let mut col = 0; let mut col = 0;
let mut out = String::new(); let mut out = String::new();
let mut left = seqs.len(); let mut left = seqs.len();
@ -27,10 +27,10 @@ pub(crate) fn debug_seqs(seqs: &[ValueSeqNum]) -> String {
if col == 0 { if col == 0 {
out += " "; out += " ";
} }
let sc = if *s == ValueSeqNum::MAX { let sc = if let Some(s) = s {
"-".to_owned()
} else {
s.to_string() s.to_string()
} else {
"-".to_owned()
}; };
out += &sc; out += &sc;
out += ","; out += ",";

View file

@ -53,6 +53,24 @@ impl ValueSubkeyRangeSet {
Self::new_with_data(&self.data | &other.data) Self::new_with_data(&self.data | &other.data)
} }
#[must_use]
#[allow(clippy::unnecessary_cast)]
pub fn len(&self) -> u64 {
self.data.len() as u64
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
#[must_use]
pub fn is_full(&self) -> bool {
self.data.ranges_len() == 1
&& self.data.first().unwrap() == u32::MIN
&& self.data.last().unwrap() == u32::MAX
}
#[must_use] #[must_use]
pub fn data(&self) -> &RangeSetBlaze<ValueSubkey> { pub fn data(&self) -> &RangeSetBlaze<ValueSubkey> {
&self.data &self.data

View file

@ -2,6 +2,7 @@ include: package:lint_hard/all.yaml
analyzer: analyzer:
errors: errors:
invalid_annotation_target: ignore invalid_annotation_target: ignore
one_member_abstracts: ignore
exclude: exclude:
- '**/*.g.dart' - '**/*.g.dart'
- '**/*.freezed.dart' - '**/*.freezed.dart'

View file

@ -241,10 +241,19 @@ Future<void> testOpenWriterDHTValue() async {
throwsA(isA<VeilidAPIException>())); throwsA(isA<VeilidAPIException>()));
// Verify subkey 0 can be set because override with the right writer // Verify subkey 0 can be set because override with the right writer
expect( // Should have prior sequence number as its returned value because it
await rc.setDHTValue(key, 0, va, // exists online at seq 0
writer: KeyPair(key: owner, secret: secret)), vdtemp = await rc.setDHTValue(key, 0, va,
isNull); writer: KeyPair(key: owner, secret: secret));
expect(vdtemp, isNotNull);
expect(vdtemp!.data, equals(vb));
expect(vdtemp.seq, equals(0));
expect(vdtemp.writer, equals(owner));
// Should update the second time to seq 1
vdtemp = await rc.setDHTValue(key, 0, va,
writer: KeyPair(key: owner, secret: secret));
expect(vdtemp, isNull);
// Clean up // Clean up
await rc.closeDHTRecord(key); await rc.closeDHTRecord(key);
@ -452,16 +461,18 @@ Future<void> testInspectDHTRecord() async {
expect(await rc.setDHTValue(rec.key, 0, utf8.encode('BLAH BLAH BLAH')), expect(await rc.setDHTValue(rec.key, 0, utf8.encode('BLAH BLAH BLAH')),
isNull); isNull);
await settle(rc, rec.key, 0);
final rr = await rc.inspectDHTRecord(rec.key); final rr = await rc.inspectDHTRecord(rec.key);
expect(rr.subkeys, equals([ValueSubkeyRange.make(0, 1)])); expect(rr.subkeys, equals([ValueSubkeyRange.make(0, 1)]));
expect(rr.localSeqs, equals([0, 0xFFFFFFFF])); expect(rr.localSeqs, equals([0, null]));
expect(rr.networkSeqs, equals([])); expect(rr.networkSeqs, equals([null, null]));
final rr2 = final rr2 =
await rc.inspectDHTRecord(rec.key, scope: DHTReportScope.syncGet); await rc.inspectDHTRecord(rec.key, scope: DHTReportScope.syncGet);
expect(rr2.subkeys, equals([ValueSubkeyRange.make(0, 1)])); expect(rr2.subkeys, equals([ValueSubkeyRange.make(0, 1)]));
expect(rr2.localSeqs, equals([0, 0xFFFFFFFF])); expect(rr2.localSeqs, equals([0, null]));
expect(rr2.networkSeqs, equals([0, 0xFFFFFFFF])); expect(rr2.networkSeqs, equals([0, null]));
await rc.closeDHTRecord(rec.key); await rc.closeDHTRecord(rec.key);
await rc.deleteDHTRecord(rec.key); await rc.deleteDHTRecord(rec.key);

View file

@ -14,6 +14,7 @@ Future<void> testRoutingContexts() async {
{ {
final rc = await Veilid.instance.routingContext(); final rc = await Veilid.instance.routingContext();
final rcp = rc.withDefaultSafety(); final rcp = rc.withDefaultSafety();
// More debuggable this way
// ignore: cascade_invocations // ignore: cascade_invocations
rcp.close(); rcp.close();
rc.close(); rc.close();
@ -22,6 +23,7 @@ Future<void> testRoutingContexts() async {
{ {
final rc = await Veilid.instance.routingContext(); final rc = await Veilid.instance.routingContext();
final rcp = rc.withSequencing(Sequencing.ensureOrdered); final rcp = rc.withSequencing(Sequencing.ensureOrdered);
// More debuggable this way
// ignore: cascade_invocations // ignore: cascade_invocations
rcp.close(); rcp.close();
rc.close(); rc.close();
@ -34,6 +36,7 @@ Future<void> testRoutingContexts() async {
hopCount: 2, hopCount: 2,
stability: Stability.lowLatency, stability: Stability.lowLatency,
sequencing: Sequencing.noPreference))); sequencing: Sequencing.noPreference)));
// More debuggable this way
// ignore: cascade_invocations // ignore: cascade_invocations
rcp.close(); rcp.close();
rc.close(); rc.close();
@ -42,6 +45,7 @@ Future<void> testRoutingContexts() async {
final rc = await Veilid.instance.routingContext(); final rc = await Veilid.instance.routingContext();
final rcp = rc.withSafety( final rcp = rc.withSafety(
const SafetySelectionUnsafe(sequencing: Sequencing.preferOrdered)); const SafetySelectionUnsafe(sequencing: Sequencing.preferOrdered));
// More debuggable this way
// ignore: cascade_invocations // ignore: cascade_invocations
rcp.close(); rcp.close();
rc.close(); rc.close();

View file

@ -81,7 +81,7 @@ sealed class DHTSchema with _$DHTSchema {
const DHTSchema defaultDHTSchema = DHTSchema.dflt(oCnt: 1); const DHTSchema defaultDHTSchema = DHTSchema.dflt(oCnt: 1);
@freezed @freezed
class DHTSchemaMember with _$DHTSchemaMember { sealed class DHTSchemaMember with _$DHTSchemaMember {
@Assert('mCnt > 0 && mCnt <= 65535', 'value out of range') @Assert('mCnt > 0 && mCnt <= 65535', 'value out of range')
const factory DHTSchemaMember({ const factory DHTSchemaMember({
required PublicKey mKey, required PublicKey mKey,
@ -96,7 +96,7 @@ class DHTSchemaMember with _$DHTSchemaMember {
/// DHTRecordDescriptor /// DHTRecordDescriptor
@freezed @freezed
class DHTRecordDescriptor with _$DHTRecordDescriptor { sealed class DHTRecordDescriptor with _$DHTRecordDescriptor {
const factory DHTRecordDescriptor({ const factory DHTRecordDescriptor({
required TypedKey key, required TypedKey key,
required PublicKey owner, required PublicKey owner,
@ -134,7 +134,7 @@ extension DHTRecordDescriptorExt on DHTRecordDescriptor {
/// ValueData /// ValueData
@freezed @freezed
class ValueData with _$ValueData { sealed class ValueData with _$ValueData {
@Assert('seq >= 0', 'seq out of range') @Assert('seq >= 0', 'seq out of range')
const factory ValueData({ const factory ValueData({
required int seq, required int seq,
@ -224,7 +224,7 @@ class SafetySelectionSafe extends Equatable implements SafetySelection {
/// Options for safety routes (sender privacy) /// Options for safety routes (sender privacy)
@freezed @freezed
class SafetySpec with _$SafetySpec { sealed class SafetySpec with _$SafetySpec {
const factory SafetySpec({ const factory SafetySpec({
required int hopCount, required int hopCount,
required Stability stability, required Stability stability,
@ -239,7 +239,7 @@ class SafetySpec with _$SafetySpec {
////////////////////////////////////// //////////////////////////////////////
/// RouteBlob /// RouteBlob
@freezed @freezed
class RouteBlob with _$RouteBlob { sealed class RouteBlob with _$RouteBlob {
const factory RouteBlob( const factory RouteBlob(
{required String routeId, {required String routeId,
@Uint8ListJsonConverter() required Uint8List blob}) = _RouteBlob; @Uint8ListJsonConverter() required Uint8List blob}) = _RouteBlob;
@ -250,12 +250,12 @@ class RouteBlob with _$RouteBlob {
////////////////////////////////////// //////////////////////////////////////
/// Inspect /// Inspect
@freezed @freezed
class DHTRecordReport with _$DHTRecordReport { sealed class DHTRecordReport with _$DHTRecordReport {
const factory DHTRecordReport({ const factory DHTRecordReport({
required List<ValueSubkeyRange> subkeys, required List<ValueSubkeyRange> subkeys,
required List<ValueSubkeyRange> offlineSubkeys, required List<ValueSubkeyRange> offlineSubkeys,
required List<int> localSeqs, required List<int?> localSeqs,
required List<int> networkSeqs, required List<int?> networkSeqs,
}) = _DHTRecordReport; }) = _DHTRecordReport;
factory DHTRecordReport.fromJson(dynamic json) => factory DHTRecordReport.fromJson(dynamic json) =>
_$DHTRecordReportFromJson(json as Map<String, dynamic>); _$DHTRecordReportFromJson(json as Map<String, dynamic>);

File diff suppressed because it is too large Load diff

View file

@ -6,20 +6,20 @@ part of 'routing_context.dart';
// JsonSerializableGenerator // JsonSerializableGenerator
// ************************************************************************** // **************************************************************************
_$DHTSchemaDFLTImpl _$$DHTSchemaDFLTImplFromJson(Map<String, dynamic> json) => DHTSchemaDFLT _$DHTSchemaDFLTFromJson(Map<String, dynamic> json) =>
_$DHTSchemaDFLTImpl( DHTSchemaDFLT(
oCnt: (json['o_cnt'] as num).toInt(), oCnt: (json['o_cnt'] as num).toInt(),
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$DHTSchemaDFLTImplToJson(_$DHTSchemaDFLTImpl instance) => Map<String, dynamic> _$DHTSchemaDFLTToJson(DHTSchemaDFLT instance) =>
<String, dynamic>{ <String, dynamic>{
'o_cnt': instance.oCnt, 'o_cnt': instance.oCnt,
'kind': instance.$type, 'kind': instance.$type,
}; };
_$DHTSchemaSMPLImpl _$$DHTSchemaSMPLImplFromJson(Map<String, dynamic> json) => DHTSchemaSMPL _$DHTSchemaSMPLFromJson(Map<String, dynamic> json) =>
_$DHTSchemaSMPLImpl( DHTSchemaSMPL(
oCnt: (json['o_cnt'] as num).toInt(), oCnt: (json['o_cnt'] as num).toInt(),
members: (json['members'] as List<dynamic>) members: (json['members'] as List<dynamic>)
.map(DHTSchemaMember.fromJson) .map(DHTSchemaMember.fromJson)
@ -27,30 +27,27 @@ _$DHTSchemaSMPLImpl _$$DHTSchemaSMPLImplFromJson(Map<String, dynamic> json) =>
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$DHTSchemaSMPLImplToJson(_$DHTSchemaSMPLImpl instance) => Map<String, dynamic> _$DHTSchemaSMPLToJson(DHTSchemaSMPL instance) =>
<String, dynamic>{ <String, dynamic>{
'o_cnt': instance.oCnt, 'o_cnt': instance.oCnt,
'members': instance.members.map((e) => e.toJson()).toList(), 'members': instance.members.map((e) => e.toJson()).toList(),
'kind': instance.$type, 'kind': instance.$type,
}; };
_$DHTSchemaMemberImpl _$$DHTSchemaMemberImplFromJson( _DHTSchemaMember _$DHTSchemaMemberFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _DHTSchemaMember(
_$DHTSchemaMemberImpl(
mKey: FixedEncodedString43.fromJson(json['m_key']), mKey: FixedEncodedString43.fromJson(json['m_key']),
mCnt: (json['m_cnt'] as num).toInt(), mCnt: (json['m_cnt'] as num).toInt(),
); );
Map<String, dynamic> _$$DHTSchemaMemberImplToJson( Map<String, dynamic> _$DHTSchemaMemberToJson(_DHTSchemaMember instance) =>
_$DHTSchemaMemberImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'm_key': instance.mKey.toJson(), 'm_key': instance.mKey.toJson(),
'm_cnt': instance.mCnt, 'm_cnt': instance.mCnt,
}; };
_$DHTRecordDescriptorImpl _$$DHTRecordDescriptorImplFromJson( _DHTRecordDescriptor _$DHTRecordDescriptorFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _DHTRecordDescriptor(
_$DHTRecordDescriptorImpl(
key: Typed<FixedEncodedString43>.fromJson(json['key']), key: Typed<FixedEncodedString43>.fromJson(json['key']),
owner: FixedEncodedString43.fromJson(json['owner']), owner: FixedEncodedString43.fromJson(json['owner']),
schema: DHTSchema.fromJson(json['schema']), schema: DHTSchema.fromJson(json['schema']),
@ -59,8 +56,8 @@ _$DHTRecordDescriptorImpl _$$DHTRecordDescriptorImplFromJson(
: FixedEncodedString43.fromJson(json['owner_secret']), : FixedEncodedString43.fromJson(json['owner_secret']),
); );
Map<String, dynamic> _$$DHTRecordDescriptorImplToJson( Map<String, dynamic> _$DHTRecordDescriptorToJson(
_$DHTRecordDescriptorImpl instance) => _DHTRecordDescriptor instance) =>
<String, dynamic>{ <String, dynamic>{
'key': instance.key.toJson(), 'key': instance.key.toJson(),
'owner': instance.owner.toJson(), 'owner': instance.owner.toJson(),
@ -68,29 +65,27 @@ Map<String, dynamic> _$$DHTRecordDescriptorImplToJson(
'owner_secret': instance.ownerSecret?.toJson(), 'owner_secret': instance.ownerSecret?.toJson(),
}; };
_$ValueDataImpl _$$ValueDataImplFromJson(Map<String, dynamic> json) => _ValueData _$ValueDataFromJson(Map<String, dynamic> json) => _ValueData(
_$ValueDataImpl(
seq: (json['seq'] as num).toInt(), seq: (json['seq'] as num).toInt(),
data: const Uint8ListJsonConverter.jsIsArray().fromJson(json['data']), data: const Uint8ListJsonConverter.jsIsArray().fromJson(json['data']),
writer: FixedEncodedString43.fromJson(json['writer']), writer: FixedEncodedString43.fromJson(json['writer']),
); );
Map<String, dynamic> _$$ValueDataImplToJson(_$ValueDataImpl instance) => Map<String, dynamic> _$ValueDataToJson(_ValueData instance) =>
<String, dynamic>{ <String, dynamic>{
'seq': instance.seq, 'seq': instance.seq,
'data': const Uint8ListJsonConverter.jsIsArray().toJson(instance.data), 'data': const Uint8ListJsonConverter.jsIsArray().toJson(instance.data),
'writer': instance.writer.toJson(), 'writer': instance.writer.toJson(),
}; };
_$SafetySpecImpl _$$SafetySpecImplFromJson(Map<String, dynamic> json) => _SafetySpec _$SafetySpecFromJson(Map<String, dynamic> json) => _SafetySpec(
_$SafetySpecImpl(
hopCount: (json['hop_count'] as num).toInt(), hopCount: (json['hop_count'] as num).toInt(),
stability: Stability.fromJson(json['stability']), stability: Stability.fromJson(json['stability']),
sequencing: Sequencing.fromJson(json['sequencing']), sequencing: Sequencing.fromJson(json['sequencing']),
preferredRoute: json['preferred_route'] as String?, preferredRoute: json['preferred_route'] as String?,
); );
Map<String, dynamic> _$$SafetySpecImplToJson(_$SafetySpecImpl instance) => Map<String, dynamic> _$SafetySpecToJson(_SafetySpec instance) =>
<String, dynamic>{ <String, dynamic>{
'hop_count': instance.hopCount, 'hop_count': instance.hopCount,
'stability': instance.stability.toJson(), 'stability': instance.stability.toJson(),
@ -98,21 +93,19 @@ Map<String, dynamic> _$$SafetySpecImplToJson(_$SafetySpecImpl instance) =>
'preferred_route': instance.preferredRoute, 'preferred_route': instance.preferredRoute,
}; };
_$RouteBlobImpl _$$RouteBlobImplFromJson(Map<String, dynamic> json) => _RouteBlob _$RouteBlobFromJson(Map<String, dynamic> json) => _RouteBlob(
_$RouteBlobImpl(
routeId: json['route_id'] as String, routeId: json['route_id'] as String,
blob: const Uint8ListJsonConverter().fromJson(json['blob']), blob: const Uint8ListJsonConverter().fromJson(json['blob']),
); );
Map<String, dynamic> _$$RouteBlobImplToJson(_$RouteBlobImpl instance) => Map<String, dynamic> _$RouteBlobToJson(_RouteBlob instance) =>
<String, dynamic>{ <String, dynamic>{
'route_id': instance.routeId, 'route_id': instance.routeId,
'blob': const Uint8ListJsonConverter().toJson(instance.blob), 'blob': const Uint8ListJsonConverter().toJson(instance.blob),
}; };
_$DHTRecordReportImpl _$$DHTRecordReportImplFromJson( _DHTRecordReport _$DHTRecordReportFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _DHTRecordReport(
_$DHTRecordReportImpl(
subkeys: (json['subkeys'] as List<dynamic>) subkeys: (json['subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson) .map(ValueSubkeyRange.fromJson)
.toList(), .toList(),
@ -120,15 +113,14 @@ _$DHTRecordReportImpl _$$DHTRecordReportImplFromJson(
.map(ValueSubkeyRange.fromJson) .map(ValueSubkeyRange.fromJson)
.toList(), .toList(),
localSeqs: (json['local_seqs'] as List<dynamic>) localSeqs: (json['local_seqs'] as List<dynamic>)
.map((e) => (e as num).toInt()) .map((e) => (e as num?)?.toInt())
.toList(), .toList(),
networkSeqs: (json['network_seqs'] as List<dynamic>) networkSeqs: (json['network_seqs'] as List<dynamic>)
.map((e) => (e as num).toInt()) .map((e) => (e as num?)?.toInt())
.toList(), .toList(),
); );
Map<String, dynamic> _$$DHTRecordReportImplToJson( Map<String, dynamic> _$DHTRecordReportToJson(_DHTRecordReport instance) =>
_$DHTRecordReportImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'subkeys': instance.subkeys.map((e) => e.toJson()).toList(), 'subkeys': instance.subkeys.map((e) => e.toJson()).toList(),
'offline_subkeys': 'offline_subkeys':

View file

@ -10,7 +10,8 @@ part 'veilid_config.g.dart';
////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////
// FFI Platform-specific config // FFI Platform-specific config
@freezed @freezed
class VeilidFFIConfigLoggingTerminal with _$VeilidFFIConfigLoggingTerminal { sealed class VeilidFFIConfigLoggingTerminal
with _$VeilidFFIConfigLoggingTerminal {
const factory VeilidFFIConfigLoggingTerminal({ const factory VeilidFFIConfigLoggingTerminal({
required bool enabled, required bool enabled,
required VeilidConfigLogLevel level, required VeilidConfigLogLevel level,
@ -22,7 +23,7 @@ class VeilidFFIConfigLoggingTerminal with _$VeilidFFIConfigLoggingTerminal {
} }
@freezed @freezed
class VeilidFFIConfigLoggingOtlp with _$VeilidFFIConfigLoggingOtlp { sealed class VeilidFFIConfigLoggingOtlp with _$VeilidFFIConfigLoggingOtlp {
const factory VeilidFFIConfigLoggingOtlp({ const factory VeilidFFIConfigLoggingOtlp({
required bool enabled, required bool enabled,
required VeilidConfigLogLevel level, required VeilidConfigLogLevel level,
@ -36,7 +37,7 @@ class VeilidFFIConfigLoggingOtlp with _$VeilidFFIConfigLoggingOtlp {
} }
@freezed @freezed
class VeilidFFIConfigLoggingApi with _$VeilidFFIConfigLoggingApi { sealed class VeilidFFIConfigLoggingApi with _$VeilidFFIConfigLoggingApi {
const factory VeilidFFIConfigLoggingApi({ const factory VeilidFFIConfigLoggingApi({
required bool enabled, required bool enabled,
required VeilidConfigLogLevel level, required VeilidConfigLogLevel level,
@ -48,7 +49,7 @@ class VeilidFFIConfigLoggingApi with _$VeilidFFIConfigLoggingApi {
} }
@freezed @freezed
class VeilidFFIConfigLoggingFlame with _$VeilidFFIConfigLoggingFlame { sealed class VeilidFFIConfigLoggingFlame with _$VeilidFFIConfigLoggingFlame {
const factory VeilidFFIConfigLoggingFlame({ const factory VeilidFFIConfigLoggingFlame({
required bool enabled, required bool enabled,
required String path, required String path,
@ -59,7 +60,7 @@ class VeilidFFIConfigLoggingFlame with _$VeilidFFIConfigLoggingFlame {
} }
@freezed @freezed
class VeilidFFIConfigLogging with _$VeilidFFIConfigLogging { sealed class VeilidFFIConfigLogging with _$VeilidFFIConfigLogging {
const factory VeilidFFIConfigLogging( const factory VeilidFFIConfigLogging(
{required VeilidFFIConfigLoggingTerminal terminal, {required VeilidFFIConfigLoggingTerminal terminal,
required VeilidFFIConfigLoggingOtlp otlp, required VeilidFFIConfigLoggingOtlp otlp,
@ -71,7 +72,7 @@ class VeilidFFIConfigLogging with _$VeilidFFIConfigLogging {
} }
@freezed @freezed
class VeilidFFIConfig with _$VeilidFFIConfig { sealed class VeilidFFIConfig with _$VeilidFFIConfig {
const factory VeilidFFIConfig({ const factory VeilidFFIConfig({
required VeilidFFIConfigLogging logging, required VeilidFFIConfigLogging logging,
}) = _VeilidFFIConfig; }) = _VeilidFFIConfig;
@ -84,7 +85,7 @@ class VeilidFFIConfig with _$VeilidFFIConfig {
// WASM Platform-specific config // WASM Platform-specific config
@freezed @freezed
class VeilidWASMConfigLoggingPerformance sealed class VeilidWASMConfigLoggingPerformance
with _$VeilidWASMConfigLoggingPerformance { with _$VeilidWASMConfigLoggingPerformance {
const factory VeilidWASMConfigLoggingPerformance({ const factory VeilidWASMConfigLoggingPerformance({
required bool enabled, required bool enabled,
@ -100,7 +101,7 @@ class VeilidWASMConfigLoggingPerformance
} }
@freezed @freezed
class VeilidWASMConfigLoggingApi with _$VeilidWASMConfigLoggingApi { sealed class VeilidWASMConfigLoggingApi with _$VeilidWASMConfigLoggingApi {
const factory VeilidWASMConfigLoggingApi({ const factory VeilidWASMConfigLoggingApi({
required bool enabled, required bool enabled,
required VeilidConfigLogLevel level, required VeilidConfigLogLevel level,
@ -112,7 +113,7 @@ class VeilidWASMConfigLoggingApi with _$VeilidWASMConfigLoggingApi {
} }
@freezed @freezed
class VeilidWASMConfigLogging with _$VeilidWASMConfigLogging { sealed class VeilidWASMConfigLogging with _$VeilidWASMConfigLogging {
const factory VeilidWASMConfigLogging( const factory VeilidWASMConfigLogging(
{required VeilidWASMConfigLoggingPerformance performance, {required VeilidWASMConfigLoggingPerformance performance,
required VeilidWASMConfigLoggingApi api}) = _VeilidWASMConfigLogging; required VeilidWASMConfigLoggingApi api}) = _VeilidWASMConfigLogging;
@ -122,7 +123,7 @@ class VeilidWASMConfigLogging with _$VeilidWASMConfigLogging {
} }
@freezed @freezed
class VeilidWASMConfig with _$VeilidWASMConfig { sealed class VeilidWASMConfig with _$VeilidWASMConfig {
const factory VeilidWASMConfig({ const factory VeilidWASMConfig({
required VeilidWASMConfigLogging logging, required VeilidWASMConfigLogging logging,
}) = _VeilidWASMConfig; }) = _VeilidWASMConfig;
@ -151,7 +152,7 @@ enum VeilidConfigLogLevel {
/// VeilidConfig /// VeilidConfig
@freezed @freezed
class VeilidConfigHTTPS with _$VeilidConfigHTTPS { sealed class VeilidConfigHTTPS with _$VeilidConfigHTTPS {
const factory VeilidConfigHTTPS({ const factory VeilidConfigHTTPS({
required bool enabled, required bool enabled,
required String listenAddress, required String listenAddress,
@ -166,7 +167,7 @@ class VeilidConfigHTTPS with _$VeilidConfigHTTPS {
//////////// ////////////
@freezed @freezed
class VeilidConfigHTTP with _$VeilidConfigHTTP { sealed class VeilidConfigHTTP with _$VeilidConfigHTTP {
const factory VeilidConfigHTTP({ const factory VeilidConfigHTTP({
required bool enabled, required bool enabled,
required String listenAddress, required String listenAddress,
@ -181,7 +182,7 @@ class VeilidConfigHTTP with _$VeilidConfigHTTP {
//////////// ////////////
@freezed @freezed
class VeilidConfigApplication with _$VeilidConfigApplication { sealed class VeilidConfigApplication with _$VeilidConfigApplication {
const factory VeilidConfigApplication({ const factory VeilidConfigApplication({
required VeilidConfigHTTPS https, required VeilidConfigHTTPS https,
required VeilidConfigHTTP http, required VeilidConfigHTTP http,
@ -193,7 +194,7 @@ class VeilidConfigApplication with _$VeilidConfigApplication {
//////////// ////////////
@freezed @freezed
class VeilidConfigUDP with _$VeilidConfigUDP { sealed class VeilidConfigUDP with _$VeilidConfigUDP {
const factory VeilidConfigUDP( const factory VeilidConfigUDP(
{required bool enabled, {required bool enabled,
required int socketPoolSize, required int socketPoolSize,
@ -206,7 +207,7 @@ class VeilidConfigUDP with _$VeilidConfigUDP {
//////////// ////////////
@freezed @freezed
class VeilidConfigTCP with _$VeilidConfigTCP { sealed class VeilidConfigTCP with _$VeilidConfigTCP {
const factory VeilidConfigTCP( const factory VeilidConfigTCP(
{required bool connect, {required bool connect,
required bool listen, required bool listen,
@ -220,7 +221,7 @@ class VeilidConfigTCP with _$VeilidConfigTCP {
//////////// ////////////
@freezed @freezed
class VeilidConfigWS with _$VeilidConfigWS { sealed class VeilidConfigWS with _$VeilidConfigWS {
const factory VeilidConfigWS( const factory VeilidConfigWS(
{required bool connect, {required bool connect,
required bool listen, required bool listen,
@ -235,7 +236,7 @@ class VeilidConfigWS with _$VeilidConfigWS {
//////////// ////////////
@freezed @freezed
class VeilidConfigWSS with _$VeilidConfigWSS { sealed class VeilidConfigWSS with _$VeilidConfigWSS {
const factory VeilidConfigWSS( const factory VeilidConfigWSS(
{required bool connect, {required bool connect,
required bool listen, required bool listen,
@ -251,7 +252,7 @@ class VeilidConfigWSS with _$VeilidConfigWSS {
//////////// ////////////
@freezed @freezed
class VeilidConfigProtocol with _$VeilidConfigProtocol { sealed class VeilidConfigProtocol with _$VeilidConfigProtocol {
const factory VeilidConfigProtocol({ const factory VeilidConfigProtocol({
required VeilidConfigUDP udp, required VeilidConfigUDP udp,
required VeilidConfigTCP tcp, required VeilidConfigTCP tcp,
@ -266,7 +267,7 @@ class VeilidConfigProtocol with _$VeilidConfigProtocol {
//////////// ////////////
@freezed @freezed
class VeilidConfigTLS with _$VeilidConfigTLS { sealed class VeilidConfigTLS with _$VeilidConfigTLS {
const factory VeilidConfigTLS({ const factory VeilidConfigTLS({
required String certificatePath, required String certificatePath,
required String privateKeyPath, required String privateKeyPath,
@ -279,7 +280,7 @@ class VeilidConfigTLS with _$VeilidConfigTLS {
//////////// ////////////
@freezed @freezed
class VeilidConfigDHT with _$VeilidConfigDHT { sealed class VeilidConfigDHT with _$VeilidConfigDHT {
const factory VeilidConfigDHT({ const factory VeilidConfigDHT({
required int resolveNodeTimeoutMs, required int resolveNodeTimeoutMs,
required int resolveNodeCount, required int resolveNodeCount,
@ -312,7 +313,7 @@ class VeilidConfigDHT with _$VeilidConfigDHT {
//////////// ////////////
@freezed @freezed
class VeilidConfigRPC with _$VeilidConfigRPC { sealed class VeilidConfigRPC with _$VeilidConfigRPC {
const factory VeilidConfigRPC( const factory VeilidConfigRPC(
{required int concurrency, {required int concurrency,
required int queueSize, required int queueSize,
@ -329,7 +330,7 @@ class VeilidConfigRPC with _$VeilidConfigRPC {
//////////// ////////////
@freezed @freezed
class VeilidConfigRoutingTable with _$VeilidConfigRoutingTable { sealed class VeilidConfigRoutingTable with _$VeilidConfigRoutingTable {
const factory VeilidConfigRoutingTable({ const factory VeilidConfigRoutingTable({
required List<TypedKey> nodeId, required List<TypedKey> nodeId,
required List<TypedSecret> nodeIdSecret, required List<TypedSecret> nodeIdSecret,
@ -348,7 +349,7 @@ class VeilidConfigRoutingTable with _$VeilidConfigRoutingTable {
//////////// ////////////
@freezed @freezed
class VeilidConfigNetwork with _$VeilidConfigNetwork { sealed class VeilidConfigNetwork with _$VeilidConfigNetwork {
const factory VeilidConfigNetwork({ const factory VeilidConfigNetwork({
required int connectionInitialTimeoutMs, required int connectionInitialTimeoutMs,
required int connectionInactivityTimeoutMs, required int connectionInactivityTimeoutMs,
@ -378,7 +379,7 @@ class VeilidConfigNetwork with _$VeilidConfigNetwork {
//////////// ////////////
@freezed @freezed
class VeilidConfigTableStore with _$VeilidConfigTableStore { sealed class VeilidConfigTableStore with _$VeilidConfigTableStore {
const factory VeilidConfigTableStore({ const factory VeilidConfigTableStore({
required String directory, required String directory,
required bool delete, required bool delete,
@ -391,7 +392,7 @@ class VeilidConfigTableStore with _$VeilidConfigTableStore {
//////////// ////////////
@freezed @freezed
class VeilidConfigBlockStore with _$VeilidConfigBlockStore { sealed class VeilidConfigBlockStore with _$VeilidConfigBlockStore {
const factory VeilidConfigBlockStore({ const factory VeilidConfigBlockStore({
required String directory, required String directory,
required bool delete, required bool delete,
@ -404,7 +405,7 @@ class VeilidConfigBlockStore with _$VeilidConfigBlockStore {
//////////// ////////////
@freezed @freezed
class VeilidConfigProtectedStore with _$VeilidConfigProtectedStore { sealed class VeilidConfigProtectedStore with _$VeilidConfigProtectedStore {
const factory VeilidConfigProtectedStore( const factory VeilidConfigProtectedStore(
{required bool allowInsecureFallback, {required bool allowInsecureFallback,
required bool alwaysUseInsecureStorage, required bool alwaysUseInsecureStorage,
@ -420,7 +421,7 @@ class VeilidConfigProtectedStore with _$VeilidConfigProtectedStore {
//////////// ////////////
@freezed @freezed
class VeilidConfigCapabilities with _$VeilidConfigCapabilities { sealed class VeilidConfigCapabilities with _$VeilidConfigCapabilities {
const factory VeilidConfigCapabilities({ const factory VeilidConfigCapabilities({
required List<String> disable, required List<String> disable,
}) = _VeilidConfigCapabilities; }) = _VeilidConfigCapabilities;
@ -432,7 +433,7 @@ class VeilidConfigCapabilities with _$VeilidConfigCapabilities {
//////////// ////////////
@freezed @freezed
class VeilidConfig with _$VeilidConfig { sealed class VeilidConfig with _$VeilidConfig {
const factory VeilidConfig({ const factory VeilidConfig({
required String programName, required String programName,
required String namespace, required String namespace,

File diff suppressed because it is too large Load diff

View file

@ -6,9 +6,9 @@ part of 'veilid_config.dart';
// JsonSerializableGenerator // JsonSerializableGenerator
// ************************************************************************** // **************************************************************************
_$VeilidFFIConfigLoggingTerminalImpl _VeilidFFIConfigLoggingTerminal _$VeilidFFIConfigLoggingTerminalFromJson(
_$$VeilidFFIConfigLoggingTerminalImplFromJson(Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingTerminalImpl( _VeilidFFIConfigLoggingTerminal(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']), level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?) ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
@ -17,17 +17,17 @@ _$VeilidFFIConfigLoggingTerminalImpl
const [], const [],
); );
Map<String, dynamic> _$$VeilidFFIConfigLoggingTerminalImplToJson( Map<String, dynamic> _$VeilidFFIConfigLoggingTerminalToJson(
_$VeilidFFIConfigLoggingTerminalImpl instance) => _VeilidFFIConfigLoggingTerminal instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'level': instance.level.toJson(), 'level': instance.level.toJson(),
'ignore_log_targets': instance.ignoreLogTargets, 'ignore_log_targets': instance.ignoreLogTargets,
}; };
_$VeilidFFIConfigLoggingOtlpImpl _$$VeilidFFIConfigLoggingOtlpImplFromJson( _VeilidFFIConfigLoggingOtlp _$VeilidFFIConfigLoggingOtlpFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingOtlpImpl( _VeilidFFIConfigLoggingOtlp(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']), level: VeilidConfigLogLevel.fromJson(json['level']),
grpcEndpoint: json['grpc_endpoint'] as String, grpcEndpoint: json['grpc_endpoint'] as String,
@ -38,8 +38,8 @@ _$VeilidFFIConfigLoggingOtlpImpl _$$VeilidFFIConfigLoggingOtlpImplFromJson(
const [], const [],
); );
Map<String, dynamic> _$$VeilidFFIConfigLoggingOtlpImplToJson( Map<String, dynamic> _$VeilidFFIConfigLoggingOtlpToJson(
_$VeilidFFIConfigLoggingOtlpImpl instance) => _VeilidFFIConfigLoggingOtlp instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'level': instance.level.toJson(), 'level': instance.level.toJson(),
@ -48,9 +48,9 @@ Map<String, dynamic> _$$VeilidFFIConfigLoggingOtlpImplToJson(
'ignore_log_targets': instance.ignoreLogTargets, 'ignore_log_targets': instance.ignoreLogTargets,
}; };
_$VeilidFFIConfigLoggingApiImpl _$$VeilidFFIConfigLoggingApiImplFromJson( _VeilidFFIConfigLoggingApi _$VeilidFFIConfigLoggingApiFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingApiImpl( _VeilidFFIConfigLoggingApi(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']), level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?) ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
@ -59,39 +59,39 @@ _$VeilidFFIConfigLoggingApiImpl _$$VeilidFFIConfigLoggingApiImplFromJson(
const [], const [],
); );
Map<String, dynamic> _$$VeilidFFIConfigLoggingApiImplToJson( Map<String, dynamic> _$VeilidFFIConfigLoggingApiToJson(
_$VeilidFFIConfigLoggingApiImpl instance) => _VeilidFFIConfigLoggingApi instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'level': instance.level.toJson(), 'level': instance.level.toJson(),
'ignore_log_targets': instance.ignoreLogTargets, 'ignore_log_targets': instance.ignoreLogTargets,
}; };
_$VeilidFFIConfigLoggingFlameImpl _$$VeilidFFIConfigLoggingFlameImplFromJson( _VeilidFFIConfigLoggingFlame _$VeilidFFIConfigLoggingFlameFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingFlameImpl( _VeilidFFIConfigLoggingFlame(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
path: json['path'] as String, path: json['path'] as String,
); );
Map<String, dynamic> _$$VeilidFFIConfigLoggingFlameImplToJson( Map<String, dynamic> _$VeilidFFIConfigLoggingFlameToJson(
_$VeilidFFIConfigLoggingFlameImpl instance) => _VeilidFFIConfigLoggingFlame instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'path': instance.path, 'path': instance.path,
}; };
_$VeilidFFIConfigLoggingImpl _$$VeilidFFIConfigLoggingImplFromJson( _VeilidFFIConfigLogging _$VeilidFFIConfigLoggingFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingImpl( _VeilidFFIConfigLogging(
terminal: VeilidFFIConfigLoggingTerminal.fromJson(json['terminal']), terminal: VeilidFFIConfigLoggingTerminal.fromJson(json['terminal']),
otlp: VeilidFFIConfigLoggingOtlp.fromJson(json['otlp']), otlp: VeilidFFIConfigLoggingOtlp.fromJson(json['otlp']),
api: VeilidFFIConfigLoggingApi.fromJson(json['api']), api: VeilidFFIConfigLoggingApi.fromJson(json['api']),
flame: VeilidFFIConfigLoggingFlame.fromJson(json['flame']), flame: VeilidFFIConfigLoggingFlame.fromJson(json['flame']),
); );
Map<String, dynamic> _$$VeilidFFIConfigLoggingImplToJson( Map<String, dynamic> _$VeilidFFIConfigLoggingToJson(
_$VeilidFFIConfigLoggingImpl instance) => _VeilidFFIConfigLogging instance) =>
<String, dynamic>{ <String, dynamic>{
'terminal': instance.terminal.toJson(), 'terminal': instance.terminal.toJson(),
'otlp': instance.otlp.toJson(), 'otlp': instance.otlp.toJson(),
@ -99,22 +99,19 @@ Map<String, dynamic> _$$VeilidFFIConfigLoggingImplToJson(
'flame': instance.flame.toJson(), 'flame': instance.flame.toJson(),
}; };
_$VeilidFFIConfigImpl _$$VeilidFFIConfigImplFromJson( _VeilidFFIConfig _$VeilidFFIConfigFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidFFIConfig(
_$VeilidFFIConfigImpl(
logging: VeilidFFIConfigLogging.fromJson(json['logging']), logging: VeilidFFIConfigLogging.fromJson(json['logging']),
); );
Map<String, dynamic> _$$VeilidFFIConfigImplToJson( Map<String, dynamic> _$VeilidFFIConfigToJson(_VeilidFFIConfig instance) =>
_$VeilidFFIConfigImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'logging': instance.logging.toJson(), 'logging': instance.logging.toJson(),
}; };
_$VeilidWASMConfigLoggingPerformanceImpl _VeilidWASMConfigLoggingPerformance
_$$VeilidWASMConfigLoggingPerformanceImplFromJson( _$VeilidWASMConfigLoggingPerformanceFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidWASMConfigLoggingPerformance(
_$VeilidWASMConfigLoggingPerformanceImpl(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']), level: VeilidConfigLogLevel.fromJson(json['level']),
logsInTimings: json['logs_in_timings'] as bool, logsInTimings: json['logs_in_timings'] as bool,
@ -125,8 +122,8 @@ _$VeilidWASMConfigLoggingPerformanceImpl
const [], const [],
); );
Map<String, dynamic> _$$VeilidWASMConfigLoggingPerformanceImplToJson( Map<String, dynamic> _$VeilidWASMConfigLoggingPerformanceToJson(
_$VeilidWASMConfigLoggingPerformanceImpl instance) => _VeilidWASMConfigLoggingPerformance instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'level': instance.level.toJson(), 'level': instance.level.toJson(),
@ -135,9 +132,9 @@ Map<String, dynamic> _$$VeilidWASMConfigLoggingPerformanceImplToJson(
'ignore_log_targets': instance.ignoreLogTargets, 'ignore_log_targets': instance.ignoreLogTargets,
}; };
_$VeilidWASMConfigLoggingApiImpl _$$VeilidWASMConfigLoggingApiImplFromJson( _VeilidWASMConfigLoggingApi _$VeilidWASMConfigLoggingApiFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingApiImpl( _VeilidWASMConfigLoggingApi(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']), level: VeilidConfigLogLevel.fromJson(json['level']),
ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?) ignoreLogTargets: (json['ignore_log_targets'] as List<dynamic>?)
@ -146,52 +143,48 @@ _$VeilidWASMConfigLoggingApiImpl _$$VeilidWASMConfigLoggingApiImplFromJson(
const [], const [],
); );
Map<String, dynamic> _$$VeilidWASMConfigLoggingApiImplToJson( Map<String, dynamic> _$VeilidWASMConfigLoggingApiToJson(
_$VeilidWASMConfigLoggingApiImpl instance) => _VeilidWASMConfigLoggingApi instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'level': instance.level.toJson(), 'level': instance.level.toJson(),
'ignore_log_targets': instance.ignoreLogTargets, 'ignore_log_targets': instance.ignoreLogTargets,
}; };
_$VeilidWASMConfigLoggingImpl _$$VeilidWASMConfigLoggingImplFromJson( _VeilidWASMConfigLogging _$VeilidWASMConfigLoggingFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingImpl( _VeilidWASMConfigLogging(
performance: performance:
VeilidWASMConfigLoggingPerformance.fromJson(json['performance']), VeilidWASMConfigLoggingPerformance.fromJson(json['performance']),
api: VeilidWASMConfigLoggingApi.fromJson(json['api']), api: VeilidWASMConfigLoggingApi.fromJson(json['api']),
); );
Map<String, dynamic> _$$VeilidWASMConfigLoggingImplToJson( Map<String, dynamic> _$VeilidWASMConfigLoggingToJson(
_$VeilidWASMConfigLoggingImpl instance) => _VeilidWASMConfigLogging instance) =>
<String, dynamic>{ <String, dynamic>{
'performance': instance.performance.toJson(), 'performance': instance.performance.toJson(),
'api': instance.api.toJson(), 'api': instance.api.toJson(),
}; };
_$VeilidWASMConfigImpl _$$VeilidWASMConfigImplFromJson( _VeilidWASMConfig _$VeilidWASMConfigFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidWASMConfig(
_$VeilidWASMConfigImpl(
logging: VeilidWASMConfigLogging.fromJson(json['logging']), logging: VeilidWASMConfigLogging.fromJson(json['logging']),
); );
Map<String, dynamic> _$$VeilidWASMConfigImplToJson( Map<String, dynamic> _$VeilidWASMConfigToJson(_VeilidWASMConfig instance) =>
_$VeilidWASMConfigImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'logging': instance.logging.toJson(), 'logging': instance.logging.toJson(),
}; };
_$VeilidConfigHTTPSImpl _$$VeilidConfigHTTPSImplFromJson( _VeilidConfigHTTPS _$VeilidConfigHTTPSFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigHTTPS(
_$VeilidConfigHTTPSImpl(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
listenAddress: json['listen_address'] as String, listenAddress: json['listen_address'] as String,
path: json['path'] as String, path: json['path'] as String,
url: json['url'] as String?, url: json['url'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigHTTPSImplToJson( Map<String, dynamic> _$VeilidConfigHTTPSToJson(_VeilidConfigHTTPS instance) =>
_$VeilidConfigHTTPSImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'listen_address': instance.listenAddress, 'listen_address': instance.listenAddress,
@ -199,17 +192,15 @@ Map<String, dynamic> _$$VeilidConfigHTTPSImplToJson(
'url': instance.url, 'url': instance.url,
}; };
_$VeilidConfigHTTPImpl _$$VeilidConfigHTTPImplFromJson( _VeilidConfigHTTP _$VeilidConfigHTTPFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigHTTP(
_$VeilidConfigHTTPImpl(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
listenAddress: json['listen_address'] as String, listenAddress: json['listen_address'] as String,
path: json['path'] as String, path: json['path'] as String,
url: json['url'] as String?, url: json['url'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigHTTPImplToJson( Map<String, dynamic> _$VeilidConfigHTTPToJson(_VeilidConfigHTTP instance) =>
_$VeilidConfigHTTPImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'listen_address': instance.listenAddress, 'listen_address': instance.listenAddress,
@ -217,31 +208,29 @@ Map<String, dynamic> _$$VeilidConfigHTTPImplToJson(
'url': instance.url, 'url': instance.url,
}; };
_$VeilidConfigApplicationImpl _$$VeilidConfigApplicationImplFromJson( _VeilidConfigApplication _$VeilidConfigApplicationFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigApplicationImpl( _VeilidConfigApplication(
https: VeilidConfigHTTPS.fromJson(json['https']), https: VeilidConfigHTTPS.fromJson(json['https']),
http: VeilidConfigHTTP.fromJson(json['http']), http: VeilidConfigHTTP.fromJson(json['http']),
); );
Map<String, dynamic> _$$VeilidConfigApplicationImplToJson( Map<String, dynamic> _$VeilidConfigApplicationToJson(
_$VeilidConfigApplicationImpl instance) => _VeilidConfigApplication instance) =>
<String, dynamic>{ <String, dynamic>{
'https': instance.https.toJson(), 'https': instance.https.toJson(),
'http': instance.http.toJson(), 'http': instance.http.toJson(),
}; };
_$VeilidConfigUDPImpl _$$VeilidConfigUDPImplFromJson( _VeilidConfigUDP _$VeilidConfigUDPFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigUDP(
_$VeilidConfigUDPImpl(
enabled: json['enabled'] as bool, enabled: json['enabled'] as bool,
socketPoolSize: (json['socket_pool_size'] as num).toInt(), socketPoolSize: (json['socket_pool_size'] as num).toInt(),
listenAddress: json['listen_address'] as String, listenAddress: json['listen_address'] as String,
publicAddress: json['public_address'] as String?, publicAddress: json['public_address'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigUDPImplToJson( Map<String, dynamic> _$VeilidConfigUDPToJson(_VeilidConfigUDP instance) =>
_$VeilidConfigUDPImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'enabled': instance.enabled, 'enabled': instance.enabled,
'socket_pool_size': instance.socketPoolSize, 'socket_pool_size': instance.socketPoolSize,
@ -249,9 +238,8 @@ Map<String, dynamic> _$$VeilidConfigUDPImplToJson(
'public_address': instance.publicAddress, 'public_address': instance.publicAddress,
}; };
_$VeilidConfigTCPImpl _$$VeilidConfigTCPImplFromJson( _VeilidConfigTCP _$VeilidConfigTCPFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigTCP(
_$VeilidConfigTCPImpl(
connect: json['connect'] as bool, connect: json['connect'] as bool,
listen: json['listen'] as bool, listen: json['listen'] as bool,
maxConnections: (json['max_connections'] as num).toInt(), maxConnections: (json['max_connections'] as num).toInt(),
@ -259,8 +247,7 @@ _$VeilidConfigTCPImpl _$$VeilidConfigTCPImplFromJson(
publicAddress: json['public_address'] as String?, publicAddress: json['public_address'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigTCPImplToJson( Map<String, dynamic> _$VeilidConfigTCPToJson(_VeilidConfigTCP instance) =>
_$VeilidConfigTCPImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'connect': instance.connect, 'connect': instance.connect,
'listen': instance.listen, 'listen': instance.listen,
@ -269,8 +256,8 @@ Map<String, dynamic> _$$VeilidConfigTCPImplToJson(
'public_address': instance.publicAddress, 'public_address': instance.publicAddress,
}; };
_$VeilidConfigWSImpl _$$VeilidConfigWSImplFromJson(Map<String, dynamic> json) => _VeilidConfigWS _$VeilidConfigWSFromJson(Map<String, dynamic> json) =>
_$VeilidConfigWSImpl( _VeilidConfigWS(
connect: json['connect'] as bool, connect: json['connect'] as bool,
listen: json['listen'] as bool, listen: json['listen'] as bool,
maxConnections: (json['max_connections'] as num).toInt(), maxConnections: (json['max_connections'] as num).toInt(),
@ -279,8 +266,7 @@ _$VeilidConfigWSImpl _$$VeilidConfigWSImplFromJson(Map<String, dynamic> json) =>
url: json['url'] as String?, url: json['url'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigWSImplToJson( Map<String, dynamic> _$VeilidConfigWSToJson(_VeilidConfigWS instance) =>
_$VeilidConfigWSImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'connect': instance.connect, 'connect': instance.connect,
'listen': instance.listen, 'listen': instance.listen,
@ -290,9 +276,8 @@ Map<String, dynamic> _$$VeilidConfigWSImplToJson(
'url': instance.url, 'url': instance.url,
}; };
_$VeilidConfigWSSImpl _$$VeilidConfigWSSImplFromJson( _VeilidConfigWSS _$VeilidConfigWSSFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigWSS(
_$VeilidConfigWSSImpl(
connect: json['connect'] as bool, connect: json['connect'] as bool,
listen: json['listen'] as bool, listen: json['listen'] as bool,
maxConnections: (json['max_connections'] as num).toInt(), maxConnections: (json['max_connections'] as num).toInt(),
@ -301,8 +286,7 @@ _$VeilidConfigWSSImpl _$$VeilidConfigWSSImplFromJson(
url: json['url'] as String?, url: json['url'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigWSSImplToJson( Map<String, dynamic> _$VeilidConfigWSSToJson(_VeilidConfigWSS instance) =>
_$VeilidConfigWSSImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'connect': instance.connect, 'connect': instance.connect,
'listen': instance.listen, 'listen': instance.listen,
@ -312,17 +296,17 @@ Map<String, dynamic> _$$VeilidConfigWSSImplToJson(
'url': instance.url, 'url': instance.url,
}; };
_$VeilidConfigProtocolImpl _$$VeilidConfigProtocolImplFromJson( _VeilidConfigProtocol _$VeilidConfigProtocolFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigProtocolImpl( _VeilidConfigProtocol(
udp: VeilidConfigUDP.fromJson(json['udp']), udp: VeilidConfigUDP.fromJson(json['udp']),
tcp: VeilidConfigTCP.fromJson(json['tcp']), tcp: VeilidConfigTCP.fromJson(json['tcp']),
ws: VeilidConfigWS.fromJson(json['ws']), ws: VeilidConfigWS.fromJson(json['ws']),
wss: VeilidConfigWSS.fromJson(json['wss']), wss: VeilidConfigWSS.fromJson(json['wss']),
); );
Map<String, dynamic> _$$VeilidConfigProtocolImplToJson( Map<String, dynamic> _$VeilidConfigProtocolToJson(
_$VeilidConfigProtocolImpl instance) => _VeilidConfigProtocol instance) =>
<String, dynamic>{ <String, dynamic>{
'udp': instance.udp.toJson(), 'udp': instance.udp.toJson(),
'tcp': instance.tcp.toJson(), 'tcp': instance.tcp.toJson(),
@ -330,26 +314,23 @@ Map<String, dynamic> _$$VeilidConfigProtocolImplToJson(
'wss': instance.wss.toJson(), 'wss': instance.wss.toJson(),
}; };
_$VeilidConfigTLSImpl _$$VeilidConfigTLSImplFromJson( _VeilidConfigTLS _$VeilidConfigTLSFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigTLS(
_$VeilidConfigTLSImpl(
certificatePath: json['certificate_path'] as String, certificatePath: json['certificate_path'] as String,
privateKeyPath: json['private_key_path'] as String, privateKeyPath: json['private_key_path'] as String,
connectionInitialTimeoutMs: connectionInitialTimeoutMs:
(json['connection_initial_timeout_ms'] as num).toInt(), (json['connection_initial_timeout_ms'] as num).toInt(),
); );
Map<String, dynamic> _$$VeilidConfigTLSImplToJson( Map<String, dynamic> _$VeilidConfigTLSToJson(_VeilidConfigTLS instance) =>
_$VeilidConfigTLSImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'certificate_path': instance.certificatePath, 'certificate_path': instance.certificatePath,
'private_key_path': instance.privateKeyPath, 'private_key_path': instance.privateKeyPath,
'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs, 'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs,
}; };
_$VeilidConfigDHTImpl _$$VeilidConfigDHTImplFromJson( _VeilidConfigDHT _$VeilidConfigDHTFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigDHT(
_$VeilidConfigDHTImpl(
resolveNodeTimeoutMs: (json['resolve_node_timeout_ms'] as num).toInt(), resolveNodeTimeoutMs: (json['resolve_node_timeout_ms'] as num).toInt(),
resolveNodeCount: (json['resolve_node_count'] as num).toInt(), resolveNodeCount: (json['resolve_node_count'] as num).toInt(),
resolveNodeFanout: (json['resolve_node_fanout'] as num).toInt(), resolveNodeFanout: (json['resolve_node_fanout'] as num).toInt(),
@ -378,8 +359,7 @@ _$VeilidConfigDHTImpl _$$VeilidConfigDHTImplFromJson(
maxWatchExpirationMs: (json['max_watch_expiration_ms'] as num).toInt(), maxWatchExpirationMs: (json['max_watch_expiration_ms'] as num).toInt(),
); );
Map<String, dynamic> _$$VeilidConfigDHTImplToJson( Map<String, dynamic> _$VeilidConfigDHTToJson(_VeilidConfigDHT instance) =>
_$VeilidConfigDHTImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'resolve_node_timeout_ms': instance.resolveNodeTimeoutMs, 'resolve_node_timeout_ms': instance.resolveNodeTimeoutMs,
'resolve_node_count': instance.resolveNodeCount, 'resolve_node_count': instance.resolveNodeCount,
@ -407,9 +387,8 @@ Map<String, dynamic> _$$VeilidConfigDHTImplToJson(
'max_watch_expiration_ms': instance.maxWatchExpirationMs, 'max_watch_expiration_ms': instance.maxWatchExpirationMs,
}; };
_$VeilidConfigRPCImpl _$$VeilidConfigRPCImplFromJson( _VeilidConfigRPC _$VeilidConfigRPCFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigRPC(
_$VeilidConfigRPCImpl(
concurrency: (json['concurrency'] as num).toInt(), concurrency: (json['concurrency'] as num).toInt(),
queueSize: (json['queue_size'] as num).toInt(), queueSize: (json['queue_size'] as num).toInt(),
timeoutMs: (json['timeout_ms'] as num).toInt(), timeoutMs: (json['timeout_ms'] as num).toInt(),
@ -419,8 +398,7 @@ _$VeilidConfigRPCImpl _$$VeilidConfigRPCImplFromJson(
maxTimestampAheadMs: (json['max_timestamp_ahead_ms'] as num?)?.toInt(), maxTimestampAheadMs: (json['max_timestamp_ahead_ms'] as num?)?.toInt(),
); );
Map<String, dynamic> _$$VeilidConfigRPCImplToJson( Map<String, dynamic> _$VeilidConfigRPCToJson(_VeilidConfigRPC instance) =>
_$VeilidConfigRPCImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'concurrency': instance.concurrency, 'concurrency': instance.concurrency,
'queue_size': instance.queueSize, 'queue_size': instance.queueSize,
@ -431,9 +409,9 @@ Map<String, dynamic> _$$VeilidConfigRPCImplToJson(
'max_timestamp_ahead_ms': instance.maxTimestampAheadMs, 'max_timestamp_ahead_ms': instance.maxTimestampAheadMs,
}; };
_$VeilidConfigRoutingTableImpl _$$VeilidConfigRoutingTableImplFromJson( _VeilidConfigRoutingTable _$VeilidConfigRoutingTableFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigRoutingTableImpl( _VeilidConfigRoutingTable(
nodeId: (json['node_id'] as List<dynamic>) nodeId: (json['node_id'] as List<dynamic>)
.map(Typed<FixedEncodedString43>.fromJson) .map(Typed<FixedEncodedString43>.fromJson)
.toList(), .toList(),
@ -449,8 +427,8 @@ _$VeilidConfigRoutingTableImpl _$$VeilidConfigRoutingTableImplFromJson(
limitAttachedWeak: (json['limit_attached_weak'] as num).toInt(), limitAttachedWeak: (json['limit_attached_weak'] as num).toInt(),
); );
Map<String, dynamic> _$$VeilidConfigRoutingTableImplToJson( Map<String, dynamic> _$VeilidConfigRoutingTableToJson(
_$VeilidConfigRoutingTableImpl instance) => _VeilidConfigRoutingTable instance) =>
<String, dynamic>{ <String, dynamic>{
'node_id': instance.nodeId.map((e) => e.toJson()).toList(), 'node_id': instance.nodeId.map((e) => e.toJson()).toList(),
'node_id_secret': instance.nodeIdSecret.map((e) => e.toJson()).toList(), 'node_id_secret': instance.nodeIdSecret.map((e) => e.toJson()).toList(),
@ -462,9 +440,8 @@ Map<String, dynamic> _$$VeilidConfigRoutingTableImplToJson(
'limit_attached_weak': instance.limitAttachedWeak, 'limit_attached_weak': instance.limitAttachedWeak,
}; };
_$VeilidConfigNetworkImpl _$$VeilidConfigNetworkImplFromJson( _VeilidConfigNetwork _$VeilidConfigNetworkFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidConfigNetwork(
_$VeilidConfigNetworkImpl(
connectionInitialTimeoutMs: connectionInitialTimeoutMs:
(json['connection_initial_timeout_ms'] as num).toInt(), (json['connection_initial_timeout_ms'] as num).toInt(),
connectionInactivityTimeoutMs: connectionInactivityTimeoutMs:
@ -494,8 +471,8 @@ _$VeilidConfigNetworkImpl _$$VeilidConfigNetworkImplFromJson(
networkKeyPassword: json['network_key_password'] as String?, networkKeyPassword: json['network_key_password'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigNetworkImplToJson( Map<String, dynamic> _$VeilidConfigNetworkToJson(
_$VeilidConfigNetworkImpl instance) => _VeilidConfigNetwork instance) =>
<String, dynamic>{ <String, dynamic>{
'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs, 'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs,
'connection_inactivity_timeout_ms': 'connection_inactivity_timeout_ms':
@ -521,37 +498,37 @@ Map<String, dynamic> _$$VeilidConfigNetworkImplToJson(
'network_key_password': instance.networkKeyPassword, 'network_key_password': instance.networkKeyPassword,
}; };
_$VeilidConfigTableStoreImpl _$$VeilidConfigTableStoreImplFromJson( _VeilidConfigTableStore _$VeilidConfigTableStoreFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigTableStoreImpl( _VeilidConfigTableStore(
directory: json['directory'] as String, directory: json['directory'] as String,
delete: json['delete'] as bool, delete: json['delete'] as bool,
); );
Map<String, dynamic> _$$VeilidConfigTableStoreImplToJson( Map<String, dynamic> _$VeilidConfigTableStoreToJson(
_$VeilidConfigTableStoreImpl instance) => _VeilidConfigTableStore instance) =>
<String, dynamic>{ <String, dynamic>{
'directory': instance.directory, 'directory': instance.directory,
'delete': instance.delete, 'delete': instance.delete,
}; };
_$VeilidConfigBlockStoreImpl _$$VeilidConfigBlockStoreImplFromJson( _VeilidConfigBlockStore _$VeilidConfigBlockStoreFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigBlockStoreImpl( _VeilidConfigBlockStore(
directory: json['directory'] as String, directory: json['directory'] as String,
delete: json['delete'] as bool, delete: json['delete'] as bool,
); );
Map<String, dynamic> _$$VeilidConfigBlockStoreImplToJson( Map<String, dynamic> _$VeilidConfigBlockStoreToJson(
_$VeilidConfigBlockStoreImpl instance) => _VeilidConfigBlockStore instance) =>
<String, dynamic>{ <String, dynamic>{
'directory': instance.directory, 'directory': instance.directory,
'delete': instance.delete, 'delete': instance.delete,
}; };
_$VeilidConfigProtectedStoreImpl _$$VeilidConfigProtectedStoreImplFromJson( _VeilidConfigProtectedStore _$VeilidConfigProtectedStoreFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigProtectedStoreImpl( _VeilidConfigProtectedStore(
allowInsecureFallback: json['allow_insecure_fallback'] as bool, allowInsecureFallback: json['allow_insecure_fallback'] as bool,
alwaysUseInsecureStorage: json['always_use_insecure_storage'] as bool, alwaysUseInsecureStorage: json['always_use_insecure_storage'] as bool,
directory: json['directory'] as String, directory: json['directory'] as String,
@ -562,8 +539,8 @@ _$VeilidConfigProtectedStoreImpl _$$VeilidConfigProtectedStoreImplFromJson(
json['new_device_encryption_key_password'] as String?, json['new_device_encryption_key_password'] as String?,
); );
Map<String, dynamic> _$$VeilidConfigProtectedStoreImplToJson( Map<String, dynamic> _$VeilidConfigProtectedStoreToJson(
_$VeilidConfigProtectedStoreImpl instance) => _VeilidConfigProtectedStore instance) =>
<String, dynamic>{ <String, dynamic>{
'allow_insecure_fallback': instance.allowInsecureFallback, 'allow_insecure_fallback': instance.allowInsecureFallback,
'always_use_insecure_storage': instance.alwaysUseInsecureStorage, 'always_use_insecure_storage': instance.alwaysUseInsecureStorage,
@ -574,21 +551,21 @@ Map<String, dynamic> _$$VeilidConfigProtectedStoreImplToJson(
instance.newDeviceEncryptionKeyPassword, instance.newDeviceEncryptionKeyPassword,
}; };
_$VeilidConfigCapabilitiesImpl _$$VeilidConfigCapabilitiesImplFromJson( _VeilidConfigCapabilities _$VeilidConfigCapabilitiesFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidConfigCapabilitiesImpl( _VeilidConfigCapabilities(
disable: disable:
(json['disable'] as List<dynamic>).map((e) => e as String).toList(), (json['disable'] as List<dynamic>).map((e) => e as String).toList(),
); );
Map<String, dynamic> _$$VeilidConfigCapabilitiesImplToJson( Map<String, dynamic> _$VeilidConfigCapabilitiesToJson(
_$VeilidConfigCapabilitiesImpl instance) => _VeilidConfigCapabilities instance) =>
<String, dynamic>{ <String, dynamic>{
'disable': instance.disable, 'disable': instance.disable,
}; };
_$VeilidConfigImpl _$$VeilidConfigImplFromJson(Map<String, dynamic> json) => _VeilidConfig _$VeilidConfigFromJson(Map<String, dynamic> json) =>
_$VeilidConfigImpl( _VeilidConfig(
programName: json['program_name'] as String, programName: json['program_name'] as String,
namespace: json['namespace'] as String, namespace: json['namespace'] as String,
capabilities: VeilidConfigCapabilities.fromJson(json['capabilities']), capabilities: VeilidConfigCapabilities.fromJson(json['capabilities']),
@ -599,7 +576,7 @@ _$VeilidConfigImpl _$$VeilidConfigImplFromJson(Map<String, dynamic> json) =>
network: VeilidConfigNetwork.fromJson(json['network']), network: VeilidConfigNetwork.fromJson(json['network']),
); );
Map<String, dynamic> _$$VeilidConfigImplToJson(_$VeilidConfigImpl instance) => Map<String, dynamic> _$VeilidConfigToJson(_VeilidConfig instance) =>
<String, dynamic>{ <String, dynamic>{
'program_name': instance.programName, 'program_name': instance.programName,
'namespace': instance.namespace, 'namespace': instance.namespace,

View file

@ -46,7 +46,7 @@ enum VeilidLogLevel {
//////////// ////////////
@freezed @freezed
class LatencyStats with _$LatencyStats { sealed class LatencyStats with _$LatencyStats {
const factory LatencyStats({ const factory LatencyStats({
required TimestampDuration fastest, required TimestampDuration fastest,
required TimestampDuration average, required TimestampDuration average,
@ -64,7 +64,7 @@ class LatencyStats with _$LatencyStats {
//////////// ////////////
@freezed @freezed
class TransferStats with _$TransferStats { sealed class TransferStats with _$TransferStats {
const factory TransferStats({ const factory TransferStats({
required BigInt total, required BigInt total,
required BigInt maximum, required BigInt maximum,
@ -79,7 +79,7 @@ class TransferStats with _$TransferStats {
//////////// ////////////
@freezed @freezed
class TransferStatsDownUp with _$TransferStatsDownUp { sealed class TransferStatsDownUp with _$TransferStatsDownUp {
const factory TransferStatsDownUp({ const factory TransferStatsDownUp({
required TransferStats down, required TransferStats down,
required TransferStats up, required TransferStats up,
@ -92,7 +92,7 @@ class TransferStatsDownUp with _$TransferStatsDownUp {
//////////// ////////////
@freezed @freezed
class StateStats with _$StateStats { sealed class StateStats with _$StateStats {
const factory StateStats({ const factory StateStats({
required TimestampDuration span, required TimestampDuration span,
required TimestampDuration reliable, required TimestampDuration reliable,
@ -109,7 +109,7 @@ class StateStats with _$StateStats {
//////////// ////////////
@freezed @freezed
class StateReasonStats with _$StateReasonStats { sealed class StateReasonStats with _$StateReasonStats {
const factory StateReasonStats({ const factory StateReasonStats({
required TimestampDuration canNotSend, required TimestampDuration canNotSend,
required TimestampDuration tooManyLostAnswers, required TimestampDuration tooManyLostAnswers,
@ -127,7 +127,7 @@ class StateReasonStats with _$StateReasonStats {
//////////// ////////////
@freezed @freezed
class AnswerStats with _$AnswerStats { sealed class AnswerStats with _$AnswerStats {
const factory AnswerStats({ const factory AnswerStats({
required TimestampDuration span, required TimestampDuration span,
required int questions, required int questions,
@ -148,7 +148,7 @@ class AnswerStats with _$AnswerStats {
//////////// ////////////
@freezed @freezed
class RPCStats with _$RPCStats { sealed class RPCStats with _$RPCStats {
const factory RPCStats({ const factory RPCStats({
required int messagesSent, required int messagesSent,
required int messagesRcvd, required int messagesRcvd,
@ -170,7 +170,7 @@ class RPCStats with _$RPCStats {
//////////// ////////////
@freezed @freezed
class PeerStats with _$PeerStats { sealed class PeerStats with _$PeerStats {
const factory PeerStats({ const factory PeerStats({
required Timestamp timeAdded, required Timestamp timeAdded,
required RPCStats rpcStats, required RPCStats rpcStats,
@ -186,7 +186,7 @@ class PeerStats with _$PeerStats {
//////////// ////////////
@freezed @freezed
class PeerTableData with _$PeerTableData { sealed class PeerTableData with _$PeerTableData {
const factory PeerTableData({ const factory PeerTableData({
required List<TypedKey> nodeIds, required List<TypedKey> nodeIds,
required String peerAddress, required String peerAddress,
@ -251,7 +251,7 @@ sealed class VeilidUpdate with _$VeilidUpdate {
/// VeilidStateAttachment /// VeilidStateAttachment
@freezed @freezed
class VeilidStateAttachment with _$VeilidStateAttachment { sealed class VeilidStateAttachment with _$VeilidStateAttachment {
const factory VeilidStateAttachment( const factory VeilidStateAttachment(
{required AttachmentState state, {required AttachmentState state,
required bool publicInternetReady, required bool publicInternetReady,
@ -267,7 +267,7 @@ class VeilidStateAttachment with _$VeilidStateAttachment {
/// VeilidStateNetwork /// VeilidStateNetwork
@freezed @freezed
class VeilidStateNetwork with _$VeilidStateNetwork { sealed class VeilidStateNetwork with _$VeilidStateNetwork {
const factory VeilidStateNetwork( const factory VeilidStateNetwork(
{required bool started, {required bool started,
required BigInt bpsDown, required BigInt bpsDown,
@ -282,7 +282,7 @@ class VeilidStateNetwork with _$VeilidStateNetwork {
/// VeilidStateConfig /// VeilidStateConfig
@freezed @freezed
class VeilidStateConfig with _$VeilidStateConfig { sealed class VeilidStateConfig with _$VeilidStateConfig {
const factory VeilidStateConfig({ const factory VeilidStateConfig({
required VeilidConfig config, required VeilidConfig config,
}) = _VeilidStateConfig; }) = _VeilidStateConfig;
@ -295,7 +295,7 @@ class VeilidStateConfig with _$VeilidStateConfig {
/// VeilidState /// VeilidState
@freezed @freezed
class VeilidState with _$VeilidState { sealed class VeilidState with _$VeilidState {
const factory VeilidState({ const factory VeilidState({
required VeilidStateAttachment attachment, required VeilidStateAttachment attachment,
required VeilidStateNetwork network, required VeilidStateNetwork network,

File diff suppressed because it is too large Load diff

View file

@ -6,8 +6,8 @@ part of 'veilid_state.dart';
// JsonSerializableGenerator // JsonSerializableGenerator
// ************************************************************************** // **************************************************************************
_$LatencyStatsImpl _$$LatencyStatsImplFromJson(Map<String, dynamic> json) => _LatencyStats _$LatencyStatsFromJson(Map<String, dynamic> json) =>
_$LatencyStatsImpl( _LatencyStats(
fastest: TimestampDuration.fromJson(json['fastest']), fastest: TimestampDuration.fromJson(json['fastest']),
average: TimestampDuration.fromJson(json['average']), average: TimestampDuration.fromJson(json['average']),
slowest: TimestampDuration.fromJson(json['slowest']), slowest: TimestampDuration.fromJson(json['slowest']),
@ -17,7 +17,7 @@ _$LatencyStatsImpl _$$LatencyStatsImplFromJson(Map<String, dynamic> json) =>
p75: TimestampDuration.fromJson(json['p75']), p75: TimestampDuration.fromJson(json['p75']),
); );
Map<String, dynamic> _$$LatencyStatsImplToJson(_$LatencyStatsImpl instance) => Map<String, dynamic> _$LatencyStatsToJson(_LatencyStats instance) =>
<String, dynamic>{ <String, dynamic>{
'fastest': instance.fastest.toJson(), 'fastest': instance.fastest.toJson(),
'average': instance.average.toJson(), 'average': instance.average.toJson(),
@ -28,15 +28,15 @@ Map<String, dynamic> _$$LatencyStatsImplToJson(_$LatencyStatsImpl instance) =>
'p75': instance.p75.toJson(), 'p75': instance.p75.toJson(),
}; };
_$TransferStatsImpl _$$TransferStatsImplFromJson(Map<String, dynamic> json) => _TransferStats _$TransferStatsFromJson(Map<String, dynamic> json) =>
_$TransferStatsImpl( _TransferStats(
total: BigInt.parse(json['total'] as String), total: BigInt.parse(json['total'] as String),
maximum: BigInt.parse(json['maximum'] as String), maximum: BigInt.parse(json['maximum'] as String),
average: BigInt.parse(json['average'] as String), average: BigInt.parse(json['average'] as String),
minimum: BigInt.parse(json['minimum'] as String), minimum: BigInt.parse(json['minimum'] as String),
); );
Map<String, dynamic> _$$TransferStatsImplToJson(_$TransferStatsImpl instance) => Map<String, dynamic> _$TransferStatsToJson(_TransferStats instance) =>
<String, dynamic>{ <String, dynamic>{
'total': instance.total.toString(), 'total': instance.total.toString(),
'maximum': instance.maximum.toString(), 'maximum': instance.maximum.toString(),
@ -44,22 +44,20 @@ Map<String, dynamic> _$$TransferStatsImplToJson(_$TransferStatsImpl instance) =>
'minimum': instance.minimum.toString(), 'minimum': instance.minimum.toString(),
}; };
_$TransferStatsDownUpImpl _$$TransferStatsDownUpImplFromJson( _TransferStatsDownUp _$TransferStatsDownUpFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _TransferStatsDownUp(
_$TransferStatsDownUpImpl(
down: TransferStats.fromJson(json['down']), down: TransferStats.fromJson(json['down']),
up: TransferStats.fromJson(json['up']), up: TransferStats.fromJson(json['up']),
); );
Map<String, dynamic> _$$TransferStatsDownUpImplToJson( Map<String, dynamic> _$TransferStatsDownUpToJson(
_$TransferStatsDownUpImpl instance) => _TransferStatsDownUp instance) =>
<String, dynamic>{ <String, dynamic>{
'down': instance.down.toJson(), 'down': instance.down.toJson(),
'up': instance.up.toJson(), 'up': instance.up.toJson(),
}; };
_$StateStatsImpl _$$StateStatsImplFromJson(Map<String, dynamic> json) => _StateStats _$StateStatsFromJson(Map<String, dynamic> json) => _StateStats(
_$StateStatsImpl(
span: TimestampDuration.fromJson(json['span']), span: TimestampDuration.fromJson(json['span']),
reliable: TimestampDuration.fromJson(json['reliable']), reliable: TimestampDuration.fromJson(json['reliable']),
unreliable: TimestampDuration.fromJson(json['unreliable']), unreliable: TimestampDuration.fromJson(json['unreliable']),
@ -68,7 +66,7 @@ _$StateStatsImpl _$$StateStatsImplFromJson(Map<String, dynamic> json) =>
reason: StateReasonStats.fromJson(json['reason']), reason: StateReasonStats.fromJson(json['reason']),
); );
Map<String, dynamic> _$$StateStatsImplToJson(_$StateStatsImpl instance) => Map<String, dynamic> _$StateStatsToJson(_StateStats instance) =>
<String, dynamic>{ <String, dynamic>{
'span': instance.span.toJson(), 'span': instance.span.toJson(),
'reliable': instance.reliable.toJson(), 'reliable': instance.reliable.toJson(),
@ -78,9 +76,8 @@ Map<String, dynamic> _$$StateStatsImplToJson(_$StateStatsImpl instance) =>
'reason': instance.reason.toJson(), 'reason': instance.reason.toJson(),
}; };
_$StateReasonStatsImpl _$$StateReasonStatsImplFromJson( _StateReasonStats _$StateReasonStatsFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _StateReasonStats(
_$StateReasonStatsImpl(
canNotSend: TimestampDuration.fromJson(json['can_not_send']), canNotSend: TimestampDuration.fromJson(json['can_not_send']),
tooManyLostAnswers: tooManyLostAnswers:
TimestampDuration.fromJson(json['too_many_lost_answers']), TimestampDuration.fromJson(json['too_many_lost_answers']),
@ -93,8 +90,7 @@ _$StateReasonStatsImpl _$$StateReasonStatsImplFromJson(
TimestampDuration.fromJson(json['in_unreliable_ping_span']), TimestampDuration.fromJson(json['in_unreliable_ping_span']),
); );
Map<String, dynamic> _$$StateReasonStatsImplToJson( Map<String, dynamic> _$StateReasonStatsToJson(_StateReasonStats instance) =>
_$StateReasonStatsImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'can_not_send': instance.canNotSend.toJson(), 'can_not_send': instance.canNotSend.toJson(),
'too_many_lost_answers': instance.tooManyLostAnswers.toJson(), 'too_many_lost_answers': instance.tooManyLostAnswers.toJson(),
@ -105,8 +101,7 @@ Map<String, dynamic> _$$StateReasonStatsImplToJson(
'in_unreliable_ping_span': instance.inUnreliablePingSpan.toJson(), 'in_unreliable_ping_span': instance.inUnreliablePingSpan.toJson(),
}; };
_$AnswerStatsImpl _$$AnswerStatsImplFromJson(Map<String, dynamic> json) => _AnswerStats _$AnswerStatsFromJson(Map<String, dynamic> json) => _AnswerStats(
_$AnswerStatsImpl(
span: TimestampDuration.fromJson(json['span']), span: TimestampDuration.fromJson(json['span']),
questions: (json['questions'] as num).toInt(), questions: (json['questions'] as num).toInt(),
answers: (json['answers'] as num).toInt(), answers: (json['answers'] as num).toInt(),
@ -125,7 +120,7 @@ _$AnswerStatsImpl _$$AnswerStatsImplFromJson(Map<String, dynamic> json) =>
(json['consecutive_lost_answers_minimum'] as num).toInt(), (json['consecutive_lost_answers_minimum'] as num).toInt(),
); );
Map<String, dynamic> _$$AnswerStatsImplToJson(_$AnswerStatsImpl instance) => Map<String, dynamic> _$AnswerStatsToJson(_AnswerStats instance) =>
<String, dynamic>{ <String, dynamic>{
'span': instance.span.toJson(), 'span': instance.span.toJson(),
'questions': instance.questions, 'questions': instance.questions,
@ -142,8 +137,7 @@ Map<String, dynamic> _$$AnswerStatsImplToJson(_$AnswerStatsImpl instance) =>
instance.consecutiveLostAnswersMinimum, instance.consecutiveLostAnswersMinimum,
}; };
_$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) => _RPCStats _$RPCStatsFromJson(Map<String, dynamic> json) => _RPCStats(
_$RPCStatsImpl(
messagesSent: (json['messages_sent'] as num).toInt(), messagesSent: (json['messages_sent'] as num).toInt(),
messagesRcvd: (json['messages_rcvd'] as num).toInt(), messagesRcvd: (json['messages_rcvd'] as num).toInt(),
questionsInFlight: (json['questions_in_flight'] as num).toInt(), questionsInFlight: (json['questions_in_flight'] as num).toInt(),
@ -165,8 +159,7 @@ _$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
answerOrdered: AnswerStats.fromJson(json['answer_ordered']), answerOrdered: AnswerStats.fromJson(json['answer_ordered']),
); );
Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) => Map<String, dynamic> _$RPCStatsToJson(_RPCStats instance) => <String, dynamic>{
<String, dynamic>{
'messages_sent': instance.messagesSent, 'messages_sent': instance.messagesSent,
'messages_rcvd': instance.messagesRcvd, 'messages_rcvd': instance.messagesRcvd,
'questions_in_flight': instance.questionsInFlight, 'questions_in_flight': instance.questionsInFlight,
@ -180,8 +173,7 @@ Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
'answer_ordered': instance.answerOrdered.toJson(), 'answer_ordered': instance.answerOrdered.toJson(),
}; };
_$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) => _PeerStats _$PeerStatsFromJson(Map<String, dynamic> json) => _PeerStats(
_$PeerStatsImpl(
timeAdded: Timestamp.fromJson(json['time_added']), timeAdded: Timestamp.fromJson(json['time_added']),
rpcStats: RPCStats.fromJson(json['rpc_stats']), rpcStats: RPCStats.fromJson(json['rpc_stats']),
transfer: TransferStatsDownUp.fromJson(json['transfer']), transfer: TransferStatsDownUp.fromJson(json['transfer']),
@ -191,7 +183,7 @@ _$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
: LatencyStats.fromJson(json['latency']), : LatencyStats.fromJson(json['latency']),
); );
Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) => Map<String, dynamic> _$PeerStatsToJson(_PeerStats instance) =>
<String, dynamic>{ <String, dynamic>{
'time_added': instance.timeAdded.toJson(), 'time_added': instance.timeAdded.toJson(),
'rpc_stats': instance.rpcStats.toJson(), 'rpc_stats': instance.rpcStats.toJson(),
@ -200,8 +192,8 @@ Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) =>
'latency': instance.latency?.toJson(), 'latency': instance.latency?.toJson(),
}; };
_$PeerTableDataImpl _$$PeerTableDataImplFromJson(Map<String, dynamic> json) => _PeerTableData _$PeerTableDataFromJson(Map<String, dynamic> json) =>
_$PeerTableDataImpl( _PeerTableData(
nodeIds: (json['node_ids'] as List<dynamic>) nodeIds: (json['node_ids'] as List<dynamic>)
.map(Typed<FixedEncodedString43>.fromJson) .map(Typed<FixedEncodedString43>.fromJson)
.toList(), .toList(),
@ -209,32 +201,29 @@ _$PeerTableDataImpl _$$PeerTableDataImplFromJson(Map<String, dynamic> json) =>
peerStats: PeerStats.fromJson(json['peer_stats']), peerStats: PeerStats.fromJson(json['peer_stats']),
); );
Map<String, dynamic> _$$PeerTableDataImplToJson(_$PeerTableDataImpl instance) => Map<String, dynamic> _$PeerTableDataToJson(_PeerTableData instance) =>
<String, dynamic>{ <String, dynamic>{
'node_ids': instance.nodeIds.map((e) => e.toJson()).toList(), 'node_ids': instance.nodeIds.map((e) => e.toJson()).toList(),
'peer_address': instance.peerAddress, 'peer_address': instance.peerAddress,
'peer_stats': instance.peerStats.toJson(), 'peer_stats': instance.peerStats.toJson(),
}; };
_$VeilidLogImpl _$$VeilidLogImplFromJson(Map<String, dynamic> json) => VeilidLog _$VeilidLogFromJson(Map<String, dynamic> json) => VeilidLog(
_$VeilidLogImpl(
logLevel: VeilidLogLevel.fromJson(json['log_level']), logLevel: VeilidLogLevel.fromJson(json['log_level']),
message: json['message'] as String, message: json['message'] as String,
backtrace: json['backtrace'] as String?, backtrace: json['backtrace'] as String?,
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidLogImplToJson(_$VeilidLogImpl instance) => Map<String, dynamic> _$VeilidLogToJson(VeilidLog instance) => <String, dynamic>{
<String, dynamic>{
'log_level': instance.logLevel.toJson(), 'log_level': instance.logLevel.toJson(),
'message': instance.message, 'message': instance.message,
'backtrace': instance.backtrace, 'backtrace': instance.backtrace,
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidAppMessageImpl _$$VeilidAppMessageImplFromJson( VeilidAppMessage _$VeilidAppMessageFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => VeilidAppMessage(
_$VeilidAppMessageImpl(
message: message:
const Uint8ListJsonConverter.jsIsArray().fromJson(json['message']), const Uint8ListJsonConverter.jsIsArray().fromJson(json['message']),
sender: json['sender'] == null sender: json['sender'] == null
@ -244,8 +233,7 @@ _$VeilidAppMessageImpl _$$VeilidAppMessageImplFromJson(
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidAppMessageImplToJson( Map<String, dynamic> _$VeilidAppMessageToJson(VeilidAppMessage instance) =>
_$VeilidAppMessageImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'message': 'message':
const Uint8ListJsonConverter.jsIsArray().toJson(instance.message), const Uint8ListJsonConverter.jsIsArray().toJson(instance.message),
@ -254,8 +242,8 @@ Map<String, dynamic> _$$VeilidAppMessageImplToJson(
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidAppCallImpl _$$VeilidAppCallImplFromJson(Map<String, dynamic> json) => VeilidAppCall _$VeilidAppCallFromJson(Map<String, dynamic> json) =>
_$VeilidAppCallImpl( VeilidAppCall(
message: message:
const Uint8ListJsonConverter.jsIsArray().fromJson(json['message']), const Uint8ListJsonConverter.jsIsArray().fromJson(json['message']),
callId: json['call_id'] as String, callId: json['call_id'] as String,
@ -266,7 +254,7 @@ _$VeilidAppCallImpl _$$VeilidAppCallImplFromJson(Map<String, dynamic> json) =>
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidAppCallImplToJson(_$VeilidAppCallImpl instance) => Map<String, dynamic> _$VeilidAppCallToJson(VeilidAppCall instance) =>
<String, dynamic>{ <String, dynamic>{
'message': 'message':
const Uint8ListJsonConverter.jsIsArray().toJson(instance.message), const Uint8ListJsonConverter.jsIsArray().toJson(instance.message),
@ -276,9 +264,9 @@ Map<String, dynamic> _$$VeilidAppCallImplToJson(_$VeilidAppCallImpl instance) =>
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidUpdateAttachmentImpl _$$VeilidUpdateAttachmentImplFromJson( VeilidUpdateAttachment _$VeilidUpdateAttachmentFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidUpdateAttachmentImpl( VeilidUpdateAttachment(
state: AttachmentState.fromJson(json['state']), state: AttachmentState.fromJson(json['state']),
publicInternetReady: json['public_internet_ready'] as bool, publicInternetReady: json['public_internet_ready'] as bool,
localNetworkReady: json['local_network_ready'] as bool, localNetworkReady: json['local_network_ready'] as bool,
@ -289,8 +277,8 @@ _$VeilidUpdateAttachmentImpl _$$VeilidUpdateAttachmentImplFromJson(
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidUpdateAttachmentImplToJson( Map<String, dynamic> _$VeilidUpdateAttachmentToJson(
_$VeilidUpdateAttachmentImpl instance) => VeilidUpdateAttachment instance) =>
<String, dynamic>{ <String, dynamic>{
'state': instance.state.toJson(), 'state': instance.state.toJson(),
'public_internet_ready': instance.publicInternetReady, 'public_internet_ready': instance.publicInternetReady,
@ -300,9 +288,8 @@ Map<String, dynamic> _$$VeilidUpdateAttachmentImplToJson(
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidUpdateNetworkImpl _$$VeilidUpdateNetworkImplFromJson( VeilidUpdateNetwork _$VeilidUpdateNetworkFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => VeilidUpdateNetwork(
_$VeilidUpdateNetworkImpl(
started: json['started'] as bool, started: json['started'] as bool,
bpsDown: BigInt.parse(json['bps_down'] as String), bpsDown: BigInt.parse(json['bps_down'] as String),
bpsUp: BigInt.parse(json['bps_up'] as String), bpsUp: BigInt.parse(json['bps_up'] as String),
@ -311,8 +298,8 @@ _$VeilidUpdateNetworkImpl _$$VeilidUpdateNetworkImplFromJson(
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidUpdateNetworkImplToJson( Map<String, dynamic> _$VeilidUpdateNetworkToJson(
_$VeilidUpdateNetworkImpl instance) => VeilidUpdateNetwork instance) =>
<String, dynamic>{ <String, dynamic>{
'started': instance.started, 'started': instance.started,
'bps_down': instance.bpsDown.toString(), 'bps_down': instance.bpsDown.toString(),
@ -321,23 +308,21 @@ Map<String, dynamic> _$$VeilidUpdateNetworkImplToJson(
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidUpdateConfigImpl _$$VeilidUpdateConfigImplFromJson( VeilidUpdateConfig _$VeilidUpdateConfigFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => VeilidUpdateConfig(
_$VeilidUpdateConfigImpl(
config: VeilidConfig.fromJson(json['config']), config: VeilidConfig.fromJson(json['config']),
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidUpdateConfigImplToJson( Map<String, dynamic> _$VeilidUpdateConfigToJson(VeilidUpdateConfig instance) =>
_$VeilidUpdateConfigImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'config': instance.config.toJson(), 'config': instance.config.toJson(),
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidUpdateRouteChangeImpl _$$VeilidUpdateRouteChangeImplFromJson( VeilidUpdateRouteChange _$VeilidUpdateRouteChangeFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidUpdateRouteChangeImpl( VeilidUpdateRouteChange(
deadRoutes: (json['dead_routes'] as List<dynamic>) deadRoutes: (json['dead_routes'] as List<dynamic>)
.map((e) => e as String) .map((e) => e as String)
.toList(), .toList(),
@ -347,17 +332,17 @@ _$VeilidUpdateRouteChangeImpl _$$VeilidUpdateRouteChangeImplFromJson(
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidUpdateRouteChangeImplToJson( Map<String, dynamic> _$VeilidUpdateRouteChangeToJson(
_$VeilidUpdateRouteChangeImpl instance) => VeilidUpdateRouteChange instance) =>
<String, dynamic>{ <String, dynamic>{
'dead_routes': instance.deadRoutes, 'dead_routes': instance.deadRoutes,
'dead_remote_routes': instance.deadRemoteRoutes, 'dead_remote_routes': instance.deadRemoteRoutes,
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidUpdateValueChangeImpl _$$VeilidUpdateValueChangeImplFromJson( VeilidUpdateValueChange _$VeilidUpdateValueChangeFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidUpdateValueChangeImpl( VeilidUpdateValueChange(
key: Typed<FixedEncodedString43>.fromJson(json['key']), key: Typed<FixedEncodedString43>.fromJson(json['key']),
subkeys: (json['subkeys'] as List<dynamic>) subkeys: (json['subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson) .map(ValueSubkeyRange.fromJson)
@ -367,8 +352,8 @@ _$VeilidUpdateValueChangeImpl _$$VeilidUpdateValueChangeImplFromJson(
$type: json['kind'] as String?, $type: json['kind'] as String?,
); );
Map<String, dynamic> _$$VeilidUpdateValueChangeImplToJson( Map<String, dynamic> _$VeilidUpdateValueChangeToJson(
_$VeilidUpdateValueChangeImpl instance) => VeilidUpdateValueChange instance) =>
<String, dynamic>{ <String, dynamic>{
'key': instance.key.toJson(), 'key': instance.key.toJson(),
'subkeys': instance.subkeys.map((e) => e.toJson()).toList(), 'subkeys': instance.subkeys.map((e) => e.toJson()).toList(),
@ -377,9 +362,9 @@ Map<String, dynamic> _$$VeilidUpdateValueChangeImplToJson(
'kind': instance.$type, 'kind': instance.$type,
}; };
_$VeilidStateAttachmentImpl _$$VeilidStateAttachmentImplFromJson( _VeilidStateAttachment _$VeilidStateAttachmentFromJson(
Map<String, dynamic> json) => Map<String, dynamic> json) =>
_$VeilidStateAttachmentImpl( _VeilidStateAttachment(
state: AttachmentState.fromJson(json['state']), state: AttachmentState.fromJson(json['state']),
publicInternetReady: json['public_internet_ready'] as bool, publicInternetReady: json['public_internet_ready'] as bool,
localNetworkReady: json['local_network_ready'] as bool, localNetworkReady: json['local_network_ready'] as bool,
@ -389,8 +374,8 @@ _$VeilidStateAttachmentImpl _$$VeilidStateAttachmentImplFromJson(
: TimestampDuration.fromJson(json['attached_uptime']), : TimestampDuration.fromJson(json['attached_uptime']),
); );
Map<String, dynamic> _$$VeilidStateAttachmentImplToJson( Map<String, dynamic> _$VeilidStateAttachmentToJson(
_$VeilidStateAttachmentImpl instance) => _VeilidStateAttachment instance) =>
<String, dynamic>{ <String, dynamic>{
'state': instance.state.toJson(), 'state': instance.state.toJson(),
'public_internet_ready': instance.publicInternetReady, 'public_internet_ready': instance.publicInternetReady,
@ -399,9 +384,8 @@ Map<String, dynamic> _$$VeilidStateAttachmentImplToJson(
'attached_uptime': instance.attachedUptime?.toJson(), 'attached_uptime': instance.attachedUptime?.toJson(),
}; };
_$VeilidStateNetworkImpl _$$VeilidStateNetworkImplFromJson( _VeilidStateNetwork _$VeilidStateNetworkFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidStateNetwork(
_$VeilidStateNetworkImpl(
started: json['started'] as bool, started: json['started'] as bool,
bpsDown: BigInt.parse(json['bps_down'] as String), bpsDown: BigInt.parse(json['bps_down'] as String),
bpsUp: BigInt.parse(json['bps_up'] as String), bpsUp: BigInt.parse(json['bps_up'] as String),
@ -409,8 +393,7 @@ _$VeilidStateNetworkImpl _$$VeilidStateNetworkImplFromJson(
(json['peers'] as List<dynamic>).map(PeerTableData.fromJson).toList(), (json['peers'] as List<dynamic>).map(PeerTableData.fromJson).toList(),
); );
Map<String, dynamic> _$$VeilidStateNetworkImplToJson( Map<String, dynamic> _$VeilidStateNetworkToJson(_VeilidStateNetwork instance) =>
_$VeilidStateNetworkImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'started': instance.started, 'started': instance.started,
'bps_down': instance.bpsDown.toString(), 'bps_down': instance.bpsDown.toString(),
@ -418,26 +401,23 @@ Map<String, dynamic> _$$VeilidStateNetworkImplToJson(
'peers': instance.peers.map((e) => e.toJson()).toList(), 'peers': instance.peers.map((e) => e.toJson()).toList(),
}; };
_$VeilidStateConfigImpl _$$VeilidStateConfigImplFromJson( _VeilidStateConfig _$VeilidStateConfigFromJson(Map<String, dynamic> json) =>
Map<String, dynamic> json) => _VeilidStateConfig(
_$VeilidStateConfigImpl(
config: VeilidConfig.fromJson(json['config']), config: VeilidConfig.fromJson(json['config']),
); );
Map<String, dynamic> _$$VeilidStateConfigImplToJson( Map<String, dynamic> _$VeilidStateConfigToJson(_VeilidStateConfig instance) =>
_$VeilidStateConfigImpl instance) =>
<String, dynamic>{ <String, dynamic>{
'config': instance.config.toJson(), 'config': instance.config.toJson(),
}; };
_$VeilidStateImpl _$$VeilidStateImplFromJson(Map<String, dynamic> json) => _VeilidState _$VeilidStateFromJson(Map<String, dynamic> json) => _VeilidState(
_$VeilidStateImpl(
attachment: VeilidStateAttachment.fromJson(json['attachment']), attachment: VeilidStateAttachment.fromJson(json['attachment']),
network: VeilidStateNetwork.fromJson(json['network']), network: VeilidStateNetwork.fromJson(json['network']),
config: VeilidStateConfig.fromJson(json['config']), config: VeilidStateConfig.fromJson(json['config']),
); );
Map<String, dynamic> _$$VeilidStateImplToJson(_$VeilidStateImpl instance) => Map<String, dynamic> _$VeilidStateToJson(_VeilidState instance) =>
<String, dynamic>{ <String, dynamic>{
'attachment': instance.attachment.toJson(), 'attachment': instance.attachment.toJson(),
'network': instance.network.toJson(), 'network': instance.network.toJson(),

View file

@ -9,7 +9,7 @@ include(FetchContent)
FetchContent_Declare( FetchContent_Declare(
Corrosion Corrosion
GIT_REPOSITORY https://github.com/AndrewGaspar/corrosion.git GIT_REPOSITORY https://github.com/AndrewGaspar/corrosion.git
GIT_TAG v0.5.0 # Optionally specify a version tag or branch here GIT_TAG v0.5.1 # Optionally specify a version tag or branch here
) )
FetchContent_MakeAvailable(Corrosion) FetchContent_MakeAvailable(Corrosion)

View file

@ -9,7 +9,7 @@ include(FetchContent)
FetchContent_Declare( FetchContent_Declare(
Corrosion Corrosion
GIT_REPOSITORY https://github.com/AndrewGaspar/corrosion.git GIT_REPOSITORY https://github.com/AndrewGaspar/corrosion.git
GIT_TAG v0.4.10 # Optionally specify a version tag or branch here GIT_TAG v0.5.1 # Optionally specify a version tag or branch here
) )
FetchContent_MakeAvailable(Corrosion) FetchContent_MakeAvailable(Corrosion)
@ -29,4 +29,3 @@ corrosion_import_crate(MANIFEST_PATH ${repository_root}/../veilid/Cargo.toml CRA
set(CRATE_NAME "veilid_flutter") set(CRATE_NAME "veilid_flutter")
target_link_libraries(${PLUGIN_NAME} PUBLIC ${CRATE_NAME}) target_link_libraries(${PLUGIN_NAME} PUBLIC ${CRATE_NAME})
# list(APPEND PLUGIN_BUNDLED_LIBRARIES $<TARGET_FILE:${CRATE_NAME}-shared>)

View file

@ -7,7 +7,7 @@ import os
import veilid import veilid
from veilid import ValueSubkey, Timestamp, SafetySelection from veilid import ValueSubkey, Timestamp, SafetySelection
from veilid.types import VeilidJSONEncoder from veilid.types import ValueSeqNum, VeilidJSONEncoder
################################################################## ##################################################################
BOGUS_KEY = veilid.TypedKey.from_value( BOGUS_KEY = veilid.TypedKey.from_value(
@ -118,8 +118,8 @@ async def test_set_get_dht_value_with_owner(api_connection: veilid.VeilidAPI):
vd4 = await rc.get_dht_value(rec.key, ValueSubkey(1), False) vd4 = await rc.get_dht_value(rec.key, ValueSubkey(1), False)
assert vd4 is None assert vd4 is None
print("vd2: {}", vd2.__dict__) #print("vd2: {}", vd2.__dict__)
print("vd3: {}", vd3.__dict__) #print("vd3: {}", vd3.__dict__)
assert vd2 == vd3 assert vd2 == vd3
@ -135,7 +135,7 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI):
key = rec.key key = rec.key
owner = rec.owner owner = rec.owner
secret = rec.owner_secret secret = rec.owner_secret
print(f"key:{key}") #print(f"key:{key}")
cs = await api_connection.get_crypto_system(rec.key.kind()) cs = await api_connection.get_crypto_system(rec.key.kind())
async with cs: async with cs:
@ -237,6 +237,14 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI):
await rc.set_dht_value(key, ValueSubkey(0), va) await rc.set_dht_value(key, ValueSubkey(0), va)
# Verify subkey 0 can be set because override with the right writer # Verify subkey 0 can be set because override with the right writer
# Should have prior sequence number as its returned value because it exists online at seq 0
vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, veilid.KeyPair.from_parts(owner, secret))
assert vdtemp is not None
assert vdtemp.data == vb
assert vdtemp.seq == 0
assert vdtemp.writer == owner
# Should update the second time to seq 1
vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, veilid.KeyPair.from_parts(owner, secret)) vdtemp = await rc.set_dht_value(key, ValueSubkey(0), va, veilid.KeyPair.from_parts(owner, secret))
assert vdtemp is None assert vdtemp is None
@ -297,7 +305,7 @@ async def test_watch_dht_values():
await sync(rc0, [rec0]) await sync(rc0, [rec0])
# Server 0: Make a watch on all the subkeys # Server 0: Make a watch on all the subkeys
active = await rc0.watch_dht_values(rec0.key, [], Timestamp(0), 0xFFFFFFFF) active = await rc0.watch_dht_values(rec0.key)
assert active assert active
# Server 1: Open the subkey # Server 1: Open the subkey
@ -462,7 +470,7 @@ async def test_watch_many_dht_values():
assert vd is None assert vd is None
# Server 0: Make a watch on all the subkeys # Server 0: Make a watch on all the subkeys
active = await rc0.watch_dht_values(records[n].key, [], Timestamp(0), 0xFFFFFFFF) active = await rc0.watch_dht_values(records[n].key)
assert active assert active
# Open and set all records # Open and set all records
@ -516,16 +524,18 @@ async def test_inspect_dht_record(api_connection: veilid.VeilidAPI):
assert vd is None assert vd is None
rr = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.LOCAL) rr = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.LOCAL)
print("rr: {}", rr.__dict__) #print("rr: {}", rr.__dict__)
assert rr.subkeys == [(0, 1)] assert rr.subkeys == [(0, 1)]
assert rr.local_seqs == [0, 0xFFFFFFFF] assert rr.local_seqs == [0, None]
assert rr.network_seqs == [] assert rr.network_seqs == [None, None]
await sync(rc, [rec])
rr2 = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.SYNC_GET) rr2 = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.SYNC_GET)
print("rr2: {}", rr2.__dict__) #print("rr2: {}", rr2.__dict__)
assert rr2.subkeys == [(0, 1)] assert rr2.subkeys == [(0, 1)]
assert rr2.local_seqs == [0, 0xFFFFFFFF] assert rr2.local_seqs == [0, None]
assert rr2.network_seqs == [0, 0xFFFFFFFF] assert rr2.network_seqs == [0, None]
await rc.close_dht_record(rec.key) await rc.close_dht_record(rec.key)
await rc.delete_dht_record(rec.key) await rc.delete_dht_record(rec.key)
@ -932,7 +942,7 @@ async def sync_win(
if key is not None: if key is not None:
futurerecords.remove(key) futurerecords.remove(key)
if len(rr.subkeys) == 1 and rr.subkeys[0] == (0, subkey_count-1) and veilid.ValueSeqNum.NONE not in rr.local_seqs and len(rr.offline_subkeys) == 0: if len(rr.subkeys) == 1 and rr.subkeys[0] == (0, subkey_count-1) and None not in rr.local_seqs and len(rr.offline_subkeys) == 0:
if key in recordreports: if key in recordreports:
del recordreports[key] del recordreports[key]
donerecords.add(key) donerecords.add(key)
@ -959,7 +969,7 @@ async def sync_win(
win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(1)) win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(1))
for (a,b) in rr.subkeys: for (a,b) in rr.subkeys:
for m in range(a, b+1): for m in range(a, b+1):
if rr.local_seqs[m] != veilid.ValueSeqNum.NONE: if rr.local_seqs[m] != None:
win.addstr(n+2, m+1, " ", curses.color_pair(2)) win.addstr(n+2, m+1, " ", curses.color_pair(2))
for (a,b) in rr.offline_subkeys: for (a,b) in rr.offline_subkeys:
win.addstr(n+2, a+1, " " * (b-a+1), curses.color_pair(3)) win.addstr(n+2, a+1, " " * (b-a+1), curses.color_pair(3))

View file

@ -2947,7 +2947,10 @@
"description": "The sequence numbers of each subkey requested from a locally stored DHT Record", "description": "The sequence numbers of each subkey requested from a locally stored DHT Record",
"type": "array", "type": "array",
"items": { "items": {
"type": "integer", "type": [
"integer",
"null"
],
"format": "uint32", "format": "uint32",
"minimum": 0.0 "minimum": 0.0
} }
@ -2956,7 +2959,10 @@
"description": "The sequence numbers of each subkey requested from the DHT over the network", "description": "The sequence numbers of each subkey requested from the DHT over the network",
"type": "array", "type": "array",
"items": { "items": {
"type": "integer", "type": [
"integer",
"null"
],
"format": "uint32", "format": "uint32",
"minimum": 0.0 "minimum": 0.0
} }

View file

@ -237,8 +237,7 @@ class ValueSubkey(int):
class ValueSeqNum(int): class ValueSeqNum(int):
NONE = 4294967295 pass
#################################################################### ####################################################################
@ -405,15 +404,15 @@ class DHTRecordDescriptor:
class DHTRecordReport: class DHTRecordReport:
subkeys: list[tuple[ValueSubkey, ValueSubkey]] subkeys: list[tuple[ValueSubkey, ValueSubkey]]
offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]] offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]]
local_seqs: list[ValueSeqNum] local_seqs: list[Optional[ValueSeqNum]]
network_seqs: list[ValueSeqNum] network_seqs: list[Optional[ValueSeqNum]]
def __init__( def __init__(
self, self,
subkeys: list[tuple[ValueSubkey, ValueSubkey]], subkeys: list[tuple[ValueSubkey, ValueSubkey]],
offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]], offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]],
local_seqs: list[ValueSeqNum], local_seqs: list[Optional[ValueSeqNum]],
network_seqs: list[ValueSeqNum], network_seqs: list[Optional[ValueSeqNum]],
): ):
self.subkeys = subkeys self.subkeys = subkeys
self.offline_subkeys = offline_subkeys self.offline_subkeys = offline_subkeys
@ -428,8 +427,8 @@ class DHTRecordReport:
return cls( return cls(
[(p[0], p[1]) for p in j["subkeys"]], [(p[0], p[1]) for p in j["subkeys"]],
[(p[0], p[1]) for p in j["offline_subkeys"]], [(p[0], p[1]) for p in j["offline_subkeys"]],
[ValueSeqNum(s) for s in j["local_seqs"]], [(ValueSeqNum(s) if s is not None else None) for s in j["local_seqs"] ],
[ValueSeqNum(s) for s in j["network_seqs"]], [(ValueSeqNum(s) if s is not None else None) for s in j["network_seqs"] ],
) )
def to_json(self) -> dict: def to_json(self) -> dict:

View file

@ -212,13 +212,13 @@ fn main() -> EyreResult<()> {
settingsrw.logging.terminal.enabled = true; settingsrw.logging.terminal.enabled = true;
settingsrw.logging.terminal.level = LogLevel::Debug; settingsrw.logging.terminal.level = LogLevel::Debug;
settingsrw.logging.api.enabled = true; settingsrw.logging.api.enabled = true;
settingsrw.logging.api.level = LogLevel::Debug; settingsrw.logging.api.level = LogLevel::Info;
} }
if args.logging.trace { if args.logging.trace {
settingsrw.logging.terminal.enabled = true; settingsrw.logging.terminal.enabled = true;
settingsrw.logging.terminal.level = LogLevel::Trace; settingsrw.logging.terminal.level = LogLevel::Trace;
settingsrw.logging.api.enabled = true; settingsrw.logging.api.enabled = true;
settingsrw.logging.api.level = LogLevel::Trace; settingsrw.logging.api.level = LogLevel::Info;
} }
if let Some(subnode_index) = args.subnode_index { if let Some(subnode_index) = args.subnode_index {

View file

@ -13,6 +13,8 @@ export type KeyPair = `${PublicKey}:${SecretKey}`;
export type FourCC = "NONE" | "VLD0" | string; export type FourCC = "NONE" | "VLD0" | string;
export type CryptoTyped<TCryptoKey extends string> = `${FourCC}:${TCryptoKey}`; export type CryptoTyped<TCryptoKey extends string> = `${FourCC}:${TCryptoKey}`;
export type CryptoTypedGroup<TCryptoKey extends string> = Array<CryptoTyped<TCryptoKey>>; export type CryptoTypedGroup<TCryptoKey extends string> = Array<CryptoTyped<TCryptoKey>>;
export
"#; "#;
#[wasm_bindgen] #[wasm_bindgen]

View file

@ -17,7 +17,7 @@
}, },
"scripts": { "scripts": {
"test": "wdio run ./wdio.conf.ts", "test": "wdio run ./wdio.conf.ts",
"test:headless": "WDIO_HEADLESS=true npm run test", "test:headless": "WDIO_HEADLESS=true npm run test --",
"start": "tsc && npm run test:headless" "start": "tsc && npm run test:headless"
} }
} }

View file

@ -149,6 +149,9 @@ describe('VeilidRoutingContext', () => {
); );
expect(setValueRes).toBeUndefined(); expect(setValueRes).toBeUndefined();
// Wait for synchronization
await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key);
const getValueRes = await routingContext.getDhtValue( const getValueRes = await routingContext.getDhtValue(
dhtRecord.key, dhtRecord.key,
0, 0,
@ -282,9 +285,9 @@ describe('VeilidRoutingContext', () => {
"Local", "Local",
); );
expect(inspectRes).toBeDefined(); expect(inspectRes).toBeDefined();
expect(inspectRes.subkeys.concat(inspectRes.offline_subkeys)).toEqual([[0, 0]]); expect(inspectRes.subkeys).toEqual([[0, 0]]);
expect(inspectRes.local_seqs).toEqual([0]); expect(inspectRes.local_seqs).toEqual([0]);
expect(inspectRes.network_seqs).toEqual([]); expect(inspectRes.network_seqs).toEqual([undefined]);
// Wait for synchronization // Wait for synchronization
await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key); await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key);
@ -310,14 +313,17 @@ describe('VeilidRoutingContext', () => {
); );
expect(setValueRes).toBeUndefined(); expect(setValueRes).toBeUndefined();
// Wait for synchronization
await waitForOfflineSubkeyWrite(routingContext, dhtRecord.key);
// Inspect locally // Inspect locally
const inspectRes = await routingContext.inspectDhtRecord( const inspectRes = await routingContext.inspectDhtRecord(
dhtRecord.key, dhtRecord.key,
); );
expect(inspectRes).toBeDefined(); expect(inspectRes).toBeDefined();
expect(inspectRes.subkeys.concat(inspectRes.offline_subkeys)).toEqual([[0, 0]]); expect(inspectRes.offline_subkeys).toEqual([]);
expect(inspectRes.local_seqs).toEqual([0]); expect(inspectRes.local_seqs).toEqual([0]);
expect(inspectRes.network_seqs).toEqual([]); expect(inspectRes.network_seqs).toEqual([undefined]);
}); });
}); });
}); });

View file

@ -21,5 +21,9 @@ export const veilidCoreInitConfig: VeilidWASMConfig = {
export var veilidCoreStartupConfig = (() => { export var veilidCoreStartupConfig = (() => {
var defaultConfig = JSON.parse(veilidClient.defaultConfig()); var defaultConfig = JSON.parse(veilidClient.defaultConfig());
defaultConfig.program_name = 'veilid-wasm-test'; defaultConfig.program_name = 'veilid-wasm-test';
// Ensure we are starting from scratch
defaultConfig.table_store.delete = true;
defaultConfig.protected_store.delete = true;
defaultConfig.block_store.delete = true;
return defaultConfig; return defaultConfig;
})(); })();

View file

@ -5,8 +5,8 @@ import {
veilidCoreStartupConfig, veilidCoreStartupConfig,
} from './utils/veilid-config'; } from './utils/veilid-config';
import { VeilidState, veilidClient } from 'veilid-wasm'; import { VeilidState, veilidClient } from '../../pkg/veilid_wasm';
import { asyncCallWithTimeout, waitForPublicAttachment } from './utils/wait-utils'; import { asyncCallWithTimeout, waitForDetached, waitForPublicAttachment } from './utils/wait-utils';
describe('veilidClient', function () { describe('veilidClient', function () {
before('veilid startup', async function () { before('veilid startup', async function () {
@ -45,16 +45,17 @@ describe('veilidClient', function () {
await veilidClient.attach(); await veilidClient.attach();
await asyncCallWithTimeout(waitForPublicAttachment(), 10000); await asyncCallWithTimeout(waitForPublicAttachment(), 10000);
await veilidClient.detach(); await veilidClient.detach();
await asyncCallWithTimeout(waitForDetached(), 10000);
}); });
describe('kitchen sink', function () { describe('kitchen sink', function () {
before('attach', async function () { before('attach', async function () {
await veilidClient.attach(); await veilidClient.attach();
await waitForPublicAttachment(); await asyncCallWithTimeout(waitForPublicAttachment(), 10000);
}); });
after('detach', async function () { after('detach', async function () {
await veilidClient.detach(); await veilidClient.detach();
await asyncCallWithTimeout(waitForDetached(), 10000);
}); });
let state: VeilidState; let state: VeilidState;

View file

@ -18,7 +18,7 @@ npm install
original_tmpdir=$TMPDIR original_tmpdir=$TMPDIR
mkdir -p ~/tmp mkdir -p ~/tmp
export TMPDIR=~/tmp export TMPDIR=~/tmp
npm run test:headless npm run test:headless -- $@
export TMPDIR=$original_tmpdir export TMPDIR=$original_tmpdir
popd &> /dev/null popd &> /dev/null